v5.9: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 */
 22#include <linux/mmu_context.h>
 23#include "amdgpu.h"
 24#include "amdgpu_amdkfd.h"
 25#include "gc/gc_10_3_0_offset.h"
 26#include "gc/gc_10_3_0_sh_mask.h"
 27#include "navi10_enum.h"
 28#include "oss/osssys_5_0_0_offset.h"
 29#include "oss/osssys_5_0_0_sh_mask.h"
 30#include "soc15_common.h"
 31#include "v10_structs.h"
 32#include "nv.h"
 33#include "nvd.h"
 34#include "gfxhub_v2_1.h"
 35
 36enum hqd_dequeue_request_type {
 37	NO_ACTION = 0,
 38	DRAIN_PIPE,
 39	RESET_WAVES,
 40	SAVE_WAVES
 41};
 42
 43static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 44{
 45	return (struct amdgpu_device *)kgd;
 46}
 47
 48static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
 49			uint32_t queue, uint32_t vmid)
 50{
 51	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 52
 53	mutex_lock(&adev->srbm_mutex);
 54	nv_grbm_select(adev, mec, pipe, queue, vmid);
 55}
 56
 57static void unlock_srbm(struct kgd_dev *kgd)
 58{
 59	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 60
 61	nv_grbm_select(adev, 0, 0, 0, 0);
 62	mutex_unlock(&adev->srbm_mutex);
 63}
 64
 65static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
 66				uint32_t queue_id)
 67{
 68	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 69
 70	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 71	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 72
 73	lock_srbm(kgd, mec, pipe, queue_id, 0);
 74}
 75
 76static uint64_t get_queue_mask(struct amdgpu_device *adev,
 77			       uint32_t pipe_id, uint32_t queue_id)
 78{
 79	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
 80			queue_id;
 81
 82	return 1ull << bit;
 83}
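
The mask computed by get_queue_mask() is what hqd_load_v10_3() later writes to CP_PQ_WPTR_POLL_CNTL1 to select which queue the CP polls. A minimal standalone sketch of the same bit arithmetic; the 8-queues-per-pipe value is only an assumption for illustration (the driver reads it from adev->gfx.mec.num_queue_per_pipe):

	#include <stdint.h>
	#include <stdio.h>

	/* Same bit math as get_queue_mask(), outside the kernel. */
	static uint64_t queue_mask(unsigned int num_queue_per_pipe,
				   uint32_t pipe_id, uint32_t queue_id)
	{
		unsigned int bit = pipe_id * num_queue_per_pipe + queue_id;

		return 1ull << bit;
	}

	int main(void)
	{
		/* pipe 1, queue 3 with 8 queues per pipe -> bit 11 -> 0x800 */
		printf("0x%llx\n", (unsigned long long)queue_mask(8, 1, 3));
		return 0;
	}
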
 84
 85static void release_queue(struct kgd_dev *kgd)
 86{
 87	unlock_srbm(kgd);
 88}
 89
 90static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid,
 91					uint32_t sh_mem_config,
 92					uint32_t sh_mem_ape1_base,
 93					uint32_t sh_mem_ape1_limit,
 94					uint32_t sh_mem_bases)
 95{
 96	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 97
 98	lock_srbm(kgd, 0, 0, 0, vmid);
 99
100	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
101	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
102	/* APE1 no longer exists on GFX9 */
103
104	unlock_srbm(kgd);
105}
106
107/* ATC is defeatured on Sienna_Cichlid */
108static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid,
109					unsigned int vmid)
110{
111	struct amdgpu_device *adev = get_amdgpu_device(kgd);
112
113	uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;
114
115	/* Mapping vmid to pasid also for IH block */
116	pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n",
117			vmid, pasid);
118	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, value);
119
120	return 0;
121}
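
The write above programs a per-VMID entry in the IH block's VMID-to-PASID lookup table: the LUT is a contiguous block of registers, so the entry for a given VMID is addressed by adding the VMID to the base offset. A hedged, illustrative read-back helper using the same register names; this helper is not part of the file and assumes the LUT entry can be read back:

	/* Illustrative only: recover the PASID programmed for a VMID by
	 * indexing the same LUT base and undoing the shift used above.
	 */
	static unsigned int read_ih_pasid_for_vmid(struct amdgpu_device *adev,
						   unsigned int vmid)
	{
		uint32_t v = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid);

		return v >> IH_VMID_0_LUT__PASID__SHIFT;
	}
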
122
123static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id)
124{
125	struct amdgpu_device *adev = get_amdgpu_device(kgd);
126	uint32_t mec;
127	uint32_t pipe;
128
129	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
130	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
131
132	lock_srbm(kgd, mec, pipe, 0, 0);
133
134	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
135		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
136		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
137
138	unlock_srbm(kgd);
139
140	return 0;
141}
142
143static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
144				unsigned int engine_id,
145				unsigned int queue_id)
146{
147	uint32_t sdma_engine_reg_base = 0;
148	uint32_t sdma_rlc_reg_offset;
149
150	switch (engine_id) {
151	default:
152		dev_warn(adev->dev,
153			 "Invalid sdma engine id (%d), using engine id 0\n",
154			 engine_id);
155		fallthrough;
156	case 0:
157		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
158				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
159		break;
160	case 1:
161		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
162				mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
163		break;
164	case 2:
165		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
166				mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
167		break;
168	case 3:
169		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
170				mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
171		break;
172	}
173
174	sdma_rlc_reg_offset = sdma_engine_reg_base
175		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
176
177	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
178			queue_id, sdma_rlc_reg_offset);
179
180	return sdma_rlc_reg_offset;
181}
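
The value returned above is just "engine base plus queue index times the per-queue register stride" (the RLC1 minus RLC0 distance). A standalone sketch of that layout rule with made-up numbers, since the real bases and strides come from the mmSDMA* register headers:

	#include <stdint.h>
	#include <stdio.h>

	/* Same layout rule as get_sdma_rlc_reg_offset(); the base and stride
	 * below are placeholders, not real register values.
	 */
	static uint32_t rlc_reg_offset(uint32_t engine_base, uint32_t rlc_stride,
				       unsigned int queue_id)
	{
		return engine_base + queue_id * rlc_stride;
	}

	int main(void)
	{
		uint32_t base = 0x1000;   /* hypothetical SDMA1 engine base  */
		uint32_t stride = 0x80;   /* hypothetical RLC1 - RLC0 offset */

		/* queue 2 -> 0x1000 + 2 * 0x80 = 0x1100 */
		printf("0x%x\n", rlc_reg_offset(base, stride, 2));
		return 0;
	}
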
182
183static inline struct v10_compute_mqd *get_mqd(void *mqd)
184{
185	return (struct v10_compute_mqd *)mqd;
186}
187
188static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
189{
190	return (struct v10_sdma_mqd *)mqd;
191}
192
193static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
194			uint32_t queue_id, uint32_t __user *wptr,
195			uint32_t wptr_shift, uint32_t wptr_mask,
196			struct mm_struct *mm)
197{
198	struct amdgpu_device *adev = get_amdgpu_device(kgd);
199	struct v10_compute_mqd *m;
200	uint32_t *mqd_hqd;
201	uint32_t reg, hqd_base, data;
202
203	m = get_mqd(mqd);
204
205	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
206	acquire_queue(kgd, pipe_id, queue_id);
207
208	/* HIQ is set during driver init period with vmid set to 0*/
209	if (m->cp_hqd_vmid == 0) {
210		uint32_t value, mec, pipe;
211
212		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
213		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
214
215		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
216			mec, pipe, queue_id);
217		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
218		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
219			((mec << 5) | (pipe << 3) | queue_id | 0x80));
220		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
221	}
222
223	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
224	mqd_hqd = &m->cp_mqd_base_addr_lo;
225	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
226
227	for (reg = hqd_base;
228	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
229		WREG32(reg, mqd_hqd[reg - hqd_base]);
230
231
232	/* Activate doorbell logic before triggering WPTR poll. */
233	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
234			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
235	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
236
237	if (wptr) {
238		/* Don't read wptr with get_user because the user
239		 * context may not be accessible (if this function
240		 * runs in a work queue). Instead trigger a one-shot
241		 * polling read from memory in the CP. This assumes
242		 * that wptr is GPU-accessible in the queue's VMID via
243		 * ATC or SVM. WPTR==RPTR before starting the poll so
244		 * the CP starts fetching new commands from the right
245		 * place.
246		 *
247		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
248		 * tricky. Assume that the queue didn't overflow. The
249		 * number of valid bits in the 32-bit RPTR depends on
250		 * the queue size. The remaining bits are taken from
251		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
252		 * queue size.
253		 */
254		uint32_t queue_size =
255			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
256					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
257		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
258
259		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
260			guessed_wptr += queue_size;
261		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
262		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
263
264		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
265		       lower_32_bits(guessed_wptr));
266		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
267		       upper_32_bits(guessed_wptr));
268		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
269		       lower_32_bits((uint64_t)wptr));
270		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
271		       upper_32_bits((uint64_t)wptr));
272		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
273			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
274		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
275		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
276	}
277
278	/* Start the EOP fetcher */
279	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
280	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
281			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
282
283	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
284	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
285
286	release_queue(kgd);
287
288	return 0;
289}
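
The WPTR reconstruction described in the comment inside hqd_load_v10_3() is easiest to see with concrete numbers: the low bits come from the 32-bit RPTR, the upper bits from the saved 64-bit WPTR, and one queue size is added when the saved WPTR's low bits sit below the RPTR's. A standalone copy of that arithmetic with invented values:

	#include <stdint.h>
	#include <stdio.h>

	/* Same arithmetic as the guessed_wptr computation above. */
	static uint64_t guess_wptr(uint32_t rptr, uint32_t wptr_lo,
				   uint32_t wptr_hi, uint32_t queue_size)
	{
		uint64_t guessed = rptr & (queue_size - 1);

		if ((wptr_lo & (queue_size - 1)) < guessed)
			guessed += queue_size;
		guessed += wptr_lo & ~(queue_size - 1);
		guessed += (uint64_t)wptr_hi << 32;

		return guessed;
	}

	int main(void)
	{
		/* 4 KiB queue, RPTR offset 0x400, saved WPTR 0x2a00: the guess
		 * is 0x2400, i.e. the RPTR offset placed in the same 4 KiB
		 * "lap" as the saved WPTR.
		 */
		printf("0x%llx\n",
		       (unsigned long long)guess_wptr(0x400, 0x2a00, 0, 0x1000));
		return 0;
	}
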
290
291static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
292			    uint32_t pipe_id, uint32_t queue_id,
293			    uint32_t doorbell_off)
294{
295	struct amdgpu_device *adev = get_amdgpu_device(kgd);
296	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
297	struct v10_compute_mqd *m;
298	uint32_t mec, pipe;
299	int r;
300
301	m = get_mqd(mqd);
302
303	acquire_queue(kgd, pipe_id, queue_id);
304
305	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
306	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
307
308	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
309		 mec, pipe, queue_id);
310
311	spin_lock(&adev->gfx.kiq.ring_lock);
312	r = amdgpu_ring_alloc(kiq_ring, 7);
313	if (r) {
314		pr_err("Failed to alloc KIQ (%d).\n", r);
315		goto out_unlock;
316	}
317
318	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
319	amdgpu_ring_write(kiq_ring,
320			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
321			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
322			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
323			  PACKET3_MAP_QUEUES_PIPE(pipe) |
324			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
325			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
326			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
327			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
328			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
329	amdgpu_ring_write(kiq_ring,
330			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
331	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
332	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
333	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
334	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
335	amdgpu_ring_commit(kiq_ring);
336
337out_unlock:
338	spin_unlock(&adev->gfx.kiq.ring_lock);
339	release_queue(kgd);
340
341	return r;
342}
343
344static int hqd_dump_v10_3(struct kgd_dev *kgd,
345			uint32_t pipe_id, uint32_t queue_id,
346			uint32_t (**dump)[2], uint32_t *n_regs)
347{
348	struct amdgpu_device *adev = get_amdgpu_device(kgd);
349	uint32_t i = 0, reg;
350#define HQD_N_REGS 56
351#define DUMP_REG(addr) do {				\
352		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
353			break;				\
354		(*dump)[i][0] = (addr) << 2;		\
355		(*dump)[i++][1] = RREG32(addr);		\
356	} while (0)
357
358	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
359	if (*dump == NULL)
360		return -ENOMEM;
361
362	acquire_queue(kgd, pipe_id, queue_id);
363
364	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
365	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
366		DUMP_REG(reg);
367
368	release_queue(kgd);
369
370	WARN_ON_ONCE(i != HQD_N_REGS);
371	*n_regs = i;
372
373	return 0;
374}
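
The dump buffer filled above is an array of {byte offset, value} pairs: DUMP_REG stores the register offset shifted left by 2 (dword offset converted to a byte offset) plus the value read, and ownership of the kmalloc'ed buffer passes to the caller. A hedged sketch of a consumer, not part of this file:

	/* Illustrative consumer of the hqd_dump output. */
	static void print_hqd_dump(uint32_t (*dump)[2], uint32_t n_regs)
	{
		uint32_t i;

		for (i = 0; i < n_regs; i++)
			pr_info("reg 0x%05x = 0x%08x\n", dump[i][0], dump[i][1]);

		kfree(dump);
	}
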
375
376static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd,
377			     uint32_t __user *wptr, struct mm_struct *mm)
378{
379	struct amdgpu_device *adev = get_amdgpu_device(kgd);
380	struct v10_sdma_mqd *m;
381	uint32_t sdma_rlc_reg_offset;
382	unsigned long end_jiffies;
383	uint32_t data;
384	uint64_t data64;
385	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
386
387	m = get_sdma_mqd(mqd);
388	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
389					    m->sdma_queue_id);
390
391	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
392		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
393
394	end_jiffies = msecs_to_jiffies(2000) + jiffies;
395	while (true) {
396		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
397		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
398			break;
399		if (time_after(jiffies, end_jiffies)) {
400			pr_err("SDMA RLC not idle in %s\n", __func__);
401			return -ETIME;
402		}
403		usleep_range(500, 1000);
404	}
405
406	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
407	       m->sdmax_rlcx_doorbell_offset);
408
409	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
410			     ENABLE, 1);
411	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
412	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
413				m->sdmax_rlcx_rb_rptr);
414	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
415				m->sdmax_rlcx_rb_rptr_hi);
416
417	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
418	if (read_user_wptr(mm, wptr64, data64)) {
419		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
420		       lower_32_bits(data64));
421		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
422		       upper_32_bits(data64));
423	} else {
424		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
425		       m->sdmax_rlcx_rb_rptr);
426		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
427		       m->sdmax_rlcx_rb_rptr_hi);
428	}
429	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
430
431	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
432	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
433			m->sdmax_rlcx_rb_base_hi);
434	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
435			m->sdmax_rlcx_rb_rptr_addr_lo);
436	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
437			m->sdmax_rlcx_rb_rptr_addr_hi);
438
439	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
440			     RB_ENABLE, 1);
441	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
442
443	return 0;
444}
445
446static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
447			     uint32_t engine_id, uint32_t queue_id,
448			     uint32_t (**dump)[2], uint32_t *n_regs)
449{
450	struct amdgpu_device *adev = get_amdgpu_device(kgd);
451	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
452			engine_id, queue_id);
453	uint32_t i = 0, reg;
454#undef HQD_N_REGS
455#define HQD_N_REGS (19+6+7+10)
456
457	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
458	if (*dump == NULL)
459		return -ENOMEM;
460
461	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
462		DUMP_REG(sdma_rlc_reg_offset + reg);
463	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
464		DUMP_REG(sdma_rlc_reg_offset + reg);
465	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
466	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
467		DUMP_REG(sdma_rlc_reg_offset + reg);
468	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
469	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
470		DUMP_REG(sdma_rlc_reg_offset + reg);
471
472	WARN_ON_ONCE(i != HQD_N_REGS);
473	*n_regs = i;
474
475	return 0;
476}
477
478static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
479				uint32_t pipe_id, uint32_t queue_id)
480{
481	struct amdgpu_device *adev = get_amdgpu_device(kgd);
482	uint32_t act;
483	bool retval = false;
484	uint32_t low, high;
485
486	acquire_queue(kgd, pipe_id, queue_id);
487	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
488	if (act) {
489		low = lower_32_bits(queue_address >> 8);
490		high = upper_32_bits(queue_address >> 8);
491
492		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
493		   high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
494			retval = true;
495	}
496	release_queue(kgd);
497	return retval;
498}
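
The comparison above works because CP_HQD_PQ_BASE and CP_HQD_PQ_BASE_HI hold the ring base as a 256-byte-aligned GPU address shifted right by 8, split into low and high words, so the function splits queue_address the same way before comparing. A standalone sketch of that split with an invented address:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t queue_address = 0x123456789000ULL;	/* 256-byte aligned */
		uint64_t shifted = queue_address >> 8;

		printf("low  0x%08x\n", (uint32_t)shifted);
		printf("high 0x%08x\n", (uint32_t)(shifted >> 32));
		return 0;
	}
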
499
500static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd)
501{
502	struct amdgpu_device *adev = get_amdgpu_device(kgd);
503	struct v10_sdma_mqd *m;
504	uint32_t sdma_rlc_reg_offset;
505	uint32_t sdma_rlc_rb_cntl;
506
507	m = get_sdma_mqd(mqd);
508	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
509					    m->sdma_queue_id);
510
511	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
512
513	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
514		return true;
515
516	return false;
517}
518
519static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
520				enum kfd_preempt_type reset_type,
521				unsigned int utimeout, uint32_t pipe_id,
522				uint32_t queue_id)
523{
524	struct amdgpu_device *adev = get_amdgpu_device(kgd);
525	enum hqd_dequeue_request_type type;
526	unsigned long end_jiffies;
527	uint32_t temp;
528	struct v10_compute_mqd *m = get_mqd(mqd);
529
530	acquire_queue(kgd, pipe_id, queue_id);
531
532	if (m->cp_hqd_vmid == 0)
533		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
534
535	switch (reset_type) {
536	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
537		type = DRAIN_PIPE;
538		break;
539	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
540		type = RESET_WAVES;
541		break;
542	default:
543		type = DRAIN_PIPE;
544		break;
545	}
546
547	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
548
549	end_jiffies = (utimeout * HZ / 1000) + jiffies;
550	while (true) {
551		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
552		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
553			break;
554		if (time_after(jiffies, end_jiffies)) {
555			pr_err("cp queue pipe %d queue %d preemption failed\n",
556					pipe_id, queue_id);
557			release_queue(kgd);
558			return -ETIME;
559		}
560		usleep_range(500, 1000);
561	}
562
563	release_queue(kgd);
564	return 0;
565}
566
567static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
568				unsigned int utimeout)
569{
570	struct amdgpu_device *adev = get_amdgpu_device(kgd);
571	struct v10_sdma_mqd *m;
572	uint32_t sdma_rlc_reg_offset;
573	uint32_t temp;
574	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
575
576	m = get_sdma_mqd(mqd);
577	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
578					    m->sdma_queue_id);
579
580	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
581	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
582	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
583
584	while (true) {
585		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
586		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
587			break;
588		if (time_after(jiffies, end_jiffies)) {
589			pr_err("SDMA RLC not idle in %s\n", __func__);
590			return -ETIME;
591		}
592		usleep_range(500, 1000);
593	}
594
595	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
596	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
597		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
598		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
599
600	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
601	m->sdmax_rlcx_rb_rptr_hi =
602		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
603
604	return 0;
605}
606
607
608static int address_watch_disable_v10_3(struct kgd_dev *kgd)
609{
610	return 0;
611}
612
613static int address_watch_execute_v10_3(struct kgd_dev *kgd,
614					unsigned int watch_point_id,
615					uint32_t cntl_val,
616					uint32_t addr_hi,
617					uint32_t addr_lo)
618{
619	return 0;
620}
621
622static int wave_control_execute_v10_3(struct kgd_dev *kgd,
623					uint32_t gfx_index_val,
624					uint32_t sq_cmd)
625{
626	struct amdgpu_device *adev = get_amdgpu_device(kgd);
627	uint32_t data = 0;
628
629	mutex_lock(&adev->grbm_idx_mutex);
630
631	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
632	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
633
634	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
635		INSTANCE_BROADCAST_WRITES, 1);
636	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
637		SA_BROADCAST_WRITES, 1);
638	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
639		SE_BROADCAST_WRITES, 1);
640
641	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
642	mutex_unlock(&adev->grbm_idx_mutex);
643
644	return 0;
645}
646
647static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd,
648					unsigned int watch_point_id,
649					unsigned int reg_offset)
650{
651	return 0;
652}
653
654static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid,
655		uint64_t page_table_base)
656{
657	struct amdgpu_device *adev = get_amdgpu_device(kgd);
658
659	/* SDMA is on gfxhub as well for Navi1* series */
660	gfxhub_v2_1_setup_vm_pt_regs(adev, vmid, page_table_base);
661}
662
663#if 0
664uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
665				uint32_t trap_debug_wave_launch_mode,
666				uint32_t vmid)
667{
668	struct amdgpu_device *adev = get_amdgpu_device(kgd);
669	uint32_t data = 0;
670	uint32_t orig_wave_cntl_value;
671	uint32_t orig_stall_vmid;
672
673	mutex_lock(&adev->grbm_idx_mutex);
674
675	orig_wave_cntl_value = RREG32(SOC15_REG_OFFSET(GC,
676				0,
677				mmSPI_GDBG_WAVE_CNTL));
678	orig_stall_vmid = REG_GET_FIELD(orig_wave_cntl_value,
679			SPI_GDBG_WAVE_CNTL,
680			STALL_VMID);
681
682	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
683	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
684
685	data = 0;
686	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
687
688	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), orig_stall_vmid);
689
690	mutex_unlock(&adev->grbm_idx_mutex);
691
692	return 0;
693}
694
695uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd)
696{
697	struct amdgpu_device *adev = get_amdgpu_device(kgd);
698
699	mutex_lock(&adev->grbm_idx_mutex);
700
701	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
702
703	mutex_unlock(&adev->grbm_idx_mutex);
704
705	return 0;
706}
707
708uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd,
709						uint32_t trap_override,
710						uint32_t trap_mask)
711{
712	struct amdgpu_device *adev = get_amdgpu_device(kgd);
713	uint32_t data = 0;
714
715	mutex_lock(&adev->grbm_idx_mutex);
716
717	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
718	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
719	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
720
721	data = 0;
722	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
723			EXCP_EN, trap_mask);
724	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
725			REPLACE, trap_override);
726	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
727
728	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
729	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 0);
730	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
731
732	mutex_unlock(&adev->grbm_idx_mutex);
733
734	return 0;
735}
736
737uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd,
738					uint8_t wave_launch_mode,
739					uint32_t vmid)
740{
741	struct amdgpu_device *adev = get_amdgpu_device(kgd);
742	uint32_t data = 0;
743	bool is_stall_mode;
744	bool is_mode_set;
745
746	is_stall_mode = (wave_launch_mode == 4);
747	is_mode_set = (wave_launch_mode != 0 && wave_launch_mode != 4);
748
749	mutex_lock(&adev->grbm_idx_mutex);
750
751	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
752			VMID_MASK, is_mode_set ? 1 << vmid : 0);
753	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
754			MODE, is_mode_set ? wave_launch_mode : 0);
755	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);
756
757	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
758	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
759			STALL_VMID, is_stall_mode ? 1 << vmid : 0);
760	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
761			STALL_RA, is_stall_mode ? 1 : 0);
762	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
763
764	mutex_unlock(&adev->grbm_idx_mutex);
765
766	return 0;
767}
768
769/* kgd_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
770 * The values read are:
771 *	ib_offload_wait_time     -- Wait Count for Indirect Buffer Offloads.
772 *	atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
773 *	wrm_offload_wait_time    -- Wait Count for WAIT_REG_MEM Offloads.
774 *	gws_wait_time            -- Wait Count for Global Wave Syncs.
775 *	que_sleep_wait_time      -- Wait Count for Dequeue Retry.
776 *	sch_wave_wait_time       -- Wait Count for Scheduling Wave Message.
777 *	sem_rearm_wait_time      -- Wait Count for Semaphore re-arm.
778 *	deq_retry_wait_time      -- Wait Count for Global Wave Syncs.
779 */
780void get_iq_wait_times_v10_3(struct kgd_dev *kgd,
781					uint32_t *wait_times)
782
783{
784	struct amdgpu_device *adev = get_amdgpu_device(kgd);
785
786	*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
787}
788
789void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd,
790						uint32_t wait_times,
791						uint32_t grace_period,
792						uint32_t *reg_offset,
793						uint32_t *reg_data)
794{
795	*reg_data = wait_times;
796
797	*reg_data = REG_SET_FIELD(*reg_data,
798			CP_IQ_WAIT_TIME2,
799			SCH_WAVE,
800			grace_period);
801
802	*reg_offset = mmCP_IQ_WAIT_TIME2;
803}
804#endif
805
806const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
807	.program_sh_mem_settings = program_sh_mem_settings_v10_3,
808	.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3,
809	.init_interrupts = init_interrupts_v10_3,
810	.hqd_load = hqd_load_v10_3,
811	.hiq_mqd_load = hiq_mqd_load_v10_3,
812	.hqd_sdma_load = hqd_sdma_load_v10_3,
813	.hqd_dump = hqd_dump_v10_3,
814	.hqd_sdma_dump = hqd_sdma_dump_v10_3,
815	.hqd_is_occupied = hqd_is_occupied_v10_3,
816	.hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3,
817	.hqd_destroy = hqd_destroy_v10_3,
818	.hqd_sdma_destroy = hqd_sdma_destroy_v10_3,
819	.address_watch_disable = address_watch_disable_v10_3,
820	.address_watch_execute = address_watch_execute_v10_3,
821	.wave_control_execute = wave_control_execute_v10_3,
822	.address_watch_get_offset = address_watch_get_offset_v10_3,
823	.get_atc_vmid_pasid_mapping_info = NULL,
824	.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
825	.get_hive_id = amdgpu_amdkfd_get_hive_id,
826#if 0
827	.enable_debug_trap = enable_debug_trap_v10_3,
828	.disable_debug_trap = disable_debug_trap_v10_3,
829	.set_wave_launch_trap_override = set_wave_launch_trap_override_v10_3,
830	.set_wave_launch_mode = set_wave_launch_mode_v10_3,
831	.get_iq_wait_times = get_iq_wait_times_v10_3,
832	.build_grace_period_packet_info = build_grace_period_packet_info_v10_3,
833#endif
834};
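
KFD never calls these functions by symbol; it reaches them through the kfd2kgd_calls table that amdgpu attaches to the device, so the table above is the whole GFX 10.3 interface surface in this kernel version. A simplified, illustrative call pattern (the surrounding structures are abbreviated here; this is not a real KFD call site):

	/* Illustrative dispatch through the function table. */
	static int load_queue_example(struct kgd_dev *kgd,
				      const struct kfd2kgd_calls *f2g,
				      void *mqd, uint32_t pipe_id, uint32_t queue_id,
				      uint32_t __user *wptr, struct mm_struct *mm)
	{
		return f2g->hqd_load(kgd, mqd, pipe_id, queue_id, wptr, 0, 0, mm);
	}
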
v6.13.7: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 */
 22#include <linux/mmu_context.h>
 23#include "amdgpu.h"
 24#include "amdgpu_amdkfd.h"
 25#include "amdgpu_amdkfd_gfx_v10.h"
 26#include "gc/gc_10_3_0_offset.h"
 27#include "gc/gc_10_3_0_sh_mask.h"
 28#include "oss/osssys_5_0_0_offset.h"
 29#include "oss/osssys_5_0_0_sh_mask.h"
 30#include "athub/athub_2_1_0_offset.h"
 31#include "athub/athub_2_1_0_sh_mask.h"
 32#include "soc15_common.h"
 33#include "v10_structs.h"
 34#include "nv.h"
 35#include "nvd.h"
 36
 37enum hqd_dequeue_request_type {
 38	NO_ACTION = 0,
 39	DRAIN_PIPE,
 40	RESET_WAVES,
 41	SAVE_WAVES
 42};
 43
 44static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 45			uint32_t queue, uint32_t vmid)
 46{
 47	mutex_lock(&adev->srbm_mutex);
 48	nv_grbm_select(adev, mec, pipe, queue, vmid);
 49}
 50
 51static void unlock_srbm(struct amdgpu_device *adev)
 52{
 53	nv_grbm_select(adev, 0, 0, 0, 0);
 54	mutex_unlock(&adev->srbm_mutex);
 55}
 56
 57static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
 58				uint32_t queue_id)
 59{
 60	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 61	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 62
 63	lock_srbm(adev, mec, pipe, queue_id, 0);
 64}
 65
 66static uint64_t get_queue_mask(struct amdgpu_device *adev,
 67			       uint32_t pipe_id, uint32_t queue_id)
 68{
 69	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
 70			queue_id;
 71
 72	return 1ull << bit;
 73}
 74
 75static void release_queue(struct amdgpu_device *adev)
 76{
 77	unlock_srbm(adev);
 78}
 79
 80static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t vmid,
 81					uint32_t sh_mem_config,
 82					uint32_t sh_mem_ape1_base,
 83					uint32_t sh_mem_ape1_limit,
 84					uint32_t sh_mem_bases, uint32_t inst)
 85{
 86	lock_srbm(adev, 0, 0, 0, vmid);
 87
 88	WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
 89	WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
 90	/* APE1 no longer exists on GFX9 */
 91
 92	unlock_srbm(adev);
 93}
 94
 95/* ATC is defeatured on Sienna_Cichlid */
 96static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid,
 97					unsigned int vmid, uint32_t inst)
 98{
 99	uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;
100
101	/* Mapping vmid to pasid also for IH block */
102	pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n",
103			vmid, pasid);
104	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, value);
105
106	return 0;
107}
108
109static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id,
110				uint32_t inst)
111{
112	uint32_t mec;
113	uint32_t pipe;
114
115	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
116	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
117
118	lock_srbm(adev, mec, pipe, 0, 0);
119
120	WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
121		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
122		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
123
124	unlock_srbm(adev);
125
126	return 0;
127}
128
129static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
130				unsigned int engine_id,
131				unsigned int queue_id)
132{
133	uint32_t sdma_engine_reg_base = 0;
134	uint32_t sdma_rlc_reg_offset;
135
136	switch (engine_id) {
137	default:
138		dev_warn(adev->dev,
139			 "Invalid sdma engine id (%d), using engine id 0\n",
140			 engine_id);
141		fallthrough;
142	case 0:
143		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
144				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
145		break;
146	case 1:
147		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
148				mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
149		break;
150	case 2:
151		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
152				mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
153		break;
154	case 3:
155		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
156				mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
157		break;
158	}
159
160	sdma_rlc_reg_offset = sdma_engine_reg_base
161		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
162
163	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
164			queue_id, sdma_rlc_reg_offset);
165
166	return sdma_rlc_reg_offset;
167}
168
169static inline struct v10_compute_mqd *get_mqd(void *mqd)
170{
171	return (struct v10_compute_mqd *)mqd;
172}
173
174static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
175{
176	return (struct v10_sdma_mqd *)mqd;
177}
178
179static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
180			uint32_t pipe_id, uint32_t queue_id,
181			uint32_t __user *wptr, uint32_t wptr_shift,
182			uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
183{
184	struct v10_compute_mqd *m;
185	uint32_t *mqd_hqd;
186	uint32_t reg, hqd_base, data;
187
188	m = get_mqd(mqd);
189
190	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
191	acquire_queue(adev, pipe_id, queue_id);
192
193	/* HIQ is set during driver init period with vmid set to 0*/
194	if (m->cp_hqd_vmid == 0) {
195		uint32_t value, mec, pipe;
196
197		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
198		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
199
200		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
201			mec, pipe, queue_id);
202		value = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
203		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
204			((mec << 5) | (pipe << 3) | queue_id | 0x80));
205		WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, value);
206	}
207
208	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
209	mqd_hqd = &m->cp_mqd_base_addr_lo;
210	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
211
212	for (reg = hqd_base;
213	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
214		WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]);
215
216
217	/* Activate doorbell logic before triggering WPTR poll. */
218	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
219			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
220	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);
221
222	if (wptr) {
223		/* Don't read wptr with get_user because the user
224		 * context may not be accessible (if this function
225		 * runs in a work queue). Instead trigger a one-shot
226		 * polling read from memory in the CP. This assumes
227		 * that wptr is GPU-accessible in the queue's VMID via
228		 * ATC or SVM. WPTR==RPTR before starting the poll so
229		 * the CP starts fetching new commands from the right
230		 * place.
231		 *
232		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
233		 * tricky. Assume that the queue didn't overflow. The
234		 * number of valid bits in the 32-bit RPTR depends on
235		 * the queue size. The remaining bits are taken from
236		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
237		 * queue size.
238		 */
239		uint32_t queue_size =
240			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
241					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
242		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
243
244		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
245			guessed_wptr += queue_size;
246		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
247		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
248
249		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
250		       lower_32_bits(guessed_wptr));
251		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
252		       upper_32_bits(guessed_wptr));
253		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
254		       lower_32_bits((uint64_t)wptr));
255		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
256		       upper_32_bits((uint64_t)wptr));
257		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
258			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
259		WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
260		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
261	}
262
263	/* Start the EOP fetcher */
264	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
265	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
266			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
267
268	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
269	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
270
271	release_queue(adev);
272
273	return 0;
274}
275
276static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
277			    uint32_t pipe_id, uint32_t queue_id,
278			    uint32_t doorbell_off, uint32_t inst)
279{
280	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
281	struct v10_compute_mqd *m;
282	uint32_t mec, pipe;
283	int r;
284
285	m = get_mqd(mqd);
286
287	acquire_queue(adev, pipe_id, queue_id);
288
289	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
290	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
291
292	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
293		 mec, pipe, queue_id);
294
295	spin_lock(&adev->gfx.kiq[0].ring_lock);
296	r = amdgpu_ring_alloc(kiq_ring, 7);
297	if (r) {
298		pr_err("Failed to alloc KIQ (%d).\n", r);
299		goto out_unlock;
300	}
301
302	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
303	amdgpu_ring_write(kiq_ring,
304			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
305			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
306			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
307			  PACKET3_MAP_QUEUES_PIPE(pipe) |
308			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
309			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
310			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
311			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
312			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
313	amdgpu_ring_write(kiq_ring,
314			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
315	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
316	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
317	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
318	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
319	amdgpu_ring_commit(kiq_ring);
320
321out_unlock:
322	spin_unlock(&adev->gfx.kiq[0].ring_lock);
323	release_queue(adev);
324
325	return r;
326}
327
328static int hqd_dump_v10_3(struct amdgpu_device *adev,
329			uint32_t pipe_id, uint32_t queue_id,
330			uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
331{
332	uint32_t i = 0, reg;
333#define HQD_N_REGS 56
334#define DUMP_REG(addr) do {				\
335		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
336			break;				\
337		(*dump)[i][0] = (addr) << 2;		\
338		(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr);		\
339	} while (0)
340
341	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
342	if (*dump == NULL)
343		return -ENOMEM;
344
345	acquire_queue(adev, pipe_id, queue_id);
346
347	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
348	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
349		DUMP_REG(reg);
350
351	release_queue(adev);
352
353	WARN_ON_ONCE(i != HQD_N_REGS);
354	*n_regs = i;
355
356	return 0;
357}
358
359static int hqd_sdma_load_v10_3(struct amdgpu_device *adev, void *mqd,
360			     uint32_t __user *wptr, struct mm_struct *mm)
361{
362	struct v10_sdma_mqd *m;
363	uint32_t sdma_rlc_reg_offset;
364	unsigned long end_jiffies;
365	uint32_t data;
366	uint64_t data64;
367	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
368
369	m = get_sdma_mqd(mqd);
370	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
371					    m->sdma_queue_id);
372
373	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
374		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
375
376	end_jiffies = msecs_to_jiffies(2000) + jiffies;
377	while (true) {
378		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
379		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
380			break;
381		if (time_after(jiffies, end_jiffies)) {
382			pr_err("SDMA RLC not idle in %s\n", __func__);
383			return -ETIME;
384		}
385		usleep_range(500, 1000);
386	}
387
388	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
389	       m->sdmax_rlcx_doorbell_offset);
390
391	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
392			     ENABLE, 1);
393	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
394	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
395				m->sdmax_rlcx_rb_rptr);
396	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
397				m->sdmax_rlcx_rb_rptr_hi);
398
399	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
400	if (read_user_wptr(mm, wptr64, data64)) {
401		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
402		       lower_32_bits(data64));
403		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
404		       upper_32_bits(data64));
405	} else {
406		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
407		       m->sdmax_rlcx_rb_rptr);
408		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
409		       m->sdmax_rlcx_rb_rptr_hi);
410	}
411	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
412
413	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
414	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
415			m->sdmax_rlcx_rb_base_hi);
416	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
417			m->sdmax_rlcx_rb_rptr_addr_lo);
418	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
419			m->sdmax_rlcx_rb_rptr_addr_hi);
420
421	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
422			     RB_ENABLE, 1);
423	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
424
425	return 0;
426}
427
428static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
429			     uint32_t engine_id, uint32_t queue_id,
430			     uint32_t (**dump)[2], uint32_t *n_regs)
431{
432	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
433			engine_id, queue_id);
434	uint32_t i = 0, reg;
435#undef HQD_N_REGS
436#define HQD_N_REGS (19+6+7+12)
437
438	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
439	if (*dump == NULL)
440		return -ENOMEM;
441
442	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
443		DUMP_REG(sdma_rlc_reg_offset + reg);
444	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
445		DUMP_REG(sdma_rlc_reg_offset + reg);
446	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
447	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
448		DUMP_REG(sdma_rlc_reg_offset + reg);
449	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
450	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
451		DUMP_REG(sdma_rlc_reg_offset + reg);
452
453	WARN_ON_ONCE(i != HQD_N_REGS);
454	*n_regs = i;
455
456	return 0;
457}
458
459static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev,
460				uint64_t queue_address, uint32_t pipe_id,
461				uint32_t queue_id, uint32_t inst)
462{
463	uint32_t act;
464	bool retval = false;
465	uint32_t low, high;
466
467	acquire_queue(adev, pipe_id, queue_id);
468	act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
469	if (act) {
470		low = lower_32_bits(queue_address >> 8);
471		high = upper_32_bits(queue_address >> 8);
472
473		if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
474		   high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
475			retval = true;
476	}
477	release_queue(adev);
478	return retval;
479}
480
481static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev,
482				void *mqd)
483{
484	struct v10_sdma_mqd *m;
485	uint32_t sdma_rlc_reg_offset;
486	uint32_t sdma_rlc_rb_cntl;
487
488	m = get_sdma_mqd(mqd);
489	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
490					    m->sdma_queue_id);
491
492	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
493
494	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
495		return true;
496
497	return false;
498}
499
500static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
501				enum kfd_preempt_type reset_type,
502				unsigned int utimeout, uint32_t pipe_id,
503				uint32_t queue_id, uint32_t inst)
504{
505	enum hqd_dequeue_request_type type;
506	unsigned long end_jiffies;
507	uint32_t temp;
508	struct v10_compute_mqd *m = get_mqd(mqd);
509
510	acquire_queue(adev, pipe_id, queue_id);
511
512	if (m->cp_hqd_vmid == 0)
513		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
514
515	switch (reset_type) {
516	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
517		type = DRAIN_PIPE;
518		break;
519	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
520		type = RESET_WAVES;
521		break;
522	case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
523		type = SAVE_WAVES;
524		break;
525	default:
526		type = DRAIN_PIPE;
527		break;
528	}
529
530	WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type);
531
532	end_jiffies = (utimeout * HZ / 1000) + jiffies;
533	while (true) {
534		temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
535		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
536			break;
537		if (time_after(jiffies, end_jiffies)) {
538			pr_err("cp queue pipe %d queue %d preemption failed\n",
539					pipe_id, queue_id);
540			release_queue(adev);
541			return -ETIME;
542		}
543		usleep_range(500, 1000);
544	}
545
546	release_queue(adev);
547	return 0;
548}
549
550static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
551				unsigned int utimeout)
552{
553	struct v10_sdma_mqd *m;
554	uint32_t sdma_rlc_reg_offset;
555	uint32_t temp;
556	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
557
558	m = get_sdma_mqd(mqd);
559	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
560					    m->sdma_queue_id);
561
562	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
563	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
564	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
565
566	while (true) {
567		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
568		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
569			break;
570		if (time_after(jiffies, end_jiffies)) {
571			pr_err("SDMA RLC not idle in %s\n", __func__);
572			return -ETIME;
573		}
574		usleep_range(500, 1000);
575	}
576
577	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
578	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
579		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
580		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
581
582	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
583	m->sdmax_rlcx_rb_rptr_hi =
584		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
585
586	return 0;
587}
588
589static int wave_control_execute_v10_3(struct amdgpu_device *adev,
590					uint32_t gfx_index_val,
591					uint32_t sq_cmd, uint32_t inst)
592{
593	uint32_t data = 0;
594
595	mutex_lock(&adev->grbm_idx_mutex);
596
597	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
598	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
599
600	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
601		INSTANCE_BROADCAST_WRITES, 1);
602	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
603		SA_BROADCAST_WRITES, 1);
604	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
605		SE_BROADCAST_WRITES, 1);
606
607	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
608	mutex_unlock(&adev->grbm_idx_mutex);
609
610	return 0;
611}
612
613static bool get_atc_vmid_pasid_mapping_info_v10_3(struct amdgpu_device *adev,
614					uint8_t vmid, uint16_t *p_pasid)
615{
616	uint32_t value;
617
618	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
619		     + vmid);
620	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
621
622	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
623}
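
The v6.13 version adds this helper: the ATC_VMID0_PASID_MAPPING registers are again indexed by VMID, each packing the PASID in the low bits and a valid flag in a separate bit, which is what the mask-and-test above extracts. A standalone sketch of the decode with placeholder masks; the real ones are the ATC_VMID0_PASID_MAPPING__* macros from the ATHUB headers:

	#include <stdint.h>
	#include <stdio.h>

	#define EX_PASID_MASK 0x0000ffffu	/* placeholder, illustration only */
	#define EX_VALID_MASK 0x80000000u	/* placeholder, illustration only */

	int main(void)
	{
		uint32_t value = EX_VALID_MASK | 0x1234;	/* hypothetical readout */
		uint16_t pasid = value & EX_PASID_MASK;
		int valid = !!(value & EX_VALID_MASK);

		printf("pasid 0x%04x valid %d\n", pasid, valid);
		return 0;
	}
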
624
625static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev,
626		uint32_t vmid, uint64_t page_table_base)
627{
628	/* SDMA is on gfxhub as well for Navi1* series */
629	adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
630}
631
632static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev,
633			uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
634			uint32_t inst)
635{
636	lock_srbm(adev, 0, 0, 0, vmid);
637
638	/*
639	 * Program TBA registers
640	 */
641	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
642			lower_32_bits(tba_addr >> 8));
643	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
644			upper_32_bits(tba_addr >> 8) |
645			(1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
646
647	/*
648	 * Program TMA registers
649	 */
650	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
651			lower_32_bits(tma_addr >> 8));
652	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
653			 upper_32_bits(tma_addr >> 8));
654
655	unlock_srbm(adev);
656}
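
program_trap_handler_settings_v10_3() writes the trap handler base (TBA) and trap memory (TMA) addresses as 256-byte-aligned values shifted right by 8, low and high words in separate registers, with the trap-enable bit OR'd into the TBA high word. A standalone sketch of that split; the TRAP_EN bit position below is a placeholder for SQ_SHADER_TBA_HI__TRAP_EN__SHIFT:

	#include <stdint.h>
	#include <stdio.h>

	#define EX_TRAP_EN_SHIFT 31	/* placeholder, illustration only */

	int main(void)
	{
		uint64_t tba_addr = 0x0000800012345600ULL;	/* 256-byte aligned */
		uint64_t s = tba_addr >> 8;

		printf("TBA_LO 0x%08x\n", (uint32_t)s);
		printf("TBA_HI 0x%08x\n",
		       (uint32_t)(s >> 32) | (1u << EX_TRAP_EN_SHIFT));
		return 0;
	}
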
657
658const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
659	.program_sh_mem_settings = program_sh_mem_settings_v10_3,
660	.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3,
661	.init_interrupts = init_interrupts_v10_3,
662	.hqd_load = hqd_load_v10_3,
663	.hiq_mqd_load = hiq_mqd_load_v10_3,
664	.hqd_sdma_load = hqd_sdma_load_v10_3,
665	.hqd_dump = hqd_dump_v10_3,
666	.hqd_sdma_dump = hqd_sdma_dump_v10_3,
667	.hqd_is_occupied = hqd_is_occupied_v10_3,
668	.hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3,
669	.hqd_destroy = hqd_destroy_v10_3,
670	.hqd_sdma_destroy = hqd_sdma_destroy_v10_3,
671	.wave_control_execute = wave_control_execute_v10_3,
672	.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info_v10_3,
673	.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
674	.program_trap_handler_settings = program_trap_handler_settings_v10_3,
675	.get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
676	.build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
677	.enable_debug_trap = kgd_gfx_v10_enable_debug_trap,
678	.disable_debug_trap = kgd_gfx_v10_disable_debug_trap,
679	.validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request,
680	.set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override,
681	.set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode,
682	.set_address_watch = kgd_gfx_v10_set_address_watch,
683	.clear_address_watch = kgd_gfx_v10_clear_address_watch,
684	.hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
685	.hqd_reset = kgd_gfx_v10_hqd_reset
686};