/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/mmu_context.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "soc15_hw_ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "sdma1/sdma1_4_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"
#include "soc15_common.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_0.h"
#include "gmc_v9_0.h"


#define V9_PIPE_PER_MEC		(4)
#define V9_QUEUES_PER_PIPE_MEC	(8)

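/* Dequeue request types written to CP_HQD_DEQUEUE_REQUEST in
 * kgd_gfx_v9_hqd_destroy(): DRAIN_PIPE lets queued work complete before
 * the queue is released, RESET_WAVES kills waves already in flight.
 */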
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};


/* Because REG_GET_FIELD() is used, this function has to live in the
 * ASIC-specific file.
 */
int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

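/* The SRBM/GRBM select below routes subsequent per-queue register accesses
 * to one (MEC, pipe, queue, VMID) instance; srbm_mutex serializes the
 * select/access/deselect sequence.
 */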
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

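/* Each bit of CP_PQ_WPTR_POLL_CNTL1 enables wptr polling for one of the up
 * to 32 queues (4 pipes x 8 queues) of the selected MEC, hence the
 * modulo-32 bit index.
 */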
static uint32_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
			    queue_id) & 31;

	return ((uint32_t)1) << bit;
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 */

	unlock_srbm(kgd);
}

int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress, or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	/*
	 * We have to do this twice: once for GFX and once for MMHUB.
	 * For the ATC, add 16 to the VMID to address the MMHUB copy;
	 * the IH block uses different registers entirely.
	 * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
	 */

	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);

	/* Map vmid to pasid for the IH block as well */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << (vmid + 16))))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << (vmid + 16));

	/* Map vmid to pasid for the IH block as well (MM LUT) */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid,
	       pasid_mapping);
	return 0;
}

/* TODO - the RING0 form of this field is obsolete (it seems to date back
 * to SI) but still works.
 */

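/* Enable timestamp and opcode-error interrupts in the CPC for this pipe. */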
int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

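/* Compute the register offset of an SDMA RLC queue's register block: start
 * from the engine's RLC0 block and step by the per-queue register stride.
 */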
static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t base[2] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
	};
	uint32_t retval;

	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
					       mmSDMA0_RLC0_RB_CNTL);

	pr_debug("sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct v9_mqd *get_mqd(void *mqd)
{
	return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* The HIQ is set up during driver init with VMID 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
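		/* Route the HIQ to this (mec, pipe, queue) slot via the
		 * scheduler1 field; the bit layout here is inferred from
		 * the value written below rather than from documentation.
		 */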
		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);


	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
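		/* Worked example with illustrative values: for
		 * queue_size = 0x400, rptr = 0x10, wptr_lo = 0x408 this
		 * gives 0x10 + 0x400 (wrap, since 0x008 < 0x10) + 0x400
		 * = 0x810: congruent to rptr modulo the queue size, and
		 * not less than the saved wptr, so no old packets are
		 * re-fetched.
		 */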

		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uintptr_t)wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uintptr_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}

int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);
	sdmax_gfx_context_cntl = m->sdma_engine_id ?
		SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
		SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);

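	/* Disable the ring buffer and wait for the engine to go idle before
	 * reprogramming the queue.
	 */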
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	data = RREG32(sdmax_gfx_context_cntl);
	data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
			     RESUME_CTX, 0);
	WREG32(sdmax_gfx_context_cntl, data);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

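	/* Bracket the wptr update with MINOR_PTR_UPDATE so the SDMA firmware
	 * knows the ring pointers are being changed underneath it (behavior
	 * inferred from the register name and its usage here). If the user
	 * wptr isn't readable, fall back to rptr, i.e. an empty queue.
	 */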
	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_base_addr + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		   high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v9_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption timed out\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
								uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

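/* Ask the CP, via the KIQ ring, to invalidate TLBs for a PASID on all hubs,
 * then poll the emitted fence until the packet has been executed.
 */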
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
			uint32_t flush_type)
{
	signed long r;
	uint32_t seq;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;

	spin_lock(&adev->gfx.kiq.ring_lock);
	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package */
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
		return -ETIME;
	}

	return 0;
}

int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid, i;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	uint32_t flush_type = 0;

	if (adev->in_gpu_reset)
		return -EIO;
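	/* On Vega20 XGMI hives, use a heavy-weight flush (type 2), which is
	 * assumed here to be needed so the invalidation reaches all nodes.
	 */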
	if (adev->gmc.xgmi.num_physical_nodes &&
		adev->asic_type == CHIP_VEGA20)
		flush_type = 2;

	if (ring->sched.ready)
		return invalidate_tlbs_with_kiq(adev, pasid, flush_type);

	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;
		if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
			if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
				== pasid) {
				for (i = 0; i < adev->num_vmhubs; i++)
					amdgpu_gmc_flush_gpu_tlb(adev, vmid,
								i, flush_type);
				break;
			}
		}
	}

	return 0;
}

int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int i;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid %d\n", vmid);
		return 0;
	}

	/* Use legacy mode tlb invalidation.
	 *
	 * Currently on Raven the code below is broken for anything but
	 * legacy mode due to a MMHUB power gating problem. A workaround
	 * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
	 * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
	 * bit.
	 *
	 * TODO 1: agree on the right set of invalidation registers for
	 * KFD use. Use the last one for now. Invalidate both GC and
	 * MMHUB.
	 *
	 * TODO 2: support range-based invalidation; this requires a
	 * kfd2kgd interface change.
	 */
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);

	return 0;
}

int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

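/* Select the target SE/SH/CU via GRBM_GFX_INDEX, issue the SQ_CMD wave
 * control command, then restore GRBM_GFX_INDEX to broadcast mode.
 */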
int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	/* No longer needed on GFXv9. The scratch base address is
	 * passed to the shader by the CP. It's the user mode driver's
	 * responsibility.
	 */
}

void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}

	/* TODO: take advantage of per-process address space size. For
	 * now, all processes share the same address space size, like
	 * on GFX8 and older.
	 */
	if (adev->asic_type == CHIP_ARCTURUS) {
		/* Two MMHUBs */
		mmhub_v9_4_setup_vm_pt_regs(adev, 0, vmid, page_table_base);
		mmhub_v9_4_setup_vm_pt_regs(adev, 1, vmid, page_table_base);
	} else
		mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);

	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}

static const struct kfd2kgd_calls kfd2kgd = {
	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
	.init_interrupts = kgd_gfx_v9_init_interrupts,
	.hqd_load = kgd_gfx_v9_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_gfx_v9_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_gfx_v9_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_gfx_v9_address_watch_disable,
	.address_watch_execute = kgd_gfx_v9_address_watch_execute,
	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
	.set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
	.get_tile_config = kgd_gfx_v9_get_tile_config,
	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
	.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
	.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
	.get_hive_id = amdgpu_amdkfd_get_hive_id,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}