/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "sdma1/sdma1_4_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"
#include "soc15_common.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_0.h"

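/*
 * Dequeue request types written to CP_HQD_DEQUEUE_REQUEST when a compute
 * queue is preempted (see kgd_gfx_v9_hqd_destroy): DRAIN_PIPE lets
 * in-flight waves complete, RESET_WAVES kills them immediately.
 */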
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

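/*
 * lock_srbm/unlock_srbm bracket access to SRBM-indexed registers: take
 * srbm_mutex, select the target MEC/pipe/queue/VMID via GRBM, and restore
 * the broadcast selection (0, 0, 0, 0) before dropping the mutex.
 */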
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

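/*
 * Select a single compute queue for register access. Note: ME 0 is used
 * by gfx, so the compute MEC index derived from the flattened pipe_id is
 * numbered from 1.
 */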
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

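/*
 * Bit position of a queue in the pipe-major queue bitmask, as consumed by
 * CP_PQ_WPTR_POLL_CNTL1 in kgd_gfx_v9_hqd_load below.
 */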
static uint64_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
			queue_id;

	return 1ull << bit;
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

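/* Program the per-VMID shared memory configuration and aperture bases. */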
void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 */

	unlock_srbm(kgd);
}

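/*
 * Bind a PASID to a VMID in the ATC (once for GFX, once for MMHUB at
 * VMID + 16) and mirror the mapping into the IH lookup tables, waiting
 * for the hardware to acknowledge each mapping update.
 */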
int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	/*
	 * The mapping has to be set up twice: once for GFX and once for
	 * MMHUB. For the MMHUB ATC, add 16 to the VMID (the ATC_VMID0..15
	 * registers are separate from ATC_VMID16..31); the IH block uses
	 * different registers entirely.
	 */

	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << (vmid + 16))))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << (vmid + 16));

	/* Mapping vmid to pasid also for IH block */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid,
	       pasid_mapping);
	return 0;
}

/* TODO - RING0 form of field is obsolete, seems to date back to SI
 * but still works
 */

int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

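/*
 * Compute the register offset of a given SDMA RLC queue. Engine bases are
 * taken relative to SDMA0's register aperture; queues within an engine
 * are spaced by the RLC1 - RLC0 register stride.
 */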
static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base = 0;
	uint32_t sdma_rlc_reg_offset;

	switch (engine_id) {
	default:
		dev_warn(adev->dev,
			 "Invalid sdma engine id (%d), using engine id 0\n",
			 engine_id);
		fallthrough;
	case 0:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	case 1:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
				mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	}

	sdma_rlc_reg_offset = sdma_engine_reg_base
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
		 queue_id, sdma_rlc_reg_offset);

	return sdma_rlc_reg_offset;
}

static inline struct v9_mqd *get_mqd(void *mqd)
{
	return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

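/*
 * Load a compute MQD into its HQD with direct register writes: copy the
 * MQD image into the HQD register file, enable the doorbell, optionally
 * arm one-shot WPTR polling, restart the EOP fetcher, and finally set the
 * queue active.
 */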
int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);

	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uintptr_t)wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uintptr_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}

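/*
 * Load the HIQ MQD by submitting a MAP_QUEUES packet on the KIQ ring
 * rather than programming the HQD registers directly.
 */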
int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	struct v9_mqd *m;
	uint32_t mec, pipe;
	int r;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
		 mec, pipe, queue_id);

	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);
	if (r) {
		pr_err("Failed to alloc KIQ (%d).\n", r);
		goto out_unlock;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
			  PACKET3_MAP_QUEUES_PIPE(pipe) |
			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
	amdgpu_ring_commit(kiq_ring);

out_unlock:
	spin_unlock(&adev->gfx.kiq.ring_lock);
	release_queue(kgd);

	return r;
}

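/*
 * Dump the compute HQD register range (CP_MQD_BASE_ADDR through
 * CP_HQD_PQ_WPTR_HI) into a caller-freed array of (offset, value) pairs.
 */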
int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

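/*
 * Load an SDMA RLC queue from its MQD: disable the ring buffer, wait for
 * the context to drain, restore doorbell and pointer state, pick up the
 * user-mode WPTR if the process address space is still accessible, then
 * re-enable the ring buffer.
 */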
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

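/*
 * Dump the four architected SDMA RLC register ranges for debugfs; the
 * total count must match HQD_N_REGS (19 + 6 + 7 + 10). Reuses the
 * DUMP_REG helper defined in kgd_gfx_v9_hqd_dump above.
 */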
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

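/*
 * A compute HQD counts as occupied if it is active and its PQ base
 * matches the given queue address.
 */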
bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		   high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

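/* An SDMA queue is occupied while its ring buffer is still enabled. */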
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

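/*
 * Preempt a compute queue: write the requested dequeue type to
 * CP_HQD_DEQUEUE_REQUEST and poll CP_HQD_ACTIVE until the queue goes idle
 * or utimeout (in ms) expires.
 */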
int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v9_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

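/*
 * Stop an SDMA queue: disable the ring buffer, wait for the context to go
 * idle, clear the doorbell, and save the final RPTR back into the MQD so
 * the queue can be restored later.
 */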
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

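/*
 * Look up the PASID currently mapped to @vmid in the ATC; returns true if
 * the mapping is valid.
 */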
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

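/*
 * Issue an SQ command (e.g. to halt or kill waves) to the shader engines
 * selected by gfx_index_val, then restore GRBM_GFX_INDEX to broadcast
 * mode.
 */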
int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

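/*
 * Program the page table base for a KFD VMID in both the GFX hub and the
 * MM hub, since user queues can be serviced by engines behind either hub.
 */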
static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
			uint32_t vmid, uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}

	mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);

	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}

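/* Function table exported to the KFD driver for GFXv9 devices. */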
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
	.init_interrupts = kgd_gfx_v9_init_interrupts,
	.hqd_load = kgd_gfx_v9_hqd_load,
	.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_gfx_v9_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_gfx_v9_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_gfx_v9_address_watch_disable,
	.address_watch_execute = kgd_gfx_v9_address_watch_execute,
	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_info =
			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
	.get_hive_id = amdgpu_amdkfd_get_hive_id,
	.get_unique_id = amdgpu_amdkfd_get_unique_id,
};