v6.13.7
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_amdkfd_arcturus.h"
#include "amdgpu_reset.h"
#include "sdma0/sdma0_4_2_2_offset.h"
#include "sdma0/sdma0_4_2_2_sh_mask.h"
#include "sdma1/sdma1_4_2_2_offset.h"
#include "sdma1/sdma1_4_2_2_sh_mask.h"
#include "sdma2/sdma2_4_2_2_offset.h"
#include "sdma2/sdma2_4_2_2_sh_mask.h"
#include "sdma3/sdma3_4_2_2_offset.h"
#include "sdma3/sdma3_4_2_2_sh_mask.h"
#include "sdma4/sdma4_4_2_2_offset.h"
#include "sdma4/sdma4_4_2_2_sh_mask.h"
#include "sdma5/sdma5_4_2_2_offset.h"
#include "sdma5/sdma5_4_2_2_sh_mask.h"
#include "sdma6/sdma6_4_2_2_offset.h"
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_amdkfd_gfx_v9.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v9_4.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
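
/*
 * Each entry recorded by DUMP_REG() is a { byte offset, register value }
 * pair: "addr" is a dword-granular register index, shifted left by two to
 * form the byte offset, while "i" is the running entry count, capped at
 * HQD_N_REGS so the caller-provided dump array cannot overflow.
 */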
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base = 0;
	uint32_t sdma_rlc_reg_offset;

	switch (engine_id) {
	default:
		dev_warn(adev->dev,
			 "Invalid sdma engine id (%d), using engine id 0\n",
			 engine_id);
		fallthrough;
	case 0:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	case 1:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
				mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL;
		break;
	case 2:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
				mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
		break;
	case 3:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
				mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL;
		break;
	case 4:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA4, 0,
				mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL;
		break;
	case 5:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA5, 0,
				mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL;
		break;
	case 6:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA6, 0,
				mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL;
		break;
	case 7:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA7, 0,
				mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL;
		break;
	}

	sdma_rlc_reg_offset = sdma_engine_reg_base
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
			queue_id, sdma_rlc_reg_offset);

	return sdma_rlc_reg_offset;
}
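
/*
 * Worked example for get_sdma_rlc_reg_offset() above (illustrative):
 * with engine_id == 1 and queue_id == 2 the function returns
 *
 *   SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
 *	+ 2 * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL)
 *
 * i.e. the SDMA1 engine base plus two per-queue register strides, so that
 * adding any mmSDMA0_RLC0_* register offset to the result addresses the
 * RLC2 instance of that register on engine SDMA1.
 */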
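
/*
 * Loads an SDMA user queue from its MQD: the ring buffer is disabled and
 * the engine polled (for up to two seconds) until the context reports
 * idle, then the doorbell, read pointers, write pointers (taken from the
 * user-space wptr when it is readable, otherwise falling back to the
 * saved read pointer) and ring base/rptr addresses are programmed before
 * the ring buffer is re-enabled.
 */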
int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
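
/*
 * Illustrative only: a minimal sketch of how a caller might consume the
 * { byte offset, value } pairs produced by kgd_arcturus_hqd_sdma_dump().
 * The helper name and the pr_info() formatting are hypothetical, not part
 * of the upstream file.
 */
static void __maybe_unused example_print_sdma_hqd_dump(struct amdgpu_device *adev,
							uint32_t engine_id,
							uint32_t queue_id)
{
	uint32_t (*dump)[2];
	uint32_t n_regs, i;

	if (kgd_arcturus_hqd_sdma_dump(adev, engine_id, queue_id,
				       &dump, &n_regs))
		return;

	/* DUMP_REG() stored each offset as (dword index << 2), in bytes. */
	for (i = 0; i < n_regs; i++)
		pr_info("SDMA%u RLC%u reg 0x%08x = 0x%08x\n",
			engine_id, queue_id, dump[i][0], dump[i][1]);

	kfree(dump);	/* the dump buffer is owned by the caller */
}
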
bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev,
				void *mqd)
{
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
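
/*
 * Tears down an SDMA user queue: the ring buffer is disabled and the
 * engine polled until the context reports idle (bounded by utimeout,
 * in milliseconds), after which the doorbell is cleared and the hardware
 * read pointer is saved back into the MQD so the queue can be restored
 * later.
 */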
int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
				unsigned int utimeout)
{
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

/*
 * Helper used to suspend/resume the gfx pipe for image post-process work
 * to set barrier behaviour.
 */
static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool suspend)
{
	int i, r = 0;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		if (!amdgpu_ring_sched_ready(ring))
			continue;

		/* stop scheduler and drain ring. */
		if (suspend) {
			drm_sched_stop(&ring->sched, NULL);
			r = amdgpu_fence_wait_empty(ring);
			if (r)
				goto out;
		} else {
			drm_sched_start(&ring->sched, 0);
		}
	}

out:
	/* return on resume or failure to drain rings. */
	if (!suspend || r)
		return r;

	return amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
}
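
/*
 * Flips SQ_CONFIG.DISABLE_BARRIER_WAITCNT safely: the requested mode is
 * recorded in the device first, the register update is skipped while a
 * GPU reset is in flight (the trylock fails), and user queues plus the
 * compute scheduler are quiesced around the read-modify-write.
 */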
static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_waitcnt)
{
	uint32_t data;

	WRITE_ONCE(adev->barrier_has_auto_waitcnt, enable_waitcnt);

	if (!down_read_trylock(&adev->reset_domain->sem))
		return;

	amdgpu_amdkfd_suspend(adev, false);

	if (suspend_resume_compute_scheduler(adev, true))
		goto out;

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG));
	data = REG_SET_FIELD(data, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
						!enable_waitcnt);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG), data);

out:
	suspend_resume_compute_scheduler(adev, false);

	amdgpu_amdkfd_resume(adev, false);

	up_read(&adev->reset_domain->sem);
}

/*
 * restore_dbg_registers is ignored here but is a general interface requirement
 * for devices that support GFXOFF and where the RLC save/restore list
 * does not support hw registers for debugging, i.e. the driver has to manually
 * initialize the debug mode registers after it has disabled GFX off during the
 * debug session.
 */
static uint32_t kgd_arcturus_enable_debug_trap(struct amdgpu_device *adev,
				bool restore_dbg_registers,
				uint32_t vmid)
{
	mutex_lock(&adev->grbm_idx_mutex);

	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);

	set_barrier_auto_waitcnt(adev, true);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);

	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

/*
 * keep_trap_enabled is ignored here but is a general interface requirement
 * for devices that support multi-process debugging where the performance
 * overhead from trap temporary setup needs to be bypassed when the debug
 * session has ended.
 */
static uint32_t kgd_arcturus_disable_debug_trap(struct amdgpu_device *adev,
					bool keep_trap_enabled,
					uint32_t vmid)
{

	mutex_lock(&adev->grbm_idx_mutex);

	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);

	set_barrier_auto_waitcnt(adev, false);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);

	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
const struct kfd2kgd_calls arcturus_kfd2kgd = {
	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
	.init_interrupts = kgd_gfx_v9_init_interrupts,
	.hqd_load = kgd_gfx_v9_hqd_load,
	.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
	.hqd_sdma_load = kgd_arcturus_hqd_sdma_load,
	.hqd_dump = kgd_gfx_v9_hqd_dump,
	.hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump,
	.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_gfx_v9_hqd_destroy,
	.hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
	.get_atc_vmid_pasid_mapping_info =
				kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
	.set_vm_context_page_table_base =
				kgd_gfx_v9_set_vm_context_page_table_base,
	.enable_debug_trap = kgd_arcturus_enable_debug_trap,
	.disable_debug_trap = kgd_arcturus_disable_debug_trap,
	.validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request,
	.set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override,
	.set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode,
	.set_address_watch = kgd_gfx_v9_set_address_watch,
	.clear_address_watch = kgd_gfx_v9_clear_address_watch,
	.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
	.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
	.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
	.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
	.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
	.hqd_reset = kgd_gfx_v9_hqd_reset
};
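
/*
 * Illustrative only: KFD invokes the routines in this file through the
 * arcturus_kfd2kgd table above rather than by symbol. A minimal hedged
 * sketch of such a call site, assuming the caller already holds the
 * device's kfd2kgd_calls pointer (the helper name is hypothetical):
 */
static int __maybe_unused example_sdma_queue_teardown(const struct kfd2kgd_calls *f,
						      struct amdgpu_device *adev,
						      void *mqd,
						      unsigned int timeout_ms)
{
	/* Nothing to do if the queue's ring buffer is already disabled. */
	if (!f->hqd_sdma_is_occupied(adev, mqd))
		return 0;

	return f->hqd_sdma_destroy(adev, mqd, timeout_ms);
}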
v5.14.15
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_amdkfd_arcturus.h"
#include "sdma0/sdma0_4_2_2_offset.h"
#include "sdma0/sdma0_4_2_2_sh_mask.h"
#include "sdma1/sdma1_4_2_2_offset.h"
#include "sdma1/sdma1_4_2_2_sh_mask.h"
#include "sdma2/sdma2_4_2_2_offset.h"
#include "sdma2/sdma2_4_2_2_sh_mask.h"
#include "sdma3/sdma3_4_2_2_offset.h"
#include "sdma3/sdma3_4_2_2_sh_mask.h"
#include "sdma4/sdma4_4_2_2_offset.h"
#include "sdma4/sdma4_4_2_2_sh_mask.h"
#include "sdma5/sdma5_4_2_2_offset.h"
#include "sdma5/sdma5_4_2_2_sh_mask.h"
#include "sdma6/sdma6_4_2_2_offset.h"
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_amdkfd_gfx_v9.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v9_4.h"

#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

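/*
 * In this version of the interface, KFD passes an opaque struct kgd_dev
 * handle rather than a struct amdgpu_device pointer; the handle is the
 * amdgpu_device itself, recovered by the cast below.
 */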
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base = 0;
	uint32_t sdma_rlc_reg_offset;

	switch (engine_id) {
	default:
		dev_warn(adev->dev,
			 "Invalid sdma engine id (%d), using engine id 0\n",
			 engine_id);
		fallthrough;
	case 0:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	case 1:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
				mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL;
		break;
	case 2:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
				mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
		break;
	case 3:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
				mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL;
		break;
	case 4:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA4, 0,
				mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL;
		break;
	case 5:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA5, 0,
				mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL;
		break;
	case 6:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA6, 0,
				mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL;
		break;
	case 7:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA7, 0,
				mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL;
		break;
	}

	sdma_rlc_reg_offset = sdma_engine_reg_base
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
			queue_id, sdma_rlc_reg_offset);

	return sdma_rlc_reg_offset;
}

int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

const struct kfd2kgd_calls arcturus_kfd2kgd = {
	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
	.init_interrupts = kgd_gfx_v9_init_interrupts,
	.hqd_load = kgd_gfx_v9_hqd_load,
	.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
	.hqd_sdma_load = kgd_arcturus_hqd_sdma_load,
	.hqd_dump = kgd_gfx_v9_hqd_dump,
	.hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump,
	.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_gfx_v9_hqd_destroy,
	.hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
	.address_watch_disable = kgd_gfx_v9_address_watch_disable,
	.address_watch_execute = kgd_gfx_v9_address_watch_execute,
	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_info =
				kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
	.set_vm_context_page_table_base =
				kgd_gfx_v9_set_vm_context_page_table_base,
	.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy
};