v4.17
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"

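/*
 * Dequeue request codes written to CP_HQD_DEQUEUE_REQUEST when a queue
 * is destroyed (see kgd_hqd_destroy()). Broadly, DRAIN_PIPE lets
 * outstanding waves complete before the queue is removed, while
 * RESET_WAVES kills them outright.
 */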
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

struct vi_sdma_mqd;

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

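/*
 * Dispatch table handed to amdkfd: the KFD driver calls back into
 * amdgpu through these hooks on GFXv8 ASICs.
 */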
static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_local_mem_info = get_local_mem_info,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_pasid_alloc,
	.free_pasid = amdgpu_pasid_free,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			get_atc_vmid_pasid_mapping_valid,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
	.get_cu_info = get_cu_info,
	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.submit_ib = amdgpu_amdkfd_submit_ib,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

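/*
 * Compute queue registers (CP_HQD_*, SH_MEM_*) are banked per
 * MEC/pipe/queue/VMID. Writing SRBM_GFX_CNTL selects which bank the
 * following register accesses hit, so the selection is held under
 * srbm_mutex until unlock_srbm() restores the default (0).
 */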
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* amdgpu owns the per-pipe state */
	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

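/*
 * Per-queue SDMA RLC registers sit at a fixed stride from engine 0,
 * queue 0: SDMA1_REGISTER_OFFSET per engine and
 * KFD_VI_SDMA_QUEUE_OFFSET per queue. The returned base is added to
 * the mmSDMA0_RLC0_* register offsets below.
 */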
static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
		m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct vi_sdma_mqd *)mqd;
}

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
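		/* scheduler1 appears to pack the HIQ slot as: queue in bits
		 * 2:0, pipe in bits 4:3, MEC in bits 6:5, with 0x80 as an
		 * enable/valid bit (our reading of the value built below).
		 */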
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)
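	/* DUMP_REG records one {byte address, value} pair; the register
	 * index is shifted left by 2 to convert dwords to bytes.
	 */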

	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	unsigned long end_jiffies;
	uint32_t sdma_base_addr;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

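	/* Ring buffer is now disabled; wait up to 2 seconds for the RLC
	 * queue context to drain and report idle before reprogramming it.
	 */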
	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
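	/* Presumably tells the engine not to resume a previously saved
	 * context when this queue is (re)started.
	 */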
	if (m->sdma_engine_id) {
		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
	} else {
		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
	}

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
				m->sdmax_rlcx_virtual_addr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)

	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
								uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

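	/* Restore GRBM_GFX_INDEX to broadcast mode so later register
	 * writes reach all SEs/SHs/instances again.
	 */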
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits in use */
	return hdr->common.ucode_version;
}

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
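	/* VM_CONTEXT8..15_PAGE_TABLE_BASE_ADDR registers are contiguous,
	 * hence indexing from context 8 with (vmid - 8).
	 */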
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
}

static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid;
	unsigned int tmp;

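	/* Scan the VMIDs reserved for KFD for one whose ATC mapping is
	 * valid and matches this PASID, then flush just that VMID's TLB.
	 */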
	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;

		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid %d\n", vmid);
		return -EINVAL;
	}

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
	RREG32(mmVM_INVALIDATE_RESPONSE);
	return 0;
}

v5.14.15
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

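	/* Enable timestamp and opcode-error interrupts for this pipe. */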
	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
		m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
			m->sdma_engine_id, m->sdma_queue_id, retval);

	return retval;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct vi_sdma_mqd *)mqd;
}

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_lock.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	unsigned long end_jiffies;
	uint32_t sdma_rlc_reg_offset;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
				m->sdmax_rlcx_virtual_addr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

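	/* Don't touch the HQD registers while the GPU is in reset. */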
	if (amdgpu_in_reset(adev))
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}

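/*
 * Reads the ATC VMID->PASID mapping register once, returning the PASID
 * through *p_pasid and the VALID bit as the return value.
 */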
static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
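	/* The GFXv8 page table base register is 32 bits wide, so only the
	 * low dword of the 64-bit base is programmed here; the
	 * VM_CONTEXT8..15 registers are contiguous, hence (vmid - 8).
	 */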
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
			lower_32_bits(page_table_base));
}

const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_info =
			get_atc_vmid_pasid_mapping_info,
	.set_scratch_backing_va = set_scratch_backing_va,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
};