v6.2 (drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c)
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 */
 22#include "amdgpu.h"
 23#include "amdgpu_amdkfd.h"
 24#include "gc/gc_10_1_0_offset.h"
 25#include "gc/gc_10_1_0_sh_mask.h"
 26#include "athub/athub_2_0_0_offset.h"
 27#include "athub/athub_2_0_0_sh_mask.h"
 28#include "oss/osssys_5_0_0_offset.h"
 29#include "oss/osssys_5_0_0_sh_mask.h"
 30#include "soc15_common.h"
 31#include "v10_structs.h"
 32#include "nv.h"
 33#include "nvd.h"
 34
 35enum hqd_dequeue_request_type {
 36	NO_ACTION = 0,
 37	DRAIN_PIPE,
 38	RESET_WAVES,
 39	SAVE_WAVES
 40};
 41
 42static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 43			uint32_t queue, uint32_t vmid)
 44{
 45	mutex_lock(&adev->srbm_mutex);
 46	nv_grbm_select(adev, mec, pipe, queue, vmid);
 47}
 48
 49static void unlock_srbm(struct amdgpu_device *adev)
 50{
 51	nv_grbm_select(adev, 0, 0, 0, 0);
 52	mutex_unlock(&adev->srbm_mutex);
 53}
 54
 55static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
 56				uint32_t queue_id)
 57{
 58	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 59	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 60
 61	lock_srbm(adev, mec, pipe, queue_id, 0);
 62}
 63
 64static uint64_t get_queue_mask(struct amdgpu_device *adev,
 65			       uint32_t pipe_id, uint32_t queue_id)
 66{
 67	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
 68			queue_id;
 69
 70	return 1ull << bit;
 71}
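/*
 * Illustrative example (values assumed, not taken from the file itself):
 * with num_queue_per_pipe == 8, pipe_id 1 and queue_id 2 select bit
 * 1 * 8 + 2 = 10, so get_queue_mask() returns 1ull << 10 == 0x400.
 * kgd_hqd_load() later writes this mask to CP_PQ_WPTR_POLL_CNTL1 to
 * enable WPTR polling for that queue.
 */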
 72
 73static void release_queue(struct amdgpu_device *adev)
 74{
 75	unlock_srbm(adev);
 76}
 77
 78static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
 79					uint32_t sh_mem_config,
 80					uint32_t sh_mem_ape1_base,
 81					uint32_t sh_mem_ape1_limit,
 82					uint32_t sh_mem_bases)
 83{
 84	lock_srbm(adev, 0, 0, 0, vmid);
 85
 86	WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
 87	WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
 88	/* APE1 no longer exists on GFX9 */
 89
 90	unlock_srbm(adev);
 91}
 92
 93static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
 94					unsigned int vmid)
 95{
 96	/*
 97	 * We have to assume that there is no outstanding mapping.
 98	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
 99	 * a mapping is in progress or because a mapping finished
100	 * and the SW cleared it.
101	 * So the protocol is to always wait & clear.
102	 */
103	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
104			ATC_VMID0_PASID_MAPPING__VALID_MASK;
105
106	pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
107
108	pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
109	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
110	       pasid_mapping);
111
112#if 0
113	/* TODO: uncomment this code when the hardware support is ready. */
114	while (!(RREG32(SOC15_REG_OFFSET(
115				ATHUB, 0,
116				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
117		 (1U << vmid)))
118		cpu_relax();
119
120	pr_debug("ATHUB mapping update finished\n");
121	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
122				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
123	       1U << vmid);
124#endif
125
126	/* Mapping vmid to pasid also for IH block */
127	pr_debug("update mapping for IH block and mmhub");
128	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
129	       pasid_mapping);
130
131	return 0;
132}
133
134/* TODO - RING0 form of field is obsolete, seems to date back to SI
135 * but still works
136 */
137
138static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
139{
140	uint32_t mec;
141	uint32_t pipe;
142
143	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
144	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
145
146	lock_srbm(adev, mec, pipe, 0, 0);
147
148	WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
149		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
150		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
151
152	unlock_srbm(adev);
153
154	return 0;
155}
156
157static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
158				unsigned int engine_id,
159				unsigned int queue_id)
160{
161	uint32_t sdma_engine_reg_base[2] = {
162		SOC15_REG_OFFSET(SDMA0, 0,
163				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
164		/* On gfx10, mmSDMA1_xxx registers are defined NOT based
165		 * on SDMA1 base address (dw 0x1860) but based on SDMA0
166		 * base address (dw 0x1260). Therefore use mmSDMA0_RLC0_RB_CNTL
167		 * instead of mmSDMA1_RLC0_RB_CNTL for the base address calc
168		 * below
169		 */
170		SOC15_REG_OFFSET(SDMA1, 0,
171				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
172	};
173
174	uint32_t retval = sdma_engine_reg_base[engine_id]
175		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
176
177	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
178			queue_id, retval);
179
180	return retval;
181}
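/*
 * Illustrative example (actual dword values depend on the gfx10 register
 * headers): for engine_id 1 and queue_id 2 the function returns
 * sdma_engine_reg_base[1] + 2 * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL),
 * i.e. the SDMA1 block base plus two RLC queue strides. Callers can then
 * address SDMA1 RLC2 registers by adding the plain mmSDMA0_RLC0_* offsets
 * to the returned base.
 */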
182
183#if 0
184static uint32_t get_watch_base_addr(struct amdgpu_device *adev)
185{
186	uint32_t retval = SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) -
187			mmTCP_WATCH0_ADDR_H;
188
189	pr_debug("kfd: reg watch base address: 0x%x\n", retval);
190
191	return retval;
192}
193#endif
194
195static inline struct v10_compute_mqd *get_mqd(void *mqd)
196{
197	return (struct v10_compute_mqd *)mqd;
198}
199
200static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
201{
202	return (struct v10_sdma_mqd *)mqd;
203}
204
205static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
206			uint32_t pipe_id, uint32_t queue_id,
207			uint32_t __user *wptr, uint32_t wptr_shift,
208			uint32_t wptr_mask, struct mm_struct *mm)
209{
210	struct v10_compute_mqd *m;
211	uint32_t *mqd_hqd;
212	uint32_t reg, hqd_base, data;
213
214	m = get_mqd(mqd);
215
216	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
217	acquire_queue(adev, pipe_id, queue_id);
218
219	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
220	mqd_hqd = &m->cp_mqd_base_addr_lo;
221	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
222
223	for (reg = hqd_base;
224	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
225		WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]);
226
227
228	/* Activate doorbell logic before triggering WPTR poll. */
229	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
230			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
231	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);
232
233	if (wptr) {
234		/* Don't read wptr with get_user because the user
235		 * context may not be accessible (if this function
236		 * runs in a work queue). Instead trigger a one-shot
237		 * polling read from memory in the CP. This assumes
238		 * that wptr is GPU-accessible in the queue's VMID via
239		 * ATC or SVM. WPTR==RPTR before starting the poll so
240		 * the CP starts fetching new commands from the right
241		 * place.
242		 *
243		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
244		 * tricky. Assume that the queue didn't overflow. The
245		 * number of valid bits in the 32-bit RPTR depends on
246		 * the queue size. The remaining bits are taken from
247		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
248		 * queue size.
249		 */
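		/*
		 * Worked example of the computation below (assumed values):
		 * a QUEUE_SIZE field of 9 gives a 2 << 9 = 0x400 dword ring.
		 * With a saved rptr of 0x3f0 and wptr_lo of 0x1010 (0x010
		 * within the ring, i.e. below the rptr, so the WPTR wrapped
		 * once), the guess becomes 0x3f0 + 0x400 + 0x1000 = 0x17f0,
		 * plus wptr_hi << 32.
		 */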
250		uint32_t queue_size =
251			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
252					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
253		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
254
255		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
256			guessed_wptr += queue_size;
257		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
258		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
259
260		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
261		       lower_32_bits(guessed_wptr));
262		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
263		       upper_32_bits(guessed_wptr));
264		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
265		       lower_32_bits((uint64_t)wptr));
266		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
267		       upper_32_bits((uint64_t)wptr));
268		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
269			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
270		WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
271		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
272	}
273
274	/* Start the EOP fetcher */
275	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_RPTR,
276	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
277			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
278
279	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
280	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
281
282	release_queue(adev);
283
284	return 0;
285}
286
287static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
288			    uint32_t pipe_id, uint32_t queue_id,
289			    uint32_t doorbell_off)
290{
291	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
292	struct v10_compute_mqd *m;
293	uint32_t mec, pipe;
294	int r;
295
296	m = get_mqd(mqd);
297
298	acquire_queue(adev, pipe_id, queue_id);
299
300	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
301	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
302
303	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
304		 mec, pipe, queue_id);
305
306	spin_lock(&adev->gfx.kiq.ring_lock);
307	r = amdgpu_ring_alloc(kiq_ring, 7);
308	if (r) {
309		pr_err("Failed to alloc KIQ (%d).\n", r);
310		goto out_unlock;
311	}
312
313	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
314	amdgpu_ring_write(kiq_ring,
315			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
316			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
317			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
318			  PACKET3_MAP_QUEUES_PIPE(pipe) |
319			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
320			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
321			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
322			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
323			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
324	amdgpu_ring_write(kiq_ring,
325			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
326	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
327	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
328	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
329	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
330	amdgpu_ring_commit(kiq_ring);
331
332out_unlock:
333	spin_unlock(&adev->gfx.kiq.ring_lock);
334	release_queue(adev);
335
336	return r;
337}
338
339static int kgd_hqd_dump(struct amdgpu_device *adev,
340			uint32_t pipe_id, uint32_t queue_id,
341			uint32_t (**dump)[2], uint32_t *n_regs)
342{
343	uint32_t i = 0, reg;
344#define HQD_N_REGS 56
345#define DUMP_REG(addr) do {				\
346		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
347			break;				\
348		(*dump)[i][0] = (addr) << 2;		\
349		(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr);		\
350	} while (0)
351
352	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
353	if (*dump == NULL)
354		return -ENOMEM;
355
356	acquire_queue(adev, pipe_id, queue_id);
357
358	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
359	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
360		DUMP_REG(reg);
361
362	release_queue(adev);
363
364	WARN_ON_ONCE(i != HQD_N_REGS);
365	*n_regs = i;
366
367	return 0;
368}
369
370static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
371			     uint32_t __user *wptr, struct mm_struct *mm)
372{
373	struct v10_sdma_mqd *m;
374	uint32_t sdma_rlc_reg_offset;
375	unsigned long end_jiffies;
376	uint32_t data;
377	uint64_t data64;
378	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
379
380	m = get_sdma_mqd(mqd);
381	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
382					    m->sdma_queue_id);
383
384	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
385		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
386
387	end_jiffies = msecs_to_jiffies(2000) + jiffies;
388	while (true) {
389		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
390		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
391			break;
392		if (time_after(jiffies, end_jiffies)) {
393			pr_err("SDMA RLC not idle in %s\n", __func__);
394			return -ETIME;
395		}
396		usleep_range(500, 1000);
397	}
398
399	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
400	       m->sdmax_rlcx_doorbell_offset);
401
402	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
403			     ENABLE, 1);
404	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
405	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
406				m->sdmax_rlcx_rb_rptr);
407	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
408				m->sdmax_rlcx_rb_rptr_hi);
409
410	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
411	if (read_user_wptr(mm, wptr64, data64)) {
412		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
413		       lower_32_bits(data64));
414		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
415		       upper_32_bits(data64));
416	} else {
417		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
418		       m->sdmax_rlcx_rb_rptr);
419		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
420		       m->sdmax_rlcx_rb_rptr_hi);
421	}
422	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
423
424	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
425	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
426			m->sdmax_rlcx_rb_base_hi);
427	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
428			m->sdmax_rlcx_rb_rptr_addr_lo);
429	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
430			m->sdmax_rlcx_rb_rptr_addr_hi);
431
432	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
433			     RB_ENABLE, 1);
434	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
435
436	return 0;
437}
438
439static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
440			     uint32_t engine_id, uint32_t queue_id,
441			     uint32_t (**dump)[2], uint32_t *n_regs)
442{
443	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
444			engine_id, queue_id);
445	uint32_t i = 0, reg;
446#undef HQD_N_REGS
447#define HQD_N_REGS (19+6+7+10)
448
449	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
450	if (*dump == NULL)
451		return -ENOMEM;
452
453	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
454		DUMP_REG(sdma_rlc_reg_offset + reg);
455	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
456		DUMP_REG(sdma_rlc_reg_offset + reg);
457	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
458	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
459		DUMP_REG(sdma_rlc_reg_offset + reg);
460	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
461	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
462		DUMP_REG(sdma_rlc_reg_offset + reg);
463
464	WARN_ON_ONCE(i != HQD_N_REGS);
465	*n_regs = i;
466
467	return 0;
468}
469
470static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
471				uint64_t queue_address, uint32_t pipe_id,
472				uint32_t queue_id)
473{
474	uint32_t act;
475	bool retval = false;
476	uint32_t low, high;
477
478	acquire_queue(adev, pipe_id, queue_id);
479	act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
480	if (act) {
481		low = lower_32_bits(queue_address >> 8);
482		high = upper_32_bits(queue_address >> 8);
483
484		if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
485		   high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
486			retval = true;
487	}
488	release_queue(adev);
489	return retval;
490}
491
492static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
493{
494	struct v10_sdma_mqd *m;
495	uint32_t sdma_rlc_reg_offset;
496	uint32_t sdma_rlc_rb_cntl;
497
498	m = get_sdma_mqd(mqd);
499	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
500					    m->sdma_queue_id);
501
502	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
503
504	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
505		return true;
506
507	return false;
508}
509
510static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
511				enum kfd_preempt_type reset_type,
512				unsigned int utimeout, uint32_t pipe_id,
513				uint32_t queue_id)
514{
515	enum hqd_dequeue_request_type type;
516	unsigned long end_jiffies;
517	uint32_t temp;
518	struct v10_compute_mqd *m = get_mqd(mqd);
519
520	if (amdgpu_in_reset(adev))
521		return -EIO;
522
523#if 0
524	unsigned long flags;
525	int retry;
526#endif
527
528	acquire_queue(adev, pipe_id, queue_id);
529
530	if (m->cp_hqd_vmid == 0)
531		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
532
533	switch (reset_type) {
534	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
535		type = DRAIN_PIPE;
536		break;
537	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
538		type = RESET_WAVES;
539		break;
540	case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
541		type = SAVE_WAVES;
542		break;
543	default:
544		type = DRAIN_PIPE;
545		break;
546	}
547
548#if 0 /* Is this still needed? */
549	/* Workaround: If IQ timer is active and the wait time is close to or
550	 * equal to 0, dequeueing is not safe. Wait until either the wait time
551	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
552	 * cleared before continuing. Also, ensure wait times are set to at
553	 * least 0x3.
554	 */
555	local_irq_save(flags);
556	preempt_disable();
557	retry = 5000; /* wait for 500 usecs at maximum */
558	while (true) {
559		temp = RREG32(mmCP_HQD_IQ_TIMER);
560		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
561			pr_debug("HW is processing IQ\n");
562			goto loop;
563		}
564		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
565			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
566					== 3) /* SEM-rearm is safe */
567				break;
568			/* Wait time 3 is safe for CP, but our MMIO read/write
569			 * time is close to 1 microsecond, so check for 10 to
570			 * leave more buffer room
571			 */
572			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
573					>= 10)
574				break;
575			pr_debug("IQ timer is active\n");
576		} else
577			break;
578loop:
579		if (!retry) {
580			pr_err("CP HQD IQ timer status time out\n");
581			break;
582		}
583		ndelay(100);
584		--retry;
585	}
586	retry = 1000;
587	while (true) {
588		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
589		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
590			break;
591		pr_debug("Dequeue request is pending\n");
592
593		if (!retry) {
594			pr_err("CP HQD dequeue request time out\n");
595			break;
596		}
597		ndelay(100);
598		--retry;
599	}
600	local_irq_restore(flags);
601	preempt_enable();
602#endif
603
604	WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type);
605
606	end_jiffies = (utimeout * HZ / 1000) + jiffies;
607	while (true) {
608		temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
609		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
610			break;
611		if (time_after(jiffies, end_jiffies)) {
612			pr_err("cp queue preemption time out.\n");
613			release_queue(adev);
614			return -ETIME;
615		}
616		usleep_range(500, 1000);
617	}
618
619	release_queue(adev);
620	return 0;
621}
622
623static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
624				unsigned int utimeout)
625{
626	struct v10_sdma_mqd *m;
627	uint32_t sdma_rlc_reg_offset;
628	uint32_t temp;
629	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
630
631	m = get_sdma_mqd(mqd);
632	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
633					    m->sdma_queue_id);
634
635	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
636	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
637	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
638
639	while (true) {
640		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
641		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
642			break;
643		if (time_after(jiffies, end_jiffies)) {
644			pr_err("SDMA RLC not idle in %s\n", __func__);
645			return -ETIME;
646		}
647		usleep_range(500, 1000);
648	}
649
650	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
651	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
652		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
653		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
654
655	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
656	m->sdmax_rlcx_rb_rptr_hi =
657		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
658
659	return 0;
660}
661
662static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
663					uint8_t vmid, uint16_t *p_pasid)
664{
665	uint32_t value;
666
667	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
668		     + vmid);
669	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
670
671	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
672}
673
674static int kgd_wave_control_execute(struct amdgpu_device *adev,
675					uint32_t gfx_index_val,
676					uint32_t sq_cmd)
677{
678	uint32_t data = 0;
679
680	mutex_lock(&adev->grbm_idx_mutex);
681
682	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
683	WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);
684
685	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
686		INSTANCE_BROADCAST_WRITES, 1);
687	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
688		SA_BROADCAST_WRITES, 1);
689	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
690		SE_BROADCAST_WRITES, 1);
691
692	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
693	mutex_unlock(&adev->grbm_idx_mutex);
694
695	return 0;
696}
697
698static void set_vm_context_page_table_base(struct amdgpu_device *adev,
699		uint32_t vmid, uint64_t page_table_base)
700{
701	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
702		pr_err("trying to set page table base for wrong VMID %u\n",
703		       vmid);
704		return;
705	}
706
707	/* SDMA is on gfxhub as well for Navi1* series */
708	adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
709}
710
711static void program_trap_handler_settings(struct amdgpu_device *adev,
712		uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
713{
714	lock_srbm(adev, 0, 0, 0, vmid);
715
716	/*
717	 * Program TBA registers
718	 */
719	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
720			lower_32_bits(tba_addr >> 8));
721	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
722			upper_32_bits(tba_addr >> 8) |
723			(1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
724
725	/*
726	 * Program TMA registers
727	 */
728	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
729			lower_32_bits(tma_addr >> 8));
730	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
731			upper_32_bits(tma_addr >> 8));
732
733	unlock_srbm(adev);
734}
735
736const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
737	.program_sh_mem_settings = kgd_program_sh_mem_settings,
738	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
739	.init_interrupts = kgd_init_interrupts,
740	.hqd_load = kgd_hqd_load,
741	.hiq_mqd_load = kgd_hiq_mqd_load,
742	.hqd_sdma_load = kgd_hqd_sdma_load,
743	.hqd_dump = kgd_hqd_dump,
744	.hqd_sdma_dump = kgd_hqd_sdma_dump,
745	.hqd_is_occupied = kgd_hqd_is_occupied,
746	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
747	.hqd_destroy = kgd_hqd_destroy,
748	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
749	.wave_control_execute = kgd_wave_control_execute,
750	.get_atc_vmid_pasid_mapping_info =
751			get_atc_vmid_pasid_mapping_info,
752	.set_vm_context_page_table_base = set_vm_context_page_table_base,
753	.program_trap_handler_settings = program_trap_handler_settings,
754};
v5.9 (drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c, older version of the same file)
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 */
 22#include "amdgpu.h"
 23#include "amdgpu_amdkfd.h"
 24#include "gc/gc_10_1_0_offset.h"
 25#include "gc/gc_10_1_0_sh_mask.h"
 26#include "navi10_enum.h"
 27#include "athub/athub_2_0_0_offset.h"
 28#include "athub/athub_2_0_0_sh_mask.h"
 29#include "oss/osssys_5_0_0_offset.h"
 30#include "oss/osssys_5_0_0_sh_mask.h"
 31#include "soc15_common.h"
 32#include "v10_structs.h"
 33#include "nv.h"
 34#include "nvd.h"
 35#include "gfxhub_v2_0.h"
 36
 37enum hqd_dequeue_request_type {
 38	NO_ACTION = 0,
 39	DRAIN_PIPE,
 40	RESET_WAVES,
 41	SAVE_WAVES
 42};
 43
 44static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 45{
 46	return (struct amdgpu_device *)kgd;
 47}
 48
 49static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
 50			uint32_t queue, uint32_t vmid)
 51{
 52	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 53
 54	mutex_lock(&adev->srbm_mutex);
 55	nv_grbm_select(adev, mec, pipe, queue, vmid);
 56}
 57
 58static void unlock_srbm(struct kgd_dev *kgd)
 59{
 60	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 61
 62	nv_grbm_select(adev, 0, 0, 0, 0);
 63	mutex_unlock(&adev->srbm_mutex);
 64}
 65
 66static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
 67				uint32_t queue_id)
 68{
 69	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 70
 71	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 72	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 73
 74	lock_srbm(kgd, mec, pipe, queue_id, 0);
 75}
 76
 77static uint64_t get_queue_mask(struct amdgpu_device *adev,
 78			       uint32_t pipe_id, uint32_t queue_id)
 79{
 80	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
 81			queue_id;
 82
 83	return 1ull << bit;
 84}
 85
 86static void release_queue(struct kgd_dev *kgd)
 87{
 88	unlock_srbm(kgd);
 89}
 90
 91static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
 92					uint32_t sh_mem_config,
 93					uint32_t sh_mem_ape1_base,
 94					uint32_t sh_mem_ape1_limit,
 95					uint32_t sh_mem_bases)
 96{
 97	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 98
 99	lock_srbm(kgd, 0, 0, 0, vmid);
100
101	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
102	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
103	/* APE1 no longer exists on GFX9 */
104
105	unlock_srbm(kgd);
106}
107
108static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
109					unsigned int vmid)
110{
111	struct amdgpu_device *adev = get_amdgpu_device(kgd);
112
113	/*
114	 * We have to assume that there is no outstanding mapping.
115	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
116	 * a mapping is in progress or because a mapping finished
117	 * and the SW cleared it.
118	 * So the protocol is to always wait & clear.
119	 */
120	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
121			ATC_VMID0_PASID_MAPPING__VALID_MASK;
122
123	pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
124
125	pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
126	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
127	       pasid_mapping);
128
129#if 0
130	/* TODO: uncomment this code when the hardware support is ready. */
131	while (!(RREG32(SOC15_REG_OFFSET(
132				ATHUB, 0,
133				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
134		 (1U << vmid)))
135		cpu_relax();
136
137	pr_debug("ATHUB mapping update finished\n");
138	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
139				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
140	       1U << vmid);
141#endif
142
143	/* Mapping vmid to pasid also for IH block */
144	pr_debug("update mapping for IH block and mmhub");
145	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
146	       pasid_mapping);
147
148	return 0;
149}
150
151/* TODO - RING0 form of field is obsolete, seems to date back to SI
152 * but still works
153 */
154
155static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
156{
157	struct amdgpu_device *adev = get_amdgpu_device(kgd);
158	uint32_t mec;
159	uint32_t pipe;
160
161	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
162	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
163
164	lock_srbm(kgd, mec, pipe, 0, 0);
165
166	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
167		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
168		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
169
170	unlock_srbm(kgd);
171
172	return 0;
173}
174
175static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
176				unsigned int engine_id,
177				unsigned int queue_id)
178{
179	uint32_t sdma_engine_reg_base[2] = {
180		SOC15_REG_OFFSET(SDMA0, 0,
181				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
182		/* On gfx10, mmSDMA1_xxx registers are defined NOT based
183		 * on SDMA1 base address (dw 0x1860) but based on SDMA0
184		 * base address (dw 0x1260). Therefore use mmSDMA0_RLC0_RB_CNTL
185		 * instead of mmSDMA1_RLC0_RB_CNTL for the base address calc
186		 * below
187		 */
188		SOC15_REG_OFFSET(SDMA1, 0,
189				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
190	};
191
192	uint32_t retval = sdma_engine_reg_base[engine_id]
193		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
194
195	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
196			queue_id, retval);
197
198	return retval;
199}
200
201#if 0
202static uint32_t get_watch_base_addr(struct amdgpu_device *adev)
203{
204	uint32_t retval = SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) -
205			mmTCP_WATCH0_ADDR_H;
206
207	pr_debug("kfd: reg watch base address: 0x%x\n", retval);
208
209	return retval;
210}
211#endif
212
213static inline struct v10_compute_mqd *get_mqd(void *mqd)
214{
215	return (struct v10_compute_mqd *)mqd;
216}
217
218static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
219{
220	return (struct v10_sdma_mqd *)mqd;
221}
222
223static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
224			uint32_t queue_id, uint32_t __user *wptr,
225			uint32_t wptr_shift, uint32_t wptr_mask,
226			struct mm_struct *mm)
227{
228	struct amdgpu_device *adev = get_amdgpu_device(kgd);
229	struct v10_compute_mqd *m;
230	uint32_t *mqd_hqd;
231	uint32_t reg, hqd_base, data;
232
233	m = get_mqd(mqd);
234
235	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
236	acquire_queue(kgd, pipe_id, queue_id);
237
238	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
239	mqd_hqd = &m->cp_mqd_base_addr_lo;
240	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
241
242	for (reg = hqd_base;
243	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
244		WREG32(reg, mqd_hqd[reg - hqd_base]);
245
246
247	/* Activate doorbell logic before triggering WPTR poll. */
248	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
249			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
250	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
251
252	if (wptr) {
253		/* Don't read wptr with get_user because the user
254		 * context may not be accessible (if this function
255		 * runs in a work queue). Instead trigger a one-shot
256		 * polling read from memory in the CP. This assumes
257		 * that wptr is GPU-accessible in the queue's VMID via
258		 * ATC or SVM. WPTR==RPTR before starting the poll so
259		 * the CP starts fetching new commands from the right
260		 * place.
261		 *
262		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
263		 * tricky. Assume that the queue didn't overflow. The
264		 * number of valid bits in the 32-bit RPTR depends on
265		 * the queue size. The remaining bits are taken from
266		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
267		 * queue size.
268		 */
269		uint32_t queue_size =
270			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
271					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
272		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
273
274		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
275			guessed_wptr += queue_size;
276		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
277		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
278
279		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
280		       lower_32_bits(guessed_wptr));
281		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
282		       upper_32_bits(guessed_wptr));
283		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
284		       lower_32_bits((uint64_t)wptr));
285		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
286		       upper_32_bits((uint64_t)wptr));
287		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
288			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
289		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
290		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
291	}
292
293	/* Start the EOP fetcher */
294	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
295	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
296			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
297
298	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
299	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
300
301	release_queue(kgd);
302
303	return 0;
304}
305
306static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
307			    uint32_t pipe_id, uint32_t queue_id,
308			    uint32_t doorbell_off)
309{
310	struct amdgpu_device *adev = get_amdgpu_device(kgd);
311	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
312	struct v10_compute_mqd *m;
313	uint32_t mec, pipe;
314	int r;
315
316	m = get_mqd(mqd);
317
318	acquire_queue(kgd, pipe_id, queue_id);
319
320	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
321	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
322
323	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
324		 mec, pipe, queue_id);
325
326	spin_lock(&adev->gfx.kiq.ring_lock);
327	r = amdgpu_ring_alloc(kiq_ring, 7);
328	if (r) {
329		pr_err("Failed to alloc KIQ (%d).\n", r);
330		goto out_unlock;
331	}
332
333	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
334	amdgpu_ring_write(kiq_ring,
335			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
336			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
337			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
338			  PACKET3_MAP_QUEUES_PIPE(pipe) |
339			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
340			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
341			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
342			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
343			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
344	amdgpu_ring_write(kiq_ring,
345			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
346	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
347	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
348	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
349	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
350	amdgpu_ring_commit(kiq_ring);
351
352out_unlock:
353	spin_unlock(&adev->gfx.kiq.ring_lock);
354	release_queue(kgd);
355
356	return r;
357}
358
359static int kgd_hqd_dump(struct kgd_dev *kgd,
360			uint32_t pipe_id, uint32_t queue_id,
361			uint32_t (**dump)[2], uint32_t *n_regs)
362{
363	struct amdgpu_device *adev = get_amdgpu_device(kgd);
364	uint32_t i = 0, reg;
365#define HQD_N_REGS 56
366#define DUMP_REG(addr) do {				\
367		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
368			break;				\
369		(*dump)[i][0] = (addr) << 2;		\
370		(*dump)[i++][1] = RREG32(addr);		\
371	} while (0)
372
373	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
374	if (*dump == NULL)
375		return -ENOMEM;
376
377	acquire_queue(kgd, pipe_id, queue_id);
378
379	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
380	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
381		DUMP_REG(reg);
382
383	release_queue(kgd);
384
385	WARN_ON_ONCE(i != HQD_N_REGS);
386	*n_regs = i;
387
388	return 0;
389}
390
391static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
392			     uint32_t __user *wptr, struct mm_struct *mm)
393{
394	struct amdgpu_device *adev = get_amdgpu_device(kgd);
395	struct v10_sdma_mqd *m;
396	uint32_t sdma_rlc_reg_offset;
397	unsigned long end_jiffies;
398	uint32_t data;
399	uint64_t data64;
400	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
401
402	m = get_sdma_mqd(mqd);
403	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
404					    m->sdma_queue_id);
405
406	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
407		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
408
409	end_jiffies = msecs_to_jiffies(2000) + jiffies;
410	while (true) {
411		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
412		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
413			break;
414		if (time_after(jiffies, end_jiffies)) {
415			pr_err("SDMA RLC not idle in %s\n", __func__);
416			return -ETIME;
417		}
418		usleep_range(500, 1000);
419	}
420
421	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
422	       m->sdmax_rlcx_doorbell_offset);
423
424	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
425			     ENABLE, 1);
426	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
427	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
428				m->sdmax_rlcx_rb_rptr);
429	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
430				m->sdmax_rlcx_rb_rptr_hi);
431
432	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
433	if (read_user_wptr(mm, wptr64, data64)) {
434		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
435		       lower_32_bits(data64));
436		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
437		       upper_32_bits(data64));
438	} else {
439		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
440		       m->sdmax_rlcx_rb_rptr);
441		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
442		       m->sdmax_rlcx_rb_rptr_hi);
443	}
444	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
445
446	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
447	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
448			m->sdmax_rlcx_rb_base_hi);
449	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
450			m->sdmax_rlcx_rb_rptr_addr_lo);
451	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
452			m->sdmax_rlcx_rb_rptr_addr_hi);
453
454	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
455			     RB_ENABLE, 1);
456	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
457
458	return 0;
459}
460
461static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
462			     uint32_t engine_id, uint32_t queue_id,
463			     uint32_t (**dump)[2], uint32_t *n_regs)
464{
465	struct amdgpu_device *adev = get_amdgpu_device(kgd);
466	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
467			engine_id, queue_id);
468	uint32_t i = 0, reg;
469#undef HQD_N_REGS
470#define HQD_N_REGS (19+6+7+10)
471
472	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
473	if (*dump == NULL)
474		return -ENOMEM;
475
476	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
477		DUMP_REG(sdma_rlc_reg_offset + reg);
478	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
479		DUMP_REG(sdma_rlc_reg_offset + reg);
480	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
481	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
482		DUMP_REG(sdma_rlc_reg_offset + reg);
483	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
484	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
485		DUMP_REG(sdma_rlc_reg_offset + reg);
486
487	WARN_ON_ONCE(i != HQD_N_REGS);
488	*n_regs = i;
489
490	return 0;
491}
492
493static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
494				uint32_t pipe_id, uint32_t queue_id)
495{
496	struct amdgpu_device *adev = get_amdgpu_device(kgd);
497	uint32_t act;
498	bool retval = false;
499	uint32_t low, high;
500
501	acquire_queue(kgd, pipe_id, queue_id);
502	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
503	if (act) {
504		low = lower_32_bits(queue_address >> 8);
505		high = upper_32_bits(queue_address >> 8);
506
507		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
508		   high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
509			retval = true;
510	}
511	release_queue(kgd);
512	return retval;
513}
514
515static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
516{
517	struct amdgpu_device *adev = get_amdgpu_device(kgd);
518	struct v10_sdma_mqd *m;
519	uint32_t sdma_rlc_reg_offset;
520	uint32_t sdma_rlc_rb_cntl;
521
522	m = get_sdma_mqd(mqd);
523	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
524					    m->sdma_queue_id);
525
526	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
527
528	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
529		return true;
530
531	return false;
532}
533
534static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
535				enum kfd_preempt_type reset_type,
536				unsigned int utimeout, uint32_t pipe_id,
537				uint32_t queue_id)
538{
539	struct amdgpu_device *adev = get_amdgpu_device(kgd);
540	enum hqd_dequeue_request_type type;
541	unsigned long end_jiffies;
542	uint32_t temp;
543	struct v10_compute_mqd *m = get_mqd(mqd);
544
545	if (adev->in_gpu_reset)
546		return -EIO;
547
548#if 0
549	unsigned long flags;
550	int retry;
551#endif
552
553	acquire_queue(kgd, pipe_id, queue_id);
554
555	if (m->cp_hqd_vmid == 0)
556		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
557
558	switch (reset_type) {
559	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
560		type = DRAIN_PIPE;
561		break;
562	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
563		type = RESET_WAVES;
564		break;
565	default:
566		type = DRAIN_PIPE;
567		break;
568	}
569
570#if 0 /* Is this still needed? */
571	/* Workaround: If IQ timer is active and the wait time is close to or
572	 * equal to 0, dequeueing is not safe. Wait until either the wait time
573	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
574	 * cleared before continuing. Also, ensure wait times are set to at
575	 * least 0x3.
576	 */
577	local_irq_save(flags);
578	preempt_disable();
579	retry = 5000; /* wait for 500 usecs at maximum */
580	while (true) {
581		temp = RREG32(mmCP_HQD_IQ_TIMER);
582		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
583			pr_debug("HW is processing IQ\n");
584			goto loop;
585		}
586		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
587			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
588					== 3) /* SEM-rearm is safe */
589				break;
590			/* Wait time 3 is safe for CP, but our MMIO read/write
591			 * time is close to 1 microsecond, so check for 10 to
592			 * leave more buffer room
593			 */
594			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
595					>= 10)
596				break;
597			pr_debug("IQ timer is active\n");
598		} else
599			break;
600loop:
601		if (!retry) {
602			pr_err("CP HQD IQ timer status time out\n");
603			break;
604		}
605		ndelay(100);
606		--retry;
607	}
608	retry = 1000;
609	while (true) {
610		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
611		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
612			break;
613		pr_debug("Dequeue request is pending\n");
614
615		if (!retry) {
616			pr_err("CP HQD dequeue request time out\n");
617			break;
618		}
619		ndelay(100);
620		--retry;
621	}
622	local_irq_restore(flags);
623	preempt_enable();
624#endif
625
626	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
627
628	end_jiffies = (utimeout * HZ / 1000) + jiffies;
629	while (true) {
630		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
631		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
632			break;
633		if (time_after(jiffies, end_jiffies)) {
634			pr_err("cp queue preemption time out.\n");
635			release_queue(kgd);
636			return -ETIME;
637		}
638		usleep_range(500, 1000);
639	}
640
641	release_queue(kgd);
642	return 0;
643}
644
645static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
646				unsigned int utimeout)
647{
648	struct amdgpu_device *adev = get_amdgpu_device(kgd);
649	struct v10_sdma_mqd *m;
650	uint32_t sdma_rlc_reg_offset;
651	uint32_t temp;
652	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
653
654	m = get_sdma_mqd(mqd);
655	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
656					    m->sdma_queue_id);
657
658	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
659	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
660	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
661
662	while (true) {
663		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
664		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
665			break;
666		if (time_after(jiffies, end_jiffies)) {
667			pr_err("SDMA RLC not idle in %s\n", __func__);
668			return -ETIME;
669		}
670		usleep_range(500, 1000);
671	}
672
673	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
674	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
675		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
676		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
677
678	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
679	m->sdmax_rlcx_rb_rptr_hi =
680		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
681
682	return 0;
683}
684
685static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
686					uint8_t vmid, uint16_t *p_pasid)
687{
688	uint32_t value;
689	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
690
691	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
692		     + vmid);
693	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
694
695	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
696}
697
698static int kgd_address_watch_disable(struct kgd_dev *kgd)
699{
700	return 0;
701}
702
703static int kgd_address_watch_execute(struct kgd_dev *kgd,
704					unsigned int watch_point_id,
705					uint32_t cntl_val,
706					uint32_t addr_hi,
707					uint32_t addr_lo)
708{
709	return 0;
710}
711
712static int kgd_wave_control_execute(struct kgd_dev *kgd,
713					uint32_t gfx_index_val,
714					uint32_t sq_cmd)
715{
716	struct amdgpu_device *adev = get_amdgpu_device(kgd);
717	uint32_t data = 0;
718
719	mutex_lock(&adev->grbm_idx_mutex);
720
721	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
722	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
723
724	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
725		INSTANCE_BROADCAST_WRITES, 1);
726	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
727		SA_BROADCAST_WRITES, 1);
728	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
729		SE_BROADCAST_WRITES, 1);
730
731	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
732	mutex_unlock(&adev->grbm_idx_mutex);
733
734	return 0;
735}
736
737static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
738					unsigned int watch_point_id,
739					unsigned int reg_offset)
740{
741	return 0;
742}
743
744static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
745		uint64_t page_table_base)
746{
747	struct amdgpu_device *adev = get_amdgpu_device(kgd);
748
749	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
750		pr_err("trying to set page table base for wrong VMID %u\n",
751		       vmid);
752		return;
753	}
754
755	/* SDMA is on gfxhub as well for Navi1* series */
756	gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
757}
758
759const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
760	.program_sh_mem_settings = kgd_program_sh_mem_settings,
761	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
762	.init_interrupts = kgd_init_interrupts,
763	.hqd_load = kgd_hqd_load,
764	.hiq_mqd_load = kgd_hiq_mqd_load,
765	.hqd_sdma_load = kgd_hqd_sdma_load,
766	.hqd_dump = kgd_hqd_dump,
767	.hqd_sdma_dump = kgd_hqd_sdma_dump,
768	.hqd_is_occupied = kgd_hqd_is_occupied,
769	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
770	.hqd_destroy = kgd_hqd_destroy,
771	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
772	.address_watch_disable = kgd_address_watch_disable,
773	.address_watch_execute = kgd_address_watch_execute,
774	.wave_control_execute = kgd_wave_control_execute,
775	.address_watch_get_offset = kgd_address_watch_get_offset,
776	.get_atc_vmid_pasid_mapping_info =
777			get_atc_vmid_pasid_mapping_info,
778	.set_vm_context_page_table_base = set_vm_context_page_table_base,
779	.get_hive_id = amdgpu_amdkfd_get_hive_id,
780	.get_unique_id = amdgpu_amdkfd_get_unique_id,
781};