v6.2
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#ifndef __AMDGPU_MES_H__
 25#define __AMDGPU_MES_H__
 26
 27#include "amdgpu_irq.h"
 28#include "kgd_kfd_interface.h"
 29#include "amdgpu_gfx.h"
 30#include <linux/sched/mm.h>
 31
 32#define AMDGPU_MES_MAX_COMPUTE_PIPES        8
 33#define AMDGPU_MES_MAX_GFX_PIPES            2
 34#define AMDGPU_MES_MAX_SDMA_PIPES           2
 35
 36#define AMDGPU_MES_API_VERSION_SHIFT	12
 37#define AMDGPU_MES_FEAT_VERSION_SHIFT	24
 38
 39#define AMDGPU_MES_VERSION_MASK		0x00000fff
 40#define AMDGPU_MES_API_VERSION_MASK	0x00fff000
 41#define AMDGPU_MES_FEAT_VERSION_MASK	0xff000000
 42
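/*
 * Illustrative sketch, not part of the upstream header: the MES firmware
 * reports a packed version dword (for example mes->sched_version), and the
 * masks/shifts above split it as
 *   [31:24] feature version | [23:12] API version | [11:0] base version.
 * The helper names below are hypothetical and exist only to show the decode.
 */
static inline uint32_t mes_example_base_version(uint32_t ver)
{
	return ver & AMDGPU_MES_VERSION_MASK;
}

static inline uint32_t mes_example_api_version(uint32_t ver)
{
	return (ver & AMDGPU_MES_API_VERSION_MASK) >> AMDGPU_MES_API_VERSION_SHIFT;
}

static inline uint32_t mes_example_feat_version(uint32_t ver)
{
	return (ver & AMDGPU_MES_FEAT_VERSION_MASK) >> AMDGPU_MES_FEAT_VERSION_SHIFT;
}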
 43enum amdgpu_mes_priority_level {
 44	AMDGPU_MES_PRIORITY_LEVEL_LOW       = 0,
 45	AMDGPU_MES_PRIORITY_LEVEL_NORMAL    = 1,
 46	AMDGPU_MES_PRIORITY_LEVEL_MEDIUM    = 2,
 47	AMDGPU_MES_PRIORITY_LEVEL_HIGH      = 3,
 48	AMDGPU_MES_PRIORITY_LEVEL_REALTIME  = 4,
 49	AMDGPU_MES_PRIORITY_NUM_LEVELS
 50};
 51
 52#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
 53#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
 54
 55struct amdgpu_mes_funcs;
 56
 57enum admgpu_mes_pipe {
 58	AMDGPU_MES_SCHED_PIPE = 0,
 59	AMDGPU_MES_KIQ_PIPE,
 60	AMDGPU_MAX_MES_PIPES = 2,
 61};
 62
 63struct amdgpu_mes {
 64	struct amdgpu_device            *adev;
 65
 66	struct mutex                    mutex_hidden;
 67
 68	struct idr                      pasid_idr;
 69	struct idr                      gang_id_idr;
 70	struct idr                      queue_id_idr;
 71	struct ida                      doorbell_ida;
 72
 73	spinlock_t                      queue_id_lock;
 74
 75	uint32_t			sched_version;
 76	uint32_t			kiq_version;
 77
 78	uint32_t                        total_max_queue;
 79	uint32_t                        doorbell_id_offset;
 80	uint32_t                        max_doorbell_slices;
 81
 82	uint64_t                        default_process_quantum;
 83	uint64_t                        default_gang_quantum;
 84
 85	struct amdgpu_ring              ring;
 86	spinlock_t                      ring_lock;
 87
 88	const struct firmware           *fw[AMDGPU_MAX_MES_PIPES];
 89
 90	/* mes ucode */
 91	struct amdgpu_bo		*ucode_fw_obj[AMDGPU_MAX_MES_PIPES];
 92	uint64_t			ucode_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
 93	uint32_t			*ucode_fw_ptr[AMDGPU_MAX_MES_PIPES];
 94	uint64_t                        uc_start_addr[AMDGPU_MAX_MES_PIPES];
 95
 96	/* mes ucode data */
 97	struct amdgpu_bo		*data_fw_obj[AMDGPU_MAX_MES_PIPES];
 98	uint64_t			data_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
 99	uint32_t			*data_fw_ptr[AMDGPU_MAX_MES_PIPES];
100	uint64_t                        data_start_addr[AMDGPU_MAX_MES_PIPES];
101
102	/* eop gpu obj */
103	struct amdgpu_bo		*eop_gpu_obj[AMDGPU_MAX_MES_PIPES];
104	uint64_t                        eop_gpu_addr[AMDGPU_MAX_MES_PIPES];
105
106	void                            *mqd_backup[AMDGPU_MAX_MES_PIPES];
107	struct amdgpu_irq_src	        irq[AMDGPU_MAX_MES_PIPES];
108
109	uint32_t                        vmid_mask_gfxhub;
110	uint32_t                        vmid_mask_mmhub;
111	uint32_t                        compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
112	uint32_t                        gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
113	uint32_t                        sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
114	uint32_t                        aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
115	uint32_t                        sch_ctx_offs;
116	uint64_t			sch_ctx_gpu_addr;
117	uint64_t			*sch_ctx_ptr;
118	uint32_t			query_status_fence_offs;
119	uint64_t			query_status_fence_gpu_addr;
120	uint64_t			*query_status_fence_ptr;
121	uint32_t                        read_val_offs;
122	uint64_t			read_val_gpu_addr;
123	uint32_t			*read_val_ptr;
124
125	uint32_t			saved_flags;
126
127	/* initialize kiq pipe */
128	int                             (*kiq_hw_init)(struct amdgpu_device *adev);
129	int                             (*kiq_hw_fini)(struct amdgpu_device *adev);
130
131	/* ip specific functions */
132	const struct amdgpu_mes_funcs   *funcs;
133};
134
135struct amdgpu_mes_process {
136	int			pasid;
137	struct			amdgpu_vm *vm;
138	uint64_t		pd_gpu_addr;
139	struct amdgpu_bo 	*proc_ctx_bo;
140	uint64_t 		proc_ctx_gpu_addr;
141	void 			*proc_ctx_cpu_ptr;
142	uint64_t 		process_quantum;
143	struct 			list_head gang_list;
144	uint32_t 		doorbell_index;
145	unsigned long 		*doorbell_bitmap;
146	struct mutex		doorbell_lock;
147};
148
149struct amdgpu_mes_gang {
150	int 				gang_id;
151	int 				priority;
152	int 				inprocess_gang_priority;
153	int 				global_priority_level;
154	struct list_head 		list;
155	struct amdgpu_mes_process 	*process;
156	struct amdgpu_bo 		*gang_ctx_bo;
157	uint64_t 			gang_ctx_gpu_addr;
158	void 				*gang_ctx_cpu_ptr;
159	uint64_t 			gang_quantum;
160	struct list_head 		queue_list;
161};
162
163struct amdgpu_mes_queue {
164	struct list_head 		list;
165	struct amdgpu_mes_gang 		*gang;
166	int 				queue_id;
167	uint64_t 			doorbell_off;
168	struct amdgpu_bo		*mqd_obj;
169	void				*mqd_cpu_ptr;
170	uint64_t 			mqd_gpu_addr;
171	uint64_t 			wptr_gpu_addr;
172	int 				queue_type;
173	int 				paging;
174	struct amdgpu_ring 		*ring;
175};
176
177struct amdgpu_mes_queue_properties {
178	int 			queue_type;
179	uint64_t                hqd_base_gpu_addr;
180	uint64_t                rptr_gpu_addr;
181	uint64_t                wptr_gpu_addr;
182	uint64_t                wptr_mc_addr;
183	uint32_t                queue_size;
184	uint64_t                eop_gpu_addr;
185	uint32_t                hqd_pipe_priority;
186	uint32_t                hqd_queue_priority;
187	bool 			paging;
188	struct amdgpu_ring 	*ring;
189	/* out */
190	uint64_t       		doorbell_off;
191};
192
193struct amdgpu_mes_gang_properties {
194	uint32_t 	priority;
195	uint32_t 	gang_quantum;
196	uint32_t 	inprocess_gang_priority;
197	uint32_t 	priority_level;
198	int 		global_priority_level;
199};
200
201struct mes_add_queue_input {
202	uint32_t	process_id;
203	uint64_t	page_table_base_addr;
204	uint64_t	process_va_start;
205	uint64_t	process_va_end;
206	uint64_t	process_quantum;
207	uint64_t	process_context_addr;
208	uint64_t	gang_quantum;
209	uint64_t	gang_context_addr;
210	uint32_t	inprocess_gang_priority;
211	uint32_t	gang_global_priority_level;
212	uint32_t	doorbell_offset;
213	uint64_t	mqd_addr;
214	uint64_t	wptr_addr;
215	uint64_t	wptr_mc_addr;
216	uint32_t	queue_type;
217	uint32_t	paging;
218	uint32_t        gws_base;
219	uint32_t        gws_size;
220	uint64_t	tba_addr;
221	uint64_t	tma_addr;
222	uint32_t	is_kfd_process;
223	uint32_t	is_aql_queue;
224	uint32_t	queue_size;
225};
226
227struct mes_remove_queue_input {
228	uint32_t	doorbell_offset;
229	uint64_t	gang_context_addr;
230};
231
232struct mes_unmap_legacy_queue_input {
233	enum amdgpu_unmap_queues_action    action;
234	uint32_t                           queue_type;
235	uint32_t                           doorbell_offset;
236	uint32_t                           pipe_id;
237	uint32_t                           queue_id;
238	uint64_t                           trail_fence_addr;
239	uint64_t                           trail_fence_data;
240};
241
242struct mes_suspend_gang_input {
243	bool		suspend_all_gangs;
244	uint64_t	gang_context_addr;
245	uint64_t	suspend_fence_addr;
246	uint32_t	suspend_fence_value;
247};
248
249struct mes_resume_gang_input {
250	bool		resume_all_gangs;
251	uint64_t	gang_context_addr;
252};
253
254enum mes_misc_opcode {
255	MES_MISC_OP_WRITE_REG,
256	MES_MISC_OP_READ_REG,
257	MES_MISC_OP_WRM_REG_WAIT,
258	MES_MISC_OP_WRM_REG_WR_WAIT,
259};
260
261struct mes_misc_op_input {
262	enum mes_misc_opcode op;
263
264	union {
265		struct {
266			uint32_t                  reg_offset;
267			uint64_t                  buffer_addr;
268		} read_reg;
269
270		struct {
271			uint32_t                  reg_offset;
272			uint32_t                  reg_value;
273		} write_reg;
274
275		struct {
276			uint32_t                   ref;
277			uint32_t                   mask;
278			uint32_t                   reg0;
279			uint32_t                   reg1;
280		} wrm_reg;
281	};
282};
283
284struct amdgpu_mes_funcs {
285	int (*add_hw_queue)(struct amdgpu_mes *mes,
286			    struct mes_add_queue_input *input);
287
288	int (*remove_hw_queue)(struct amdgpu_mes *mes,
289			       struct mes_remove_queue_input *input);
290
291	int (*unmap_legacy_queue)(struct amdgpu_mes *mes,
292				  struct mes_unmap_legacy_queue_input *input);
293
294	int (*suspend_gang)(struct amdgpu_mes *mes,
295			    struct mes_suspend_gang_input *input);
296
297	int (*resume_gang)(struct amdgpu_mes *mes,
298			   struct mes_resume_gang_input *input);
299
300	int (*misc_op)(struct amdgpu_mes *mes,
301		       struct mes_misc_op_input *input);
302};
303
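/*
 * Illustrative sketch, not from the upstream file: how a caller could fill
 * mes_misc_op_input for a "write reg0, then wait on reg1 against ref/mask"
 * request and submit it through the ->misc_op() hook above, roughly what
 * amdgpu_mes_reg_write_reg_wait() declared further below is expected to do.
 * The helper name is hypothetical.
 */
static inline int mes_example_reg_write_reg_wait(struct amdgpu_mes *mes,
						 uint32_t reg0, uint32_t reg1,
						 uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input = {
		.op = MES_MISC_OP_WRM_REG_WR_WAIT,
		.wrm_reg = {
			.ref  = ref,
			.mask = mask,
			.reg0 = reg0,
			.reg1 = reg1,
		},
	};

	/* locking rules for calls into mes->funcs are described near the
	 * end of this header (amdgpu_mes_lock/amdgpu_mes_unlock) */
	return mes->funcs->misc_op(mes, &op_input);
}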
304#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
305#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
306
307int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
308
309int amdgpu_mes_init(struct amdgpu_device *adev);
310void amdgpu_mes_fini(struct amdgpu_device *adev);
311
312int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
313			      struct amdgpu_vm *vm);
314void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid);
315
316int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
317			struct amdgpu_mes_gang_properties *gprops,
318			int *gang_id);
319int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id);
320
321int amdgpu_mes_suspend(struct amdgpu_device *adev);
322int amdgpu_mes_resume(struct amdgpu_device *adev);
323
324int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
325			    struct amdgpu_mes_queue_properties *qprops,
326			    int *queue_id);
327int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
328
329int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
330				  struct amdgpu_ring *ring,
331				  enum amdgpu_unmap_queues_action action,
332				  u64 gpu_addr, u64 seq);
333
334uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
335int amdgpu_mes_wreg(struct amdgpu_device *adev,
336		    uint32_t reg, uint32_t val);
337int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
338			uint32_t val, uint32_t mask);
339int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
340				  uint32_t reg0, uint32_t reg1,
341				  uint32_t ref, uint32_t mask);
342
343int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
344			int queue_type, int idx,
345			struct amdgpu_mes_ctx_data *ctx_data,
346			struct amdgpu_ring **out);
347void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
348			    struct amdgpu_ring *ring);
349
350uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
351						   enum amdgpu_mes_priority_level prio);
352
353int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
354				   struct amdgpu_mes_ctx_data *ctx_data);
355void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data);
356int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
357				 struct amdgpu_vm *vm,
358				 struct amdgpu_mes_ctx_data *ctx_data);
359int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
360				   struct amdgpu_mes_ctx_data *ctx_data);
361
362int amdgpu_mes_self_test(struct amdgpu_device *adev);
363
364int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
365					unsigned int *doorbell_index);
366void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
367					unsigned int doorbell_index);
368unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
369					struct amdgpu_device *adev,
370					uint32_t doorbell_index,
371					unsigned int doorbell_id);
372int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
373
374/*
375 * MES lock can be taken in MMU notifiers.
376 *
377 * A bit more detail about why to set no-FS reclaim with MES lock:
378 *
379 * The purpose of the MMU notifier is to stop GPU access to memory so
380 * that the Linux VM subsystem can move pages around safely. This is
381 * done by preempting user mode queues for the affected process. When
382 * MES is used, MES lock needs to be taken to preempt the queues.
383 *
384 * The MMU notifier callback entry point in the driver is
385 * amdgpu_mn_invalidate_range_start_hsa. The relevant call chain from
386 * there is:
387 * amdgpu_amdkfd_evict_userptr -> kgd2kfd_quiesce_mm ->
388 * kfd_process_evict_queues -> pdd->dev->dqm->ops.evict_process_queues
389 *
390 * The last part of the chain is a function pointer where we take the
391 * MES lock.
392 *
393 * The problem with taking locks in the MMU notifier is that MMU
394 * notifiers can be called in reclaim-FS context. That's where the
395 * kernel frees up pages to make room for new page allocations under
396 * memory pressure. While we are running in reclaim-FS context, we must
397 * not trigger another memory reclaim operation because that would
398 * recursively reenter the reclaim code and cause a deadlock. The
399 * memalloc_noreclaim_save/restore calls guarantee that.
400 *
401 * In addition we also need to avoid lock dependencies on other locks taken
402 * under the MES lock, for example reservation locks. Here is a possible
403 * scenario of a deadlock:
404 * Thread A: takes and holds reservation lock | triggers reclaim-FS |
405 * MMU notifier | blocks trying to take MES lock
406 * Thread B: takes and holds MES lock | blocks trying to take reservation lock
407 *
408 * In this scenario Thread B gets involved in a deadlock even without
409 * triggering a reclaim-FS operation itself.
410 * To fix this and break the lock dependency chain you'd need to either:
411 * 1. protect reservation locks with memalloc_nofs_save/restore, or
412 * 2. avoid taking reservation locks under the MES lock.
413 *
414 * Reservation locks are taken all over the kernel in different subsystems, we
415 * have no control over them and their lock dependencies. So the only workable
416 * solution is to avoid taking other locks under the MES lock.
417 * As a result, make sure no reclaim-FS happens while holding this lock anywhere
418 * to prevent deadlocks when an MMU notifier runs in reclaim-FS context.
419 */
420static inline void amdgpu_mes_lock(struct amdgpu_mes *mes)
421{
422	mutex_lock(&mes->mutex_hidden);
423	mes->saved_flags = memalloc_noreclaim_save();
424}
425
426static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
427{
428	memalloc_noreclaim_restore(mes->saved_flags);
429	mutex_unlock(&mes->mutex_hidden);
430}
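/*
 * Illustrative usage sketch, an assumption rather than upstream code: per the
 * comment above, a call into mes->funcs is bracketed by amdgpu_mes_lock()/
 * amdgpu_mes_unlock() so that memory reclaim is suppressed for the whole
 * critical section. The helper name is hypothetical.
 */
static inline int mes_example_remove_hw_queue(struct amdgpu_mes *mes,
					      struct mes_remove_queue_input *input)
{
	int r;

	amdgpu_mes_lock(mes);
	r = mes->funcs->remove_hw_queue(mes, input);
	amdgpu_mes_unlock(mes);

	return r;
}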
431#endif /* __AMDGPU_MES_H__ */
v6.13.7
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#ifndef __AMDGPU_MES_H__
 25#define __AMDGPU_MES_H__
 26
 27#include "amdgpu_irq.h"
 28#include "kgd_kfd_interface.h"
 29#include "amdgpu_gfx.h"
 30#include "amdgpu_doorbell.h"
 31#include <linux/sched/mm.h>
 32
 33#define AMDGPU_MES_MAX_COMPUTE_PIPES        8
 34#define AMDGPU_MES_MAX_GFX_PIPES            2
 35#define AMDGPU_MES_MAX_SDMA_PIPES           2
 36
 37#define AMDGPU_MES_API_VERSION_SHIFT	12
 38#define AMDGPU_MES_FEAT_VERSION_SHIFT	24
 39
 40#define AMDGPU_MES_VERSION_MASK		0x00000fff
 41#define AMDGPU_MES_API_VERSION_MASK	0x00fff000
 42#define AMDGPU_MES_FEAT_VERSION_MASK	0xff000000
 43#define AMDGPU_MES_MSCRATCH_SIZE	0x8000
 44
 45enum amdgpu_mes_priority_level {
 46	AMDGPU_MES_PRIORITY_LEVEL_LOW       = 0,
 47	AMDGPU_MES_PRIORITY_LEVEL_NORMAL    = 1,
 48	AMDGPU_MES_PRIORITY_LEVEL_MEDIUM    = 2,
 49	AMDGPU_MES_PRIORITY_LEVEL_HIGH      = 3,
 50	AMDGPU_MES_PRIORITY_LEVEL_REALTIME  = 4,
 51	AMDGPU_MES_PRIORITY_NUM_LEVELS
 52};
 53
 54#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
 55#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
 56
 57struct amdgpu_mes_funcs;
 58
 59enum admgpu_mes_pipe {
 60	AMDGPU_MES_SCHED_PIPE = 0,
 61	AMDGPU_MES_KIQ_PIPE,
 62	AMDGPU_MAX_MES_PIPES = 2,
 63};
 64
 65struct amdgpu_mes {
 66	struct amdgpu_device            *adev;
 67
 68	struct mutex                    mutex_hidden;
 69
 70	struct idr                      pasid_idr;
 71	struct idr                      gang_id_idr;
 72	struct idr                      queue_id_idr;
 73	struct ida                      doorbell_ida;
 74
 75	spinlock_t                      queue_id_lock;
 76
 77	uint32_t			sched_version;
 78	uint32_t			kiq_version;
 79	uint32_t			fw_version[AMDGPU_MAX_MES_PIPES];
 80	bool                            enable_legacy_queue_map;
 81
 82	uint32_t                        total_max_queue;
 83	uint32_t                        max_doorbell_slices;
 84
 85	uint64_t                        default_process_quantum;
 86	uint64_t                        default_gang_quantum;
 87
 88	struct amdgpu_ring              ring[AMDGPU_MAX_MES_PIPES];
 89	spinlock_t                      ring_lock[AMDGPU_MAX_MES_PIPES];
 90
 91	const struct firmware           *fw[AMDGPU_MAX_MES_PIPES];
 92
 93	/* mes ucode */
 94	struct amdgpu_bo		*ucode_fw_obj[AMDGPU_MAX_MES_PIPES];
 95	uint64_t			ucode_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
 96	uint32_t			*ucode_fw_ptr[AMDGPU_MAX_MES_PIPES];
 97	uint64_t                        uc_start_addr[AMDGPU_MAX_MES_PIPES];
 98
 99	/* mes ucode data */
100	struct amdgpu_bo		*data_fw_obj[AMDGPU_MAX_MES_PIPES];
101	uint64_t			data_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
102	uint32_t			*data_fw_ptr[AMDGPU_MAX_MES_PIPES];
103	uint64_t                        data_start_addr[AMDGPU_MAX_MES_PIPES];
104
105	/* eop gpu obj */
106	struct amdgpu_bo		*eop_gpu_obj[AMDGPU_MAX_MES_PIPES];
107	uint64_t                        eop_gpu_addr[AMDGPU_MAX_MES_PIPES];
108
109	void                            *mqd_backup[AMDGPU_MAX_MES_PIPES];
110	struct amdgpu_irq_src	        irq[AMDGPU_MAX_MES_PIPES];
111
112	uint32_t                        vmid_mask_gfxhub;
113	uint32_t                        vmid_mask_mmhub;
114	uint32_t                        compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
115	uint32_t                        gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
116	uint32_t                        sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
117	uint32_t                        aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
118	uint32_t                        sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
119	uint64_t			sch_ctx_gpu_addr[AMDGPU_MAX_MES_PIPES];
120	uint64_t			*sch_ctx_ptr[AMDGPU_MAX_MES_PIPES];
121	uint32_t			query_status_fence_offs[AMDGPU_MAX_MES_PIPES];
122	uint64_t			query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
123	uint64_t			*query_status_fence_ptr[AMDGPU_MAX_MES_PIPES];
124
125	uint32_t			saved_flags;
126
127	/* initialize kiq pipe */
128	int                             (*kiq_hw_init)(struct amdgpu_device *adev);
129	int                             (*kiq_hw_fini)(struct amdgpu_device *adev);
130
131	/* MES doorbells */
132	uint32_t			db_start_dw_offset;
133	uint32_t			num_mes_dbs;
134	unsigned long			*doorbell_bitmap;
135
136	/* MES event log buffer */
137	uint32_t			event_log_size;
138	struct amdgpu_bo	*event_log_gpu_obj;
139	uint64_t			event_log_gpu_addr;
140	void				*event_log_cpu_addr;
141
142	/* ip specific functions */
143	const struct amdgpu_mes_funcs   *funcs;
144
145	/* mes resource_1 bo*/
146	struct amdgpu_bo    *resource_1;
147	uint64_t            resource_1_gpu_addr;
148	void                *resource_1_addr;
149
150};
151
152struct amdgpu_mes_process {
153	int			pasid;
154	struct			amdgpu_vm *vm;
155	uint64_t		pd_gpu_addr;
156	struct amdgpu_bo 	*proc_ctx_bo;
157	uint64_t 		proc_ctx_gpu_addr;
158	void 			*proc_ctx_cpu_ptr;
159	uint64_t 		process_quantum;
160	struct 			list_head gang_list;
161	uint32_t 		doorbell_index;
162	struct mutex		doorbell_lock;
163};
164
165struct amdgpu_mes_gang {
166	int 				gang_id;
167	int 				priority;
168	int 				inprocess_gang_priority;
169	int 				global_priority_level;
170	struct list_head 		list;
171	struct amdgpu_mes_process 	*process;
172	struct amdgpu_bo 		*gang_ctx_bo;
173	uint64_t 			gang_ctx_gpu_addr;
174	void 				*gang_ctx_cpu_ptr;
175	uint64_t 			gang_quantum;
176	struct list_head 		queue_list;
177};
178
179struct amdgpu_mes_queue {
180	struct list_head 		list;
181	struct amdgpu_mes_gang 		*gang;
182	int 				queue_id;
183	uint64_t 			doorbell_off;
184	struct amdgpu_bo		*mqd_obj;
185	void				*mqd_cpu_ptr;
186	uint64_t 			mqd_gpu_addr;
187	uint64_t 			wptr_gpu_addr;
188	int 				queue_type;
189	int 				paging;
190	struct amdgpu_ring 		*ring;
191};
192
193struct amdgpu_mes_queue_properties {
194	int 			queue_type;
195	uint64_t                hqd_base_gpu_addr;
196	uint64_t                rptr_gpu_addr;
197	uint64_t                wptr_gpu_addr;
198	uint64_t                wptr_mc_addr;
199	uint32_t                queue_size;
200	uint64_t                eop_gpu_addr;
201	uint32_t                hqd_pipe_priority;
202	uint32_t                hqd_queue_priority;
203	bool 			paging;
204	struct amdgpu_ring 	*ring;
205	/* out */
206	uint64_t       		doorbell_off;
207};
208
209struct amdgpu_mes_gang_properties {
210	uint32_t 	priority;
211	uint32_t 	gang_quantum;
212	uint32_t 	inprocess_gang_priority;
213	uint32_t 	priority_level;
214	int 		global_priority_level;
215};
216
217struct mes_add_queue_input {
218	uint32_t	process_id;
219	uint64_t	page_table_base_addr;
220	uint64_t	process_va_start;
221	uint64_t	process_va_end;
222	uint64_t	process_quantum;
223	uint64_t	process_context_addr;
224	uint64_t	gang_quantum;
225	uint64_t	gang_context_addr;
226	uint32_t	inprocess_gang_priority;
227	uint32_t	gang_global_priority_level;
228	uint32_t	doorbell_offset;
229	uint64_t	mqd_addr;
230	uint64_t	wptr_addr;
231	uint64_t	wptr_mc_addr;
232	uint32_t	queue_type;
233	uint32_t	paging;
234	uint32_t        gws_base;
235	uint32_t        gws_size;
236	uint64_t	tba_addr;
237	uint64_t	tma_addr;
238	uint32_t	trap_en;
239	uint32_t	skip_process_ctx_clear;
240	uint32_t	is_kfd_process;
241	uint32_t	is_aql_queue;
242	uint32_t	queue_size;
243	uint32_t	exclusively_scheduled;
244};
245
246struct mes_remove_queue_input {
247	uint32_t	doorbell_offset;
248	uint64_t	gang_context_addr;
249};
250
251struct mes_reset_queue_input {
252	uint32_t	doorbell_offset;
253	uint64_t	gang_context_addr;
254	bool		use_mmio;
255	uint32_t	queue_type;
256	uint32_t	me_id;
257	uint32_t	pipe_id;
258	uint32_t	queue_id;
259	uint32_t	xcc_id;
260	uint32_t	vmid;
261};
262
263struct mes_map_legacy_queue_input {
264	uint32_t                           queue_type;
265	uint32_t                           doorbell_offset;
266	uint32_t                           pipe_id;
267	uint32_t                           queue_id;
268	uint64_t                           mqd_addr;
269	uint64_t                           wptr_addr;
270};
271
272struct mes_unmap_legacy_queue_input {
273	enum amdgpu_unmap_queues_action    action;
274	uint32_t                           queue_type;
275	uint32_t                           doorbell_offset;
276	uint32_t                           pipe_id;
277	uint32_t                           queue_id;
278	uint64_t                           trail_fence_addr;
279	uint64_t                           trail_fence_data;
280};
281
282struct mes_suspend_gang_input {
283	bool		suspend_all_gangs;
284	uint64_t	gang_context_addr;
285	uint64_t	suspend_fence_addr;
286	uint32_t	suspend_fence_value;
287};
288
289struct mes_resume_gang_input {
290	bool		resume_all_gangs;
291	uint64_t	gang_context_addr;
292};
293
294struct mes_reset_legacy_queue_input {
295	uint32_t                           queue_type;
296	uint32_t                           doorbell_offset;
297	bool                               use_mmio;
298	uint32_t                           me_id;
299	uint32_t                           pipe_id;
300	uint32_t                           queue_id;
301	uint64_t                           mqd_addr;
302	uint64_t                           wptr_addr;
303	uint32_t                           vmid;
304};
305
306enum mes_misc_opcode {
307	MES_MISC_OP_WRITE_REG,
308	MES_MISC_OP_READ_REG,
309	MES_MISC_OP_WRM_REG_WAIT,
310	MES_MISC_OP_WRM_REG_WR_WAIT,
311	MES_MISC_OP_SET_SHADER_DEBUGGER,
312	MES_MISC_OP_CHANGE_CONFIG,
313};
314
315struct mes_misc_op_input {
316	enum mes_misc_opcode op;
317
318	union {
319		struct {
320			uint32_t                  reg_offset;
321			uint64_t                  buffer_addr;
322		} read_reg;
323
324		struct {
325			uint32_t                  reg_offset;
326			uint32_t                  reg_value;
327		} write_reg;
328
329		struct {
330			uint32_t                   ref;
331			uint32_t                   mask;
332			uint32_t                   reg0;
333			uint32_t                   reg1;
334		} wrm_reg;
335
336		struct {
337			uint64_t process_context_addr;
338			union {
339				struct {
340					uint32_t single_memop : 1;
341					uint32_t single_alu_op : 1;
342					uint32_t reserved: 29;
343					uint32_t process_ctx_flush: 1;
344				};
345				uint32_t u32all;
346			} flags;
347			uint32_t spi_gdbg_per_vmid_cntl;
348			uint32_t tcp_watch_cntl[4];
349			uint32_t trap_en;
350		} set_shader_debugger;
351
352		struct {
353			union {
354				struct {
355					uint32_t limit_single_process : 1;
356					uint32_t enable_hws_logging_buffer : 1;
357					uint32_t reserved : 30;
358				};
359				uint32_t all;
360			} option;
361			struct {
362				uint32_t tdr_level;
363				uint32_t tdr_delay;
364			} tdr_config;
365		} change_config;
366	};
367};
368
369struct amdgpu_mes_funcs {
370	int (*add_hw_queue)(struct amdgpu_mes *mes,
371			    struct mes_add_queue_input *input);
372
373	int (*remove_hw_queue)(struct amdgpu_mes *mes,
374			       struct mes_remove_queue_input *input);
375
376	int (*map_legacy_queue)(struct amdgpu_mes *mes,
377				struct mes_map_legacy_queue_input *input);
378
379	int (*unmap_legacy_queue)(struct amdgpu_mes *mes,
380				  struct mes_unmap_legacy_queue_input *input);
381
382	int (*suspend_gang)(struct amdgpu_mes *mes,
383			    struct mes_suspend_gang_input *input);
384
385	int (*resume_gang)(struct amdgpu_mes *mes,
386			   struct mes_resume_gang_input *input);
387
388	int (*misc_op)(struct amdgpu_mes *mes,
389		       struct mes_misc_op_input *input);
390
391	int (*reset_legacy_queue)(struct amdgpu_mes *mes,
392				  struct mes_reset_legacy_queue_input *input);
393
394	int (*reset_hw_queue)(struct amdgpu_mes *mes,
395			      struct mes_reset_queue_input *input);
396};
397
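/*
 * Illustrative sketch, not from the upstream file: a
 * MES_MISC_OP_SET_SHADER_DEBUGGER request assembled from the same arguments
 * as amdgpu_mes_set_shader_debugger() declared further below and issued
 * through ->misc_op(). The helper name is hypothetical.
 */
static inline int mes_example_set_shader_debugger(struct amdgpu_mes *mes,
						  uint64_t process_context_addr,
						  uint32_t spi_gdbg_per_vmid_cntl,
						  const uint32_t *tcp_watch_cntl,
						  uint32_t flags, bool trap_en)
{
	struct mes_misc_op_input op_input = {
		.op = MES_MISC_OP_SET_SHADER_DEBUGGER,
	};
	int i;

	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;
	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	for (i = 0; i < 4; i++)
		op_input.set_shader_debugger.tcp_watch_cntl[i] = tcp_watch_cntl[i];
	op_input.set_shader_debugger.trap_en = trap_en;

	return mes->funcs->misc_op(mes, &op_input);
}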
398#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
399#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
400
401int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
402
403int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
404int amdgpu_mes_init(struct amdgpu_device *adev);
405void amdgpu_mes_fini(struct amdgpu_device *adev);
406
407int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
408			      struct amdgpu_vm *vm);
409void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid);
410
411int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
412			struct amdgpu_mes_gang_properties *gprops,
413			int *gang_id);
414int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id);
415
416int amdgpu_mes_suspend(struct amdgpu_device *adev);
417int amdgpu_mes_resume(struct amdgpu_device *adev);
418
419int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
420			    struct amdgpu_mes_queue_properties *qprops,
421			    int *queue_id);
422int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
423int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
424int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
425				   int me_id, int pipe_id, int queue_id, int vmid);
426
427int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
428				struct amdgpu_ring *ring);
429int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
430				  struct amdgpu_ring *ring,
431				  enum amdgpu_unmap_queues_action action,
432				  u64 gpu_addr, u64 seq);
433int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
434				  struct amdgpu_ring *ring,
435				  unsigned int vmid,
436				  bool use_mmio);
437
438uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
439int amdgpu_mes_wreg(struct amdgpu_device *adev,
440		    uint32_t reg, uint32_t val);
441int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
442			uint32_t val, uint32_t mask);
443int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
444				  uint32_t reg0, uint32_t reg1,
445				  uint32_t ref, uint32_t mask);
446int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
447				uint64_t process_context_addr,
448				uint32_t spi_gdbg_per_vmid_cntl,
449				const uint32_t *tcp_watch_cntl,
450				uint32_t flags,
451				bool trap_en);
452int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
453				uint64_t process_context_addr);
454int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
455			int queue_type, int idx,
456			struct amdgpu_mes_ctx_data *ctx_data,
457			struct amdgpu_ring **out);
458void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
459			    struct amdgpu_ring *ring);
460
461uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
462						   enum amdgpu_mes_priority_level prio);
463
464int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
465				   struct amdgpu_mes_ctx_data *ctx_data);
466void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data);
467int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
468				 struct amdgpu_vm *vm,
469				 struct amdgpu_mes_ctx_data *ctx_data);
470int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
471				   struct amdgpu_mes_ctx_data *ctx_data);
472
473int amdgpu_mes_self_test(struct amdgpu_device *adev);
474
475int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
476
477/*
478 * MES lock can be taken in MMU notifiers.
479 *
480 * A bit more detail about why to set no-FS reclaim with MES lock:
481 *
482 * The purpose of the MMU notifier is to stop GPU access to memory so
483 * that the Linux VM subsystem can move pages around safely. This is
484 * done by preempting user mode queues for the affected process. When
485 * MES is used, MES lock needs to be taken to preempt the queues.
486 *
487 * The MMU notifier callback entry point in the driver is
488 * amdgpu_mn_invalidate_range_start_hsa. The relevant call chain from
489 * there is:
490 * amdgpu_amdkfd_evict_userptr -> kgd2kfd_quiesce_mm ->
491 * kfd_process_evict_queues -> pdd->dev->dqm->ops.evict_process_queues
492 *
493 * The last part of the chain is a function pointer where we take the
494 * MES lock.
495 *
496 * The problem with taking locks in the MMU notifier is that MMU
497 * notifiers can be called in reclaim-FS context. That's where the
498 * kernel frees up pages to make room for new page allocations under
499 * memory pressure. While we are running in reclaim-FS context, we must
500 * not trigger another memory reclaim operation because that would
501 * recursively reenter the reclaim code and cause a deadlock. The
502 * memalloc_noreclaim_save/restore calls guarantee that.
503 *
504 * In addition we also need to avoid lock dependencies on other locks taken
505 * under the MES lock, for example reservation locks. Here is a possible
506 * scenario of a deadlock:
507 * Thread A: takes and holds reservation lock | triggers reclaim-FS |
508 * MMU notifier | blocks trying to take MES lock
509 * Thread B: takes and holds MES lock | blocks trying to take reservation lock
510 *
511 * In this scenario Thread B gets involved in a deadlock even without
512 * triggering a reclaim-FS operation itself.
513 * To fix this and break the lock dependency chain you'd need to either:
514 * 1. protect reservation locks with memalloc_nofs_save/restore, or
515 * 2. avoid taking reservation locks under the MES lock.
516 *
517 * Reservation locks are taken all over the kernel in different subsystems, we
518 * have no control over them and their lock dependencies. So the only workable
519 * solution is to avoid taking other locks under the MES lock.
520 * As a result, make sure no reclaim-FS happens while holding this lock anywhere
521 * to prevent deadlocks when an MMU notifier runs in reclaim-FS context.
522 */
523static inline void amdgpu_mes_lock(struct amdgpu_mes *mes)
524{
525	mutex_lock(&mes->mutex_hidden);
526	mes->saved_flags = memalloc_noreclaim_save();
527}
528
529static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
530{
531	memalloc_noreclaim_restore(mes->saved_flags);
532	mutex_unlock(&mes->mutex_hidden);
533}
534
535bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
536
537int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);
538
539#endif /* __AMDGPU_MES_H__ */