/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)

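/*
 * Illustrative sketch (not part of the driver): with a block_size of 9,
 * AMDGPU_VM_PTE_COUNT() yields 1 << 9 = 512 entries per page table,
 * i.e. 512 * 4KiB = 2MiB of address space per PTB. Larger updates are
 * assumed to be split against AMDGPU_VM_MAX_UPDATE_SIZE, roughly like:
 *
 *	while (count) {
 *		unsigned n = min(count, (unsigned)AMDGPU_VM_MAX_UPDATE_SIZE);
 *
 *		submit_update(pe, addr, n);	// hypothetical helper
 *		pe += n * 8;			// 8 bytes per PTE
 *		addr += (uint64_t)n * incr;
 *		count -= n;
 *	}
 */
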
#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)(a) << 59)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM      \
                                | AMDGPU_PTE_SNOOPED    \
                                | AMDGPU_PTE_EXECUTABLE \
                                | AMDGPU_PTE_READABLE   \
                                | AMDGPU_PTE_WRITEABLE  \
                                | AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

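/*
 * Illustrative sketch (not part of this header): a writable, snooped
 * system page might combine the bits above roughly as
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
 *			 AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_FRAG(9);
 *
 * where FRAG(9) is assumed to describe a 2MiB contiguous fragment
 * (the field encoding log2 of the fragment size in 4KiB pages).
 */
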
/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)

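/*
 * Illustrative sketch: the MTYPE masks above allow replacing the
 * memory-type field of an existing PTE without touching other bits,
 * roughly:
 *
 *	flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
 *	flags |= AMDGPU_PTE_MTYPE_NV10(mtype);	// mtype: hypothetical value
 */
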
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS			3
#define AMDGPU_GFXHUB_0				0
#define AMDGPU_MMHUB_0				1
#define AMDGPU_MMHUB_1				2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE			(1ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX 0
#define AMDGPU_VM_CONTEXT_COMPUTE 1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

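/*
 * Illustrative sketch: the enum is ordered from the top of the
 * hierarchy down, so the number of levels below a given one follows
 * directly from the enumerators, e.g.
 *
 *	enum amdgpu_vm_level level = AMDGPU_VM_PDB0;
 *	unsigned int levels_below = AMDGPU_VM_PTB - level;	// 1: only the PTB
 */
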
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by the BO being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base	base;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt		*entries;
};

/* provided by hw blocks that can write PTEs, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy PTE entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write one PTE at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear PTE/PDE updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

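/*
 * Illustrative sketch (hypothetical engine, not real code): a HW block
 * advertises its PTE writers by filling in this table, e.g.
 *
 *	static const struct amdgpu_vm_pte_funcs foo_vm_pte_funcs = {
 *		.copy_pte_num_dw = 7,
 *		.copy_pte = foo_vm_copy_pte,
 *		.write_pte = foo_vm_write_pte,
 *		.set_pte_pde = foo_vm_set_pte_pde,
 *	};
 *
 * and the device then points adev->vm_manager.vm_pte_funcs at it.
 */
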
struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulates some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for HW submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
		       struct dma_fence *exclusive);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};

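/*
 * Illustrative sketch (assumed flow, simplified error handling): a VM
 * table update through either backend follows the same sequence:
 *
 *	struct amdgpu_vm_update_params p = { .adev = adev, .vm = vm };
 *	const struct amdgpu_vm_update_funcs *f = vm->update_funcs;
 *
 *	r = f->map_table(bo);			// make the PT BO writable
 *	if (!r)
 *		r = f->prepare(&p, owner, exclusive);
 *	if (!r)
 *		r = f->update(&p, bo, pe, addr, count, incr, flags);
 *	if (!r)
 *		r = f->commit(&p, &fence);	// submit and return a fence
 */
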
struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* BOs which need validation */
	struct list_head	evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;
	spinlock_t		invalidated_lock;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_pt	root;
	struct dma_fence	*last_update;

	/* Scheduler entity for page table updates */
	struct drm_sched_entity	entity;

	unsigned int		pasid;
	/* dedicated to vm */
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info task_info;

	/* Store positions of a group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* mark whether the bulk move can be done */
	bool			bulk_moveable;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_sched_rq			*vm_pte_rqs[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rqs;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct idr				pasid_idr;
	spinlock_t				pasid_lock;

	/* counter of memory mapped through xgmi */
	uint32_t				xgmi_map_counter;
	struct mutex				lock_pstate;
};

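/*
 * Illustrative sketch: decoding vm_update_mode with the
 * AMDGPU_VM_USE_CPU_FOR_* bits defined above, e.g.
 *
 *	bool gfx_uses_cpu = adev->vm_manager.vm_update_mode &
 *			    AMDGPU_VM_USE_CPU_FOR_GFX;
 *	bool compute_uses_cpu = adev->vm_manager.vm_update_mode &
 *				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
 */
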
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))

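/*
 * Illustrative sketch (hypothetical values): writing 512 linearly
 * increasing PTEs through the wrapper macros above might look like
 *
 *	amdgpu_vm_set_pte_pde(adev, ib, pe, addr, 512, AMDGPU_GPU_PAGE_SIZE,
 *			      AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE);
 *
 * where incr is the per-entry address increment in bytes.
 */
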
extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			     struct amdgpu_task_info *task_info);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);

#endif
414#endif