v6.9.4
// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_seq64.h"

#include <drm/drm_exec.h>

/**
 * DOC: amdgpu_seq64
 *
 * amdgpu_seq64 allocates a 64-bit memory slot for each request, in
 * sequence order. The seq64 driver is required for user queue fence
 * memory allocation, TLB counters and VM updates. It holds at most
 * 32768 64-bit slots.
 */

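/*
 * A minimal usage sketch of the pool, assuming only a valid adev; the
 * local names are hypothetical and this block is illustrative rather
 * than upstream code. The slot is CPU-visible once allocated:
 *
 *	u64 va, *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_seq64_alloc(adev, &va, &cpu_addr);
 *	if (r)
 *		return r;
 *	WRITE_ONCE(*cpu_addr, 0);
 *	...
 *	amdgpu_seq64_free(adev, va);
 */
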
/**
 * amdgpu_seq64_get_va_base - Get the seq64 va base address
 *
 * @adev: amdgpu_device pointer
 *
 * Returns:
 * va base address on success
 */
static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
{
	return AMDGPU_VA_RESERVED_SEQ64_START(adev);
}

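/*
 * Slot N of the pool lives at this base plus N * sizeof(u64), as
 * amdgpu_seq64_alloc() computes below. A one-line sketch with a
 * hypothetical slot index:
 *
 *	u64 slot_va = amdgpu_seq64_get_va_base(adev) + slot * sizeof(u64);
 */
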
/**
 * amdgpu_seq64_map - Map the seq64 memory to VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm pointer
 * @bo_va: bo_va pointer
 *
 * Map the seq64 memory to the given VM.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct amdgpu_bo_va **bo_va)
{
	struct amdgpu_bo *bo;
	struct drm_exec exec;
	u64 seq64_addr;
	int r;

	bo = adev->seq64.sbo;
	if (!bo)
		return -EINVAL;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!*bo_va) {
		r = -ENOMEM;
		goto error;
	}

	seq64_addr = amdgpu_seq64_get_va_base(adev);
	r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
			     AMDGPU_PTE_READABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
		amdgpu_vm_bo_del(adev, *bo_va);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, *bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on userq sem\n");
		amdgpu_vm_bo_del(adev, *bo_va);
		goto error;
	}

error:
	drm_exec_fini(&exec);
	return r;
}

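/*
 * A hedged sketch of a caller: per-process setup would map the shared
 * seq64 BO into that process's VM once and keep the returned bo_va for
 * the later unmap. The surrounding fpriv context here is hypothetical:
 *
 *	r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va);
 *	if (r)
 *		goto error_cleanup;
 */
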
/**
 * amdgpu_seq64_unmap - Unmap the seq64 memory
 *
 * @adev: amdgpu_device pointer
 * @fpriv: DRM file private
 *
 * Unmap the seq64 memory from the given VM.
 */
void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
{
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
	struct drm_exec exec;
	int r;

	if (!fpriv->seq64_va)
		return;

	bo = adev->seq64.sbo;
	if (!bo)
		return;

	vm = &fpriv->vm;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	amdgpu_vm_bo_del(adev, fpriv->seq64_va);

	fpriv->seq64_va = NULL;

error:
	drm_exec_fini(&exec);
}

/**
 * amdgpu_seq64_alloc - Allocate a 64-bit memory slot
 *
 * @adev: amdgpu_device pointer
 * @va: VA to access the seq in process address space
 * @cpu_addr: CPU address to access the seq
 *
 * Allocate a 64-bit slot from the seq64 pool.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
{
	unsigned long bit_pos;

	bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem);
	if (bit_pos >= adev->seq64.num_sem)
		return -ENOSPC;

	__set_bit(bit_pos, adev->seq64.used);
	*va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
	*cpu_addr = bit_pos + adev->seq64.cpu_base_addr;

	return 0;
}

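/*
 * Illustrative only, not part of the upstream file: a self-contained
 * round trip through the pool, relying solely on the seq64 API in this
 * file. A failed allocation (-ENOSPC once all 32768 slots are in use)
 * is simply swallowed here.
 */
static void __maybe_unused amdgpu_seq64_roundtrip_sketch(struct amdgpu_device *adev)
{
	u64 va, *cpu_addr;

	if (amdgpu_seq64_alloc(adev, &va, &cpu_addr))
		return;

	/* CPU writes land in the same memory the GPU sees through va. */
	WRITE_ONCE(*cpu_addr, 1);

	amdgpu_seq64_free(adev, va);
}
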
/**
 * amdgpu_seq64_free - Free the given 64-bit memory slot
 *
 * @adev: amdgpu_device pointer
 * @va: gpu start address to be freed
 *
 * Free the given 64-bit slot back to the seq64 pool.
 */
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va)
{
	unsigned long bit_pos;

	bit_pos = (va - amdgpu_seq64_get_va_base(adev)) / sizeof(u64);
	if (bit_pos < adev->seq64.num_sem)
		__clear_bit(bit_pos, adev->seq64.used);
}

/**
 * amdgpu_seq64_fini - Cleanup seq64 driver
 *
 * @adev: amdgpu_device pointer
 *
 * Free the memory space allocated for seq64.
 *
 */
void amdgpu_seq64_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->seq64.sbo,
			      NULL,
			      (void **)&adev->seq64.cpu_base_addr);
}

/**
 * amdgpu_seq64_init - Initialize seq64 driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the required memory space for seq64.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->seq64.sbo)
		return 0;

	/*
	 * AMDGPU_VA_RESERVED_SEQ64_SIZE bytes of backing memory provide
	 * AMDGPU_MAX_SEQ64_SLOTS 64-bit slots.
	 */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &adev->seq64.sbo, NULL,
				    (void **)&adev->seq64.cpu_base_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
		return r;
	}

	memset(adev->seq64.cpu_base_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE);

	adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS;
	memset(&adev->seq64.used, 0, sizeof(adev->seq64.used));

	return 0;
}
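
/*
 * Lifecycle sketch (illustrative, with hypothetical call sites): device
 * bring-up calls amdgpu_seq64_init() once and teardown pairs it with
 * amdgpu_seq64_fini(); the init above is idempotent, so a repeated call
 * simply returns 0.
 *
 *	r = amdgpu_seq64_init(adev);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_seq64_fini(adev);
 */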
v6.8
// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_seq64.h"

#include <drm/drm_exec.h>

/**
 * DOC: amdgpu_seq64
 *
 * amdgpu_seq64 allocates a 64-bit memory slot for each request, in
 * sequence order. The seq64 driver is required for user queue fence
 * memory allocation, TLB counters and VM updates. It holds at most
 * 32768 64-bit slots.
 */

/**
 * amdgpu_seq64_map - Map the seq64 memory to VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm pointer
 * @bo_va: bo_va pointer
 * @seq64_addr: seq64 vaddr start address
 * @size: seq64 pool size
 *
 * Map the seq64 memory to the given VM.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct amdgpu_bo_va **bo_va, u64 seq64_addr,
		     uint32_t size)
{
	struct amdgpu_bo *bo;
	struct drm_exec exec;
	int r;

	bo = adev->seq64.sbo;
	if (!bo)
		return -EINVAL;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!*bo_va) {
		r = -ENOMEM;
		goto error;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, size,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
		amdgpu_vm_bo_del(adev, *bo_va);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, *bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on userq sem\n");
		amdgpu_vm_bo_del(adev, *bo_va);
		goto error;
	}

error:
	drm_exec_fini(&exec);
	return r;
}

/**
 * amdgpu_seq64_unmap - Unmap the seq64 memory
 *
 * @adev: amdgpu_device pointer
 * @fpriv: DRM file private
 *
 * Unmap the seq64 memory from the given VM.
 */
void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
{
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
	struct drm_exec exec;
	int r;

	if (!fpriv->seq64_va)
		return;

	bo = adev->seq64.sbo;
	if (!bo)
		return;

	vm = &fpriv->vm;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	amdgpu_vm_bo_del(adev, fpriv->seq64_va);

	fpriv->seq64_va = NULL;

error:
	drm_exec_fini(&exec);
}

/**
 * amdgpu_seq64_alloc - Allocate a 64-bit memory slot
 *
 * @adev: amdgpu_device pointer
 * @gpu_addr: allocated gpu VA start address
 * @cpu_addr: allocated cpu VA start address
 *
 * Allocate a 64-bit slot from the seq64 pool.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr,
		       u64 **cpu_addr)
{
	unsigned long bit_pos;
	u32 offset;

	bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem);

	if (bit_pos < adev->seq64.num_sem) {
		__set_bit(bit_pos, adev->seq64.used);
		offset = bit_pos << 6; /* convert to qw offset */
	} else {
		return -EINVAL;
	}

	*gpu_addr = offset + AMDGPU_SEQ64_VADDR_START;
	*cpu_addr = offset + adev->seq64.cpu_base_addr;

	return 0;
}

/**
 * amdgpu_seq64_free - Free the given 64-bit memory slot
 *
 * @adev: amdgpu_device pointer
 * @gpu_addr: gpu start address to be freed
 *
 * Free the given 64-bit slot back to the seq64 pool.
 *
 */
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr)
{
	u32 offset;

	offset = gpu_addr - AMDGPU_SEQ64_VADDR_START;

	offset >>= 6;
	if (offset < adev->seq64.num_sem)
		__clear_bit(offset, adev->seq64.used);
}

/**
 * amdgpu_seq64_fini - Cleanup seq64 driver
 *
 * @adev: amdgpu_device pointer
 *
 * Free the memory space allocated for seq64.
 *
 */
void amdgpu_seq64_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->seq64.sbo,
			      NULL,
			      (void **)&adev->seq64.cpu_base_addr);
}

/**
 * amdgpu_seq64_init - Initialize seq64 driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the required memory space for seq64.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->seq64.sbo)
		return 0;

	/*
	 * AMDGPU_SEQ64_SIZE bytes of backing memory provide
	 * AMDGPU_MAX_SEQ64_SLOTS 64-bit slots.
	 */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_SEQ64_SIZE,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &adev->seq64.sbo, NULL,
				    (void **)&adev->seq64.cpu_base_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
		return r;
	}

	memset(adev->seq64.cpu_base_addr, 0, AMDGPU_SEQ64_SIZE);

	adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS;
	memset(&adev->seq64.used, 0, sizeof(adev->seq64.used));

	return 0;
}