v4.17
  1/*
  2 * Copyright 2016 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#include "amdgpu.h"
 25#define MAX_KIQ_REG_WAIT	5000 /* in usecs, 5ms */
 26#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
 27#define MAX_KIQ_REG_TRY 20
 28
 29uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
 30{
 31	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
 32
 33	addr -= AMDGPU_VA_RESERVED_SIZE;
 34
 35	if (addr >= AMDGPU_VA_HOLE_START)
 36		addr |= AMDGPU_VA_HOLE_END;
 37
 38	return addr;
 39}
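/*
 * Illustrative walk-through of amdgpu_csa_vaddr() with made-up numbers (the
 * real constants live in amdgpu.h and are not shown in this file): with a
 * 48-bit VA space, max_pfn << AMDGPU_GPU_PAGE_SHIFT would be
 * 0x0001_0000_0000_0000; subtracting an assumed AMDGPU_VA_RESERVED_SIZE of
 * 1 MiB gives 0x0000_FFFF_FFF0_0000.  That lands at or above
 * AMDGPU_VA_HOLE_START, so OR-ing in AMDGPU_VA_HOLE_END sign-extends the
 * address into the upper canonical half, e.g. 0xFFFF_FFFF_FFF0_0000.  Only
 * the shape of the computation comes from the function above; the concrete
 * values are assumptions for illustration.
 */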
 40
 41bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 42{
 43	/* By now all MMIO pages except the mailbox are blocked if blocking
 44	 * is enabled in the hypervisor. Choose SCRATCH_REG0 to test.
 45	 */
 46	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 47}
 48
 49int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
 50{
 51	int r;
 52	void *ptr;
 53
 54	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
 55				AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
 56				&adev->virt.csa_vmid0_addr, &ptr);
 57	if (r)
 58		return r;
 59
 60	memset(ptr, 0, AMDGPU_CSA_SIZE);
 61	return 0;
 62}
 63
 64void amdgpu_free_static_csa(struct amdgpu_device *adev) {
 65	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
 66						&adev->virt.csa_vmid0_addr,
 67						NULL);
 68}
 69
 70/*
 71 * amdgpu_map_static_csa should be called during amdgpu_vm_init.
 72 * It maps the virtual address returned by amdgpu_csa_vaddr() into this VM,
 73 * and each GFX command submission should use that virtual address in its
 74 * META_DATA init package to support SRIOV gfx preemption.
 75 */
 76int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 77			  struct amdgpu_bo_va **bo_va)
 78{
 79	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
 80	struct ww_acquire_ctx ticket;
 81	struct list_head list;
 82	struct amdgpu_bo_list_entry pd;
 83	struct ttm_validate_buffer csa_tv;
 84	int r;
 85
 86	INIT_LIST_HEAD(&list);
 87	INIT_LIST_HEAD(&csa_tv.head);
 88	csa_tv.bo = &adev->virt.csa_obj->tbo;
 89	csa_tv.shared = true;
 90
 91	list_add(&csa_tv.head, &list);
 92	amdgpu_vm_get_pd_bo(vm, &list, &pd);
 93
 94	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 95	if (r) {
 96		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
 97		return r;
 98	}
 99
100	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
101	if (!*bo_va) {
102		ttm_eu_backoff_reservation(&ticket, &list);
103		DRM_ERROR("failed to create bo_va for static CSA\n");
104		return -ENOMEM;
105	}
106
107	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
108				AMDGPU_CSA_SIZE);
109	if (r) {
110		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
111		amdgpu_vm_bo_rmv(adev, *bo_va);
112		ttm_eu_backoff_reservation(&ticket, &list);
113		return r;
114	}
115
116	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
117			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
118			     AMDGPU_PTE_EXECUTABLE);
119
120	if (r) {
121		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
122		amdgpu_vm_bo_rmv(adev, *bo_va);
123		ttm_eu_backoff_reservation(&ticket, &list);
124		return r;
125	}
126
127	ttm_eu_backoff_reservation(&ticket, &list);
128	return 0;
129}
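/*
 * Illustrative sketch (not part of this file): per the comment above, the
 * mapping is expected to happen while a VM is being initialized, after the
 * device-global CSA buffer has been created with amdgpu_allocate_static_csa().
 * The surrounding function and its bookkeeping are hypothetical.
 */
static int example_vm_init_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va *csa_va = NULL;
	int r;

	/* assumes amdgpu_allocate_static_csa() already ran at device init */
	r = amdgpu_map_static_csa(adev, vm, &csa_va);
	if (r)
		return r;

	/* csa_va would be stashed with the VM/context and unmapped before
	 * amdgpu_free_static_csa() runs at device teardown.
	 */
	return 0;
}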
130
131void amdgpu_virt_init_setting(struct amdgpu_device *adev)
132{
133	/* enable virtual display */
134	adev->mode_info.num_crtc = 1;
135	adev->enable_virtual_display = true;
136	adev->cg_flags = 0;
137	adev->pg_flags = 0;
138}
139
140uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
141{
142	signed long r, cnt = 0;
143	unsigned long flags;
144	uint32_t seq;
145	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
146	struct amdgpu_ring *ring = &kiq->ring;
147
148	BUG_ON(!ring->funcs->emit_rreg);
149
150	spin_lock_irqsave(&kiq->ring_lock, flags);
151	amdgpu_ring_alloc(ring, 32);
152	amdgpu_ring_emit_rreg(ring, reg);
153	amdgpu_fence_emit_polling(ring, &seq);
154	amdgpu_ring_commit(ring);
155	spin_unlock_irqrestore(&kiq->ring_lock, flags);
156
157	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
158
159	/* don't keep waiting in the GPU-reset case, because doing so can
160	 * block the gpu_recover() routine forever: e.g. when this helper is
161	 * triggered from TTM, ttm_bo_lock_delayed_workqueue() will never
162	 * return while we keep waiting here, which leaves gpu_recover()
163	 * hanging.
164	 *
165	 * Also don't keep waiting when called from IRQ context.
166	 */
167	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
168		goto failed_kiq_read;
169
170	if (in_interrupt())
171		might_sleep();
172
173	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
174		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
175		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
176	}
177
178	if (cnt > MAX_KIQ_REG_TRY)
179		goto failed_kiq_read;
180
181	return adev->wb.wb[adev->virt.reg_val_offs];
182
183failed_kiq_read:
184	pr_err("failed to read reg:%x\n", reg);
185	return ~0;
186}
187
188void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
189{
190	signed long r, cnt = 0;
191	unsigned long flags;
192	uint32_t seq;
193	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
194	struct amdgpu_ring *ring = &kiq->ring;
195
196	BUG_ON(!ring->funcs->emit_wreg);
197
198	spin_lock_irqsave(&kiq->ring_lock, flags);
199	amdgpu_ring_alloc(ring, 32);
200	amdgpu_ring_emit_wreg(ring, reg, v);
201	amdgpu_fence_emit_polling(ring, &seq);
202	amdgpu_ring_commit(ring);
203	spin_unlock_irqrestore(&kiq->ring_lock, flags);
204
205	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
206
207	/* don't keep waiting in the GPU-reset case, because doing so can
208	 * block the gpu_recover() routine forever: e.g. when this helper is
209	 * triggered from TTM, ttm_bo_lock_delayed_workqueue() will never
210	 * return while we keep waiting here, which leaves gpu_recover()
211	 * hanging.
212	 *
213	 * Also don't keep waiting when called from IRQ context.
214	 */
215	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
216		goto failed_kiq_write;
217
218	if (in_interrupt())
219		might_sleep();
220
221	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
222
223		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
224		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
225	}
226
227	if (cnt > MAX_KIQ_REG_TRY)
228		goto failed_kiq_write;
229
230	return;
231
232failed_kiq_write:
233	pr_err("failed to write reg:%x\n", reg);
234}
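/*
 * Illustrative sketch (not part of this file): the two KIQ helpers above are
 * intended for SR-IOV VFs that cannot touch most registers through direct
 * MMIO.  A hypothetical accessor might route around MMIO like this;
 * amdgpu_sriov_runtime() and WREG32_NO_KIQ() are assumed from elsewhere in
 * the driver, and the exact policy shown here is a guess.
 */
static uint32_t example_reg_read(struct amdgpu_device *adev, uint32_t reg)
{
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	return RREG32_NO_KIQ(reg);
}

static void example_reg_write(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_runtime(adev))
		amdgpu_virt_kiq_wreg(adev, reg, v);
	else
		WREG32_NO_KIQ(reg, v);
}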
235
236/**
237 * amdgpu_virt_request_full_gpu() - request full gpu access
238 * @amdgpu:	amdgpu device.
239 * @init:	is driver init time.
240 * When start to init/fini driver, first need to request full gpu access.
241 * Return: Zero if request success, otherwise will return error.
242 */
243int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
244{
245	struct amdgpu_virt *virt = &adev->virt;
246	int r;
247
248	if (virt->ops && virt->ops->req_full_gpu) {
249		r = virt->ops->req_full_gpu(adev, init);
250		if (r)
251			return r;
252
253		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
254	}
255
256	return 0;
257}
258
259/**
260 * amdgpu_virt_release_full_gpu() - release full gpu access
261 * @amdgpu:	amdgpu device.
262 * @init:	is driver init time.
263 * When finishing driver init/fini, need to release full gpu access.
264 * Return: Zero if release success, otherwise will return error.
265 */
266int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
267{
268	struct amdgpu_virt *virt = &adev->virt;
269	int r;
270
271	if (virt->ops && virt->ops->rel_full_gpu) {
272		r = virt->ops->rel_full_gpu(adev, init);
273		if (r)
274			return r;
275
276		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
277	}
278	return 0;
279}
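/*
 * Illustrative sketch (not part of this file): request/release are meant to
 * bracket driver init (and, symmetrically, fini) as described in the
 * kernel-doc above.  The wrapper below is hypothetical.
 */
static int example_hw_init_under_sriov(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_virt_request_full_gpu(adev, true);	/* init == true */
	if (r)
		return r;

	/* ... init work that needs exclusive access to the GPU ... */

	return amdgpu_virt_release_full_gpu(adev, true);
}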
280
281/**
282 * amdgpu_virt_reset_gpu() - reset gpu
283 * @amdgpu:	amdgpu device.
284 * Send reset command to GPU hypervisor to reset GPU that VM is using
285 * Return: Zero if reset success, otherwise will return error.
286 */
287int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
288{
289	struct amdgpu_virt *virt = &adev->virt;
290	int r;
291
292	if (virt->ops && virt->ops->reset_gpu) {
293		r = virt->ops->reset_gpu(adev);
294		if (r)
295			return r;
296
297		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
298	}
299
300	return 0;
301}
302
303/**
304 * amdgpu_virt_wait_reset() - wait for reset gpu completed
305 * @amdgpu:	amdgpu device.
306 * Wait for GPU reset completed.
307 * Return: Zero if reset success, otherwise will return error.
308 */
309int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
310{
311	struct amdgpu_virt *virt = &adev->virt;
312
313	if (!virt->ops || !virt->ops->wait_reset)
314		return -EINVAL;
315
316	return virt->ops->wait_reset(adev);
317}
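/*
 * Illustrative sketch (not part of this file): one plausible VF-side recovery
 * step pairs the two helpers above, asking the hypervisor for a reset and
 * then blocking until it reports completion.  Whether a real recovery path
 * does exactly this is an assumption.
 */
static int example_vf_reset(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_virt_reset_gpu(adev);
	if (r)
		return r;

	return amdgpu_virt_wait_reset(adev);
}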
318
319/**
320 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
321 * @amdgpu:	amdgpu device.
322 * MM table is used by UVD and VCE for its initialization
323 * Return: Zero if allocate success.
324 */
325int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
326{
327	int r;
328
329	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
330		return 0;
331
332	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
333				    AMDGPU_GEM_DOMAIN_VRAM,
334				    &adev->virt.mm_table.bo,
335				    &adev->virt.mm_table.gpu_addr,
336				    (void *)&adev->virt.mm_table.cpu_addr);
337	if (r) {
338		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
339		return r;
340	}
341
342	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
343	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
344		 adev->virt.mm_table.gpu_addr,
345		 adev->virt.mm_table.cpu_addr);
346	return 0;
347}
348
349/**
350 * amdgpu_virt_free_mm_table() - free mm table memory
351 * @amdgpu:	amdgpu device.
352 * Free MM table memory
353 */
354void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
355{
356	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
357		return;
358
359	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
360			      &adev->virt.mm_table.gpu_addr,
361			      (void *)&adev->virt.mm_table.cpu_addr);
362	adev->virt.mm_table.gpu_addr = 0;
363}
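/*
 * Illustrative sketch (not part of this file): per the kernel-doc above, the
 * MM table only matters for SR-IOV VFs and backs UVD/VCE initialization.  A
 * hypothetical IP block could allocate it in sw_init and drop it in sw_fini;
 * both helpers already return early on bare metal.
 */
static int example_mm_sw_init(struct amdgpu_device *adev)
{
	return amdgpu_virt_alloc_mm_table(adev);
}

static void example_mm_sw_fini(struct amdgpu_device *adev)
{
	amdgpu_virt_free_mm_table(adev);
}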
364
365
366int amdgpu_virt_fw_reserve_get_checksum(void *obj,
367					unsigned long obj_size,
368					unsigned int key,
369					unsigned int chksum)
370{
371	unsigned int ret = key;
372	unsigned long i = 0;
373	unsigned char *pos;
374
375	pos = (char *)obj;
376	/* calculate checksum */
377	for (i = 0; i < obj_size; ++i)
378		ret += *(pos + i);
379	/* minus the chksum itself */
380	pos = (char *)&chksum;
381	for (i = 0; i < sizeof(chksum); ++i)
382		ret -= *(pos + i);
383	return ret;
384}
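/*
 * Illustrative sketch (not part of this file): the checksum above is a plain
 * byte sum of the object plus a key, with the bytes of the passed-in checksum
 * subtracted back out, so the checksum field stored inside the message does
 * not feed into its own verification.  A hypothetical validity check mirrors
 * what amdgpu_virt_init_data_exchange() does below:
 */
static bool example_pf2vf_checksum_ok(void *msg, unsigned long size,
				      unsigned int key, unsigned int stored)
{
	/* recompute over the same bytes; a matching value validates the message */
	return (unsigned int)amdgpu_virt_fw_reserve_get_checksum(msg, size,
								 key, stored) == stored;
}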
385
386void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
387{
388	uint32_t pf2vf_size = 0;
389	uint32_t checksum = 0;
390	uint32_t checkval;
391	char *str;
392
393	adev->virt.fw_reserve.p_pf2vf = NULL;
394	adev->virt.fw_reserve.p_vf2pf = NULL;
395
396	if (adev->fw_vram_usage.va != NULL) {
397		adev->virt.fw_reserve.p_pf2vf =
398			(struct amdgim_pf2vf_info_header *)(
399			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
400		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
401		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
402		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
403
404		/* pf2vf message must be in 4K */
405		if (pf2vf_size > 0 && pf2vf_size < 4096) {
406			checkval = amdgpu_virt_fw_reserve_get_checksum(
407				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
408				adev->virt.fw_reserve.checksum_key, checksum);
409			if (checkval == checksum) {
410				adev->virt.fw_reserve.p_vf2pf =
411					((void *)adev->virt.fw_reserve.p_pf2vf +
412					pf2vf_size);
413				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
414					sizeof(amdgim_vf2pf_info));
415				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
416					AMDGPU_FW_VRAM_VF2PF_VER);
417				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
418					sizeof(amdgim_vf2pf_info));
419				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
420					&str);
421#ifdef MODULE
422				if (THIS_MODULE->version != NULL)
423					strcpy(str, THIS_MODULE->version);
424				else
425#endif
426					strcpy(str, "N/A");
427				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
428					0);
429				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
430					amdgpu_virt_fw_reserve_get_checksum(
431					adev->virt.fw_reserve.p_vf2pf,
432					pf2vf_size,
433					adev->virt.fw_reserve.checksum_key, 0));
434			}
435		}
436	}
437}
438
439
v5.4
  1/*
  2 * Copyright 2016 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#include <linux/module.h>
 25
 26#include <drm/drm_drv.h>
 27
 28#include "amdgpu.h"
 29
 30bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 31{
 32	/* By now all MMIO pages except the mailbox are blocked if blocking
 33	 * is enabled in the hypervisor. Choose SCRATCH_REG0 to test.
 34	 */
 35	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 36}
 37
 38void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 39{
 40	/* enable virtual display */
 41	adev->mode_info.num_crtc = 1;
 42	adev->enable_virtual_display = true;
 43	adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
 44	adev->cg_flags = 0;
 45	adev->pg_flags = 0;
 46}
 47
 48uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 49{
 50	signed long r, cnt = 0;
 51	unsigned long flags;
 52	uint32_t seq;
 53	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 54	struct amdgpu_ring *ring = &kiq->ring;
 55
 56	BUG_ON(!ring->funcs->emit_rreg);
 57
 58	spin_lock_irqsave(&kiq->ring_lock, flags);
 59	amdgpu_ring_alloc(ring, 32);
 60	amdgpu_ring_emit_rreg(ring, reg);
 61	amdgpu_fence_emit_polling(ring, &seq);
 62	amdgpu_ring_commit(ring);
 63	spin_unlock_irqrestore(&kiq->ring_lock, flags);
 64
 65	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
 66
 67	/* don't keep waiting in the GPU-reset case, because doing so can
 68	 * block the gpu_recover() routine forever: e.g. when this helper is
 69	 * triggered from TTM, ttm_bo_lock_delayed_workqueue() will never
 70	 * return while we keep waiting here, which leaves gpu_recover()
 71	 * hanging.
 72	 *
 73	 * Also don't keep waiting when called from IRQ context.
 74	 */
 75	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
 76		goto failed_kiq_read;
 77
 78	might_sleep();
 79	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
 80		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
 81		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
 82	}
 83
 84	if (cnt > MAX_KIQ_REG_TRY)
 85		goto failed_kiq_read;
 86
 87	return adev->wb.wb[adev->virt.reg_val_offs];
 88
 89failed_kiq_read:
 90	pr_err("failed to read reg:%x\n", reg);
 91	return ~0;
 92}
 93
 94void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 95{
 96	signed long r, cnt = 0;
 97	unsigned long flags;
 98	uint32_t seq;
 99	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
100	struct amdgpu_ring *ring = &kiq->ring;
101
102	BUG_ON(!ring->funcs->emit_wreg);
103
104	spin_lock_irqsave(&kiq->ring_lock, flags);
105	amdgpu_ring_alloc(ring, 32);
106	amdgpu_ring_emit_wreg(ring, reg, v);
107	amdgpu_fence_emit_polling(ring, &seq);
108	amdgpu_ring_commit(ring);
109	spin_unlock_irqrestore(&kiq->ring_lock, flags);
110
111	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
112
113	/* don't keep waiting in the GPU-reset case, because doing so can
114	 * block the gpu_recover() routine forever: e.g. when this helper is
115	 * triggered from TTM, ttm_bo_lock_delayed_workqueue() will never
116	 * return while we keep waiting here, which leaves gpu_recover()
117	 * hanging.
118	 *
119	 * Also don't keep waiting when called from IRQ context.
120	 */
121	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
122		goto failed_kiq_write;
123
124	might_sleep();
125	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
126
127		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
128		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
129	}
130
131	if (cnt > MAX_KIQ_REG_TRY)
132		goto failed_kiq_write;
133
134	return;
135
136failed_kiq_write:
137	pr_err("failed to write reg:%x\n", reg);
138}
139
140void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
141					uint32_t reg0, uint32_t reg1,
142					uint32_t ref, uint32_t mask)
143{
144	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
145	struct amdgpu_ring *ring = &kiq->ring;
146	signed long r, cnt = 0;
147	unsigned long flags;
148	uint32_t seq;
149
150	spin_lock_irqsave(&kiq->ring_lock, flags);
151	amdgpu_ring_alloc(ring, 32);
152	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
153					    ref, mask);
154	amdgpu_fence_emit_polling(ring, &seq);
155	amdgpu_ring_commit(ring);
156	spin_unlock_irqrestore(&kiq->ring_lock, flags);
157
158	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
159
160	/* don't wait anymore for IRQ context */
161	if (r < 1 && in_interrupt())
162		goto failed_kiq;
163
164	might_sleep();
165	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
166
167		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
168		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
169	}
170
171	if (cnt > MAX_KIQ_REG_TRY)
172		goto failed_kiq;
173
174	return;
175
176failed_kiq:
177	pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
178}
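/*
 * Illustrative sketch (not part of this file): the helper above emits one KIQ
 * sequence that writes reg0 and then polls reg1 against ref/mask.  A typical
 * shape for that is a request/acknowledge register pair; the wrapper and the
 * exact semantics of the underlying packet are assumptions here.
 */
static void example_req_then_wait_ack(struct amdgpu_device *adev,
				      uint32_t req_reg, uint32_t ack_reg,
				      uint32_t bit)
{
	/* write the request, then wait until the same bit shows up in the ack */
	amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg, bit, bit);
}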
179
180/**
181 * amdgpu_virt_request_full_gpu() - request full gpu access
182 * @amdgpu:	amdgpu device.
183 * @init:	is driver init time.
184 * When start to init/fini driver, first need to request full gpu access.
185 * Return: Zero if request success, otherwise will return error.
186 */
187int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
188{
189	struct amdgpu_virt *virt = &adev->virt;
190	int r;
191
192	if (virt->ops && virt->ops->req_full_gpu) {
193		r = virt->ops->req_full_gpu(adev, init);
194		if (r)
195			return r;
196
197		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
198	}
199
200	return 0;
201}
202
203/**
204 * amdgpu_virt_release_full_gpu() - release full gpu access
205 * @amdgpu:	amdgpu device.
206 * @init:	is driver init time.
207 * When finishing driver init/fini, need to release full gpu access.
208 * Return: Zero if release success, otherwise will return error.
209 */
210int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
211{
212	struct amdgpu_virt *virt = &adev->virt;
213	int r;
214
215	if (virt->ops && virt->ops->rel_full_gpu) {
216		r = virt->ops->rel_full_gpu(adev, init);
217		if (r)
218			return r;
219
220		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
221	}
222	return 0;
223}
224
225/**
226 * amdgpu_virt_reset_gpu() - reset gpu
227 * @amdgpu:	amdgpu device.
228 * Send reset command to GPU hypervisor to reset GPU that VM is using
229 * Return: Zero if reset success, otherwise will return error.
230 */
231int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
232{
233	struct amdgpu_virt *virt = &adev->virt;
234	int r;
235
236	if (virt->ops && virt->ops->reset_gpu) {
237		r = virt->ops->reset_gpu(adev);
238		if (r)
239			return r;
240
241		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
242	}
243
244	return 0;
245}
246
247/**
248 * amdgpu_virt_wait_reset() - wait for reset gpu completed
249 * @amdgpu:	amdgpu device.
250 * Wait for GPU reset completed.
251 * Return: Zero if reset success, otherwise will return error.
252 */
253int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
254{
255	struct amdgpu_virt *virt = &adev->virt;
256
257	if (!virt->ops || !virt->ops->wait_reset)
258		return -EINVAL;
259
260	return virt->ops->wait_reset(adev);
261}
262
263/**
264 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
265 * @amdgpu:	amdgpu device.
266 * MM table is used by UVD and VCE for its initialization
267 * Return: Zero if allocate success.
268 */
269int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
270{
271	int r;
272
273	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
274		return 0;
275
276	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
277				    AMDGPU_GEM_DOMAIN_VRAM,
278				    &adev->virt.mm_table.bo,
279				    &adev->virt.mm_table.gpu_addr,
280				    (void *)&adev->virt.mm_table.cpu_addr);
281	if (r) {
282		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
283		return r;
284	}
285
286	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
287	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
288		 adev->virt.mm_table.gpu_addr,
289		 adev->virt.mm_table.cpu_addr);
290	return 0;
291}
292
293/**
294 * amdgpu_virt_free_mm_table() - free mm table memory
295 * @amdgpu:	amdgpu device.
296 * Free MM table memory
297 */
298void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
299{
300	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
301		return;
302
303	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
304			      &adev->virt.mm_table.gpu_addr,
305			      (void *)&adev->virt.mm_table.cpu_addr);
306	adev->virt.mm_table.gpu_addr = 0;
307}
308
309
310int amdgpu_virt_fw_reserve_get_checksum(void *obj,
311					unsigned long obj_size,
312					unsigned int key,
313					unsigned int chksum)
314{
315	unsigned int ret = key;
316	unsigned long i = 0;
317	unsigned char *pos;
318
319	pos = (char *)obj;
320	/* calculate checksum */
321	for (i = 0; i < obj_size; ++i)
322		ret += *(pos + i);
323	/* minus the chksum itself */
324	pos = (char *)&chksum;
325	for (i = 0; i < sizeof(chksum); ++i)
326		ret -= *(pos + i);
327	return ret;
328}
329
330void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
331{
332	uint32_t pf2vf_size = 0;
333	uint32_t checksum = 0;
334	uint32_t checkval;
335	char *str;
336
337	adev->virt.fw_reserve.p_pf2vf = NULL;
338	adev->virt.fw_reserve.p_vf2pf = NULL;
339
340	if (adev->fw_vram_usage.va != NULL) {
341		adev->virt.fw_reserve.p_pf2vf =
342			(struct amd_sriov_msg_pf2vf_info_header *)(
343			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
344		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
345		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
346		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
347
348		/* pf2vf message must be in 4K */
349		if (pf2vf_size > 0 && pf2vf_size < 4096) {
350			checkval = amdgpu_virt_fw_reserve_get_checksum(
351				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
352				adev->virt.fw_reserve.checksum_key, checksum);
353			if (checkval == checksum) {
354				adev->virt.fw_reserve.p_vf2pf =
355					((void *)adev->virt.fw_reserve.p_pf2vf +
356					pf2vf_size);
357				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
358					sizeof(amdgim_vf2pf_info));
359				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
360					AMDGPU_FW_VRAM_VF2PF_VER);
361				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
362					sizeof(amdgim_vf2pf_info));
363				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
364					&str);
365#ifdef MODULE
366				if (THIS_MODULE->version != NULL)
367					strcpy(str, THIS_MODULE->version);
368				else
369#endif
370					strcpy(str, "N/A");
371				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
372					0);
373				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
374					amdgpu_virt_fw_reserve_get_checksum(
375					adev->virt.fw_reserve.p_vf2pf,
376					pf2vf_size,
377					adev->virt.fw_reserve.checksum_key, 0));
378			}
379		}
380	}
381}
382
383static uint32_t parse_clk(char *buf, bool min)
384{
385	char *ptr = buf;
386	uint32_t clk = 0;
387
388	do {
389		ptr = strchr(ptr, ':');
390		if (!ptr)
391			break;
392		ptr += 2;
393		if (kstrtou32(ptr, 10, &clk))
394			return 0;
395	} while (!min);
396
397	return clk * 100;
398}
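/*
 * Illustrative walk-through of parse_clk() (assumptions flagged inline): the
 * loop advances past each ':' it finds, skips the following two characters,
 * and parses a decimal value; with min == true it stops after the first
 * entry, otherwise it keeps going until no ':' remains, so the last entry
 * wins.  kstrtou32() only succeeds when the remaining text is purely numeric
 * (an optional trailing newline aside), so the exact buffer format produced
 * by get_pp_clk() -- not shown in this file -- decides what actually parses.
 * If an entry yields, say, 600, the function returns 600 * 100 = 60000,
 * presumably the 10 kHz units used elsewhere in amdgpu (an assumption based
 * on the factor, not stated here).
 */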
399
400uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
401{
402	char *buf = NULL;
403	uint32_t clk = 0;
404
405	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
406	if (!buf)
407		return -ENOMEM;
408
409	adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
410	clk = parse_clk(buf, lowest);
411
412	kfree(buf);
413
414	return clk;
415}
416
417uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
418{
419	char *buf = NULL;
420	uint32_t clk = 0;
421
422	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
423	if (!buf)
424		return -ENOMEM;
425
426	adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
427	clk = parse_clk(buf, lowest);
428
429	kfree(buf);
430
431	return clk;
432}
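/*
 * Illustrative sketch (not part of this file): a hypothetical caller querying
 * the SR-IOV clock levels.  Note that both helpers are declared uint32_t yet
 * return -ENOMEM when the temporary buffer cannot be allocated, so a careful
 * caller would have to treat such values specially; the units are whatever
 * parse_clk() produces (see the note above).
 */
static void example_report_clocks(struct amdgpu_device *adev)
{
	uint32_t sclk_min = amdgpu_virt_get_sclk(adev, true);
	uint32_t sclk_max = amdgpu_virt_get_sclk(adev, false);
	uint32_t mclk_max = amdgpu_virt_get_mclk(adev, false);

	DRM_INFO("virt clocks: sclk %u..%u, mclk up to %u\n",
		 sclk_min, sclk_max, mclk_max);
}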