v5.14.15
  1/*
  2 * Copyright 2016 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#include <linux/module.h>
 25
 26#include <drm/drm_drv.h>
 27
 28#include "amdgpu.h"
 29#include "amdgpu_ras.h"
 30#include "vi.h"
 31#include "soc15.h"
 32#include "nv.h"
 33
 34#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
 35	do { \
 36		vf2pf_info->ucode_info[ucode].id = ucode; \
 37		vf2pf_info->ucode_info[ucode].version = ver; \
 38	} while (0)
 39
 40bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 41{
 42	/* By now all MMIO pages except mailbox are blocked */
 43	/* if blocking is enabled in hypervisor. Choose the */
 44	/* SCRATCH_REG0 to test. */
 45	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 46}
 47
 48void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 49{
 50	struct drm_device *ddev = adev_to_drm(adev);
 51
 52	/* enable virtual display */
 53	if (adev->asic_type != CHIP_ALDEBARAN &&
 54	    adev->asic_type != CHIP_ARCTURUS) {
 55		if (adev->mode_info.num_crtc == 0)
 56			adev->mode_info.num_crtc = 1;
 57		adev->enable_virtual_display = true;
 58	}
 59	ddev->driver_features &= ~DRIVER_ATOMIC;
 60	adev->cg_flags = 0;
 61	adev->pg_flags = 0;
 62}
 63
 64void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
 65					uint32_t reg0, uint32_t reg1,
 66					uint32_t ref, uint32_t mask)
 67{
 68	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 69	struct amdgpu_ring *ring = &kiq->ring;
 70	signed long r, cnt = 0;
 71	unsigned long flags;
 72	uint32_t seq;
 73
 74	spin_lock_irqsave(&kiq->ring_lock, flags);
 75	amdgpu_ring_alloc(ring, 32);
 76	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
 77					    ref, mask);
 78	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
 79	if (r)
 80		goto failed_undo;
 81
 82	amdgpu_ring_commit(ring);
 83	spin_unlock_irqrestore(&kiq->ring_lock, flags);
 84
 85	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
 86
 87	/* don't wait anymore for IRQ context */
 88	if (r < 1 && in_interrupt())
 89		goto failed_kiq;
 90
 91	might_sleep();
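	/* Not in IRQ context: keep polling the fence, sleeping
	 * MAX_KIQ_REG_BAILOUT_INTERVAL ms between attempts, for up to
	 * MAX_KIQ_REG_TRY tries before giving up.
	 */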
 92	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
 93
 94		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
 95		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
 96	}
 97
 98	if (cnt > MAX_KIQ_REG_TRY)
 99		goto failed_kiq;
100
101	return;
102
103failed_undo:
104	amdgpu_ring_undo(ring);
105	spin_unlock_irqrestore(&kiq->ring_lock, flags);
106failed_kiq:
107	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
108}
109
110/**
111 * amdgpu_virt_request_full_gpu() - request full gpu access
112 * @adev:	amdgpu device.
113 * @init:	is driver init time.
114 * When starting to init/fini the driver, full GPU access needs to be requested first.
115 * Return: Zero if request success, otherwise will return error.
116 */
117int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
118{
119	struct amdgpu_virt *virt = &adev->virt;
120	int r;
121
122	if (virt->ops && virt->ops->req_full_gpu) {
123		r = virt->ops->req_full_gpu(adev, init);
124		if (r)
125			return r;
126
127		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
128	}
129
130	return 0;
131}
132
133/**
134 * amdgpu_virt_release_full_gpu() - release full gpu access
135 * @adev:	amdgpu device.
136 * @init:	is driver init time.
137 * When finishing driver init/fini, full GPU access needs to be released.
138 * Return: Zero if release success, otherwise will return error.
139 */
140int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
141{
142	struct amdgpu_virt *virt = &adev->virt;
143	int r;
144
145	if (virt->ops && virt->ops->rel_full_gpu) {
146		r = virt->ops->rel_full_gpu(adev, init);
147		if (r)
148			return r;
149
150		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
151	}
152	return 0;
153}
154
155/**
156 * amdgpu_virt_reset_gpu() - reset gpu
157 * @adev:	amdgpu device.
158 * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
159 * Return: Zero if reset success, otherwise will return error.
160 */
161int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
162{
163	struct amdgpu_virt *virt = &adev->virt;
164	int r;
165
166	if (virt->ops && virt->ops->reset_gpu) {
167		r = virt->ops->reset_gpu(adev);
168		if (r)
169			return r;
170
171		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
172	}
173
174	return 0;
175}
176
177void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
178{
179	struct amdgpu_virt *virt = &adev->virt;
180
181	if (virt->ops && virt->ops->req_init_data)
182		virt->ops->req_init_data(adev);
183
184	if (adev->virt.req_init_data_ver > 0)
185		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
186	else
187		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
188}
189
190/**
191 * amdgpu_virt_wait_reset() - wait for GPU reset to complete
192 * @adev:	amdgpu device.
193 * Wait for the GPU reset to complete.
194 * Return: Zero if reset success, otherwise will return error.
195 */
196int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
197{
198	struct amdgpu_virt *virt = &adev->virt;
199
200	if (!virt->ops || !virt->ops->wait_reset)
201		return -EINVAL;
202
203	return virt->ops->wait_reset(adev);
204}
205
206/**
207 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
208 * @adev:	amdgpu device.
209 * The MM table is used by UVD and VCE for their initialization.
210 * Return: Zero if allocation succeeds.
211 */
212int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
213{
214	int r;
215
216	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
217		return 0;
218
219	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
220				    AMDGPU_GEM_DOMAIN_VRAM,
221				    &adev->virt.mm_table.bo,
222				    &adev->virt.mm_table.gpu_addr,
223				    (void *)&adev->virt.mm_table.cpu_addr);
224	if (r) {
225		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
226		return r;
227	}
228
229	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
230	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
231		 adev->virt.mm_table.gpu_addr,
232		 adev->virt.mm_table.cpu_addr);
233	return 0;
234}
235
236/**
237 * amdgpu_virt_free_mm_table() - free mm table memory
238 * @adev:	amdgpu device.
239 * Free MM table memory
240 */
241void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
242{
243	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
244		return;
245
246	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
247			      &adev->virt.mm_table.gpu_addr,
248			      (void *)&adev->virt.mm_table.cpu_addr);
249	adev->virt.mm_table.gpu_addr = 0;
250}
251
252
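/*
 * Simple byte-sum checksum over the PF2VF/VF2PF message, seeded with @key.
 * The checksum field itself is part of the summed buffer, so its bytes are
 * subtracted again to make the result independent of the value stored there.
 */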
253unsigned int amd_sriov_msg_checksum(void *obj,
254				unsigned long obj_size,
255				unsigned int key,
256				unsigned int checksum)
257{
258	unsigned int ret = key;
259	unsigned long i = 0;
260	unsigned char *pos;
261
262	pos = (char *)obj;
263	/* calculate checksum */
264	for (i = 0; i < obj_size; ++i)
265		ret += *(pos + i);
266	/* minus the checksum itself */
267	pos = (char *)&checksum;
268	for (i = 0; i < sizeof(checksum); ++i)
269		ret -= *(pos + i);
270	return ret;
271}
272
273static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
274{
275	struct amdgpu_virt *virt = &adev->virt;
276	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
277	/* GPU will be marked bad on host if bp count is more than 10,
278	 * so allocating 512 entries is enough.
279	 */
280	unsigned int align_space = 512;
281	void *bps = NULL;
282	struct amdgpu_bo **bps_bo = NULL;
283
284	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
285	if (!*data)
286		return -ENOMEM;
287
288	bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
289	bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
290
291	if (!bps || !bps_bo) {
292		kfree(bps);
293		kfree(bps_bo);
294		kfree(*data);
295		return -ENOMEM;
296	}
297
298	(*data)->bps = bps;
299	(*data)->bps_bo = bps_bo;
300	(*data)->count = 0;
301	(*data)->last_reserved = 0;
302
303	virt->ras_init_done = true;
304
305	return 0;
306}
307
308static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
309{
310	struct amdgpu_virt *virt = &adev->virt;
311	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
312	struct amdgpu_bo *bo;
313	int i;
314
315	if (!data)
316		return;
317
318	for (i = data->last_reserved - 1; i >= 0; i--) {
319		bo = data->bps_bo[i];
320		amdgpu_bo_free_kernel(&bo, NULL, NULL);
321		data->bps_bo[i] = bo;
322		data->last_reserved = i;
323	}
324}
325
326void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
327{
328	struct amdgpu_virt *virt = &adev->virt;
329	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
330
331	virt->ras_init_done = false;
332
333	if (!data)
334		return;
335
336	amdgpu_virt_ras_release_bp(adev);
337
338	kfree(data->bps);
339	kfree(data->bps_bo);
340	kfree(data);
341	virt->virt_eh_data = NULL;
342}
343
344static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
345		struct eeprom_table_record *bps, int pages)
346{
347	struct amdgpu_virt *virt = &adev->virt;
348	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
349
350	if (!data)
351		return;
352
353	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
354	data->count += pages;
355}
356
357static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
358{
359	struct amdgpu_virt *virt = &adev->virt;
360	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
361	struct amdgpu_bo *bo = NULL;
362	uint64_t bp;
363	int i;
364
365	if (!data)
366		return;
367
368	for (i = data->last_reserved; i < data->count; i++) {
369		bp = data->bps[i].retired_page;
370
371		/* There are two cases of reserve error that should be ignored:
372		 * 1) a ras bad page has been allocated (used by someone);
373		 * 2) a ras bad page has been reserved (duplicate error injection
374		 *    for one page);
375		 */
376		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
377					       AMDGPU_GPU_PAGE_SIZE,
378					       AMDGPU_GEM_DOMAIN_VRAM,
379					       &bo, NULL))
380			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
381
382		data->bps_bo[i] = bo;
383		data->last_reserved = i + 1;
384		bo = NULL;
385	}
386}
387
388static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
389		uint64_t retired_page)
390{
391	struct amdgpu_virt *virt = &adev->virt;
392	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
393	int i;
394
395	if (!data)
396		return true;
397
398	for (i = 0; i < data->count; i++)
399		if (retired_page == data->bps[i].retired_page)
400			return true;
401
402	return false;
403}
404
405static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
406		uint64_t bp_block_offset, uint32_t bp_block_size)
407{
408	struct eeprom_table_record bp;
409	uint64_t retired_page;
410	uint32_t bp_idx, bp_cnt;
411
412	if (bp_block_size) {
413		bp_cnt = bp_block_size / sizeof(uint64_t);
414		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
415			retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
416					bp_block_offset + bp_idx * sizeof(uint64_t));
417			bp.retired_page = retired_page;
418
419			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
420				continue;
421
422			amdgpu_virt_ras_add_bps(adev, &bp, 1);
423
424			amdgpu_virt_ras_reserve_bps(adev);
425		}
426	}
427}
428
429static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
430{
431	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
432	uint32_t checksum;
433	uint32_t checkval;
434
435	uint32_t i;
436	uint32_t tmp;
437
438	if (adev->virt.fw_reserve.p_pf2vf == NULL)
439		return -EINVAL;
440
441	if (pf2vf_info->size > 1024) {
442		DRM_ERROR("invalid pf2vf message size\n");
443		return -EINVAL;
444	}
445
446	switch (pf2vf_info->version) {
447	case 1:
448		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
449		checkval = amd_sriov_msg_checksum(
450			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
451			adev->virt.fw_reserve.checksum_key, checksum);
452		if (checksum != checkval) {
453			DRM_ERROR("invalid pf2vf message\n");
454			return -EINVAL;
455		}
456
457		adev->virt.gim_feature =
458			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
459		break;
460	case 2:
461		/* TODO: missing key, need to add it later */
462		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
463		checkval = amd_sriov_msg_checksum(
464			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
465			0, checksum);
466		if (checksum != checkval) {
467			DRM_ERROR("invalid pf2vf message\n");
468			return -EINVAL;
469		}
470
471		adev->virt.vf2pf_update_interval_ms =
472			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
473		adev->virt.gim_feature =
474			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
475		adev->virt.reg_access =
476			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
477
478		adev->virt.decode_max_dimension_pixels = 0;
479		adev->virt.decode_max_frame_pixels = 0;
480		adev->virt.encode_max_dimension_pixels = 0;
481		adev->virt.encode_max_frame_pixels = 0;
482		adev->virt.is_mm_bw_enabled = false;
483		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
484			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
485			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);
486
487			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
488			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);
489
490			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
491			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);
492
493			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
494			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
495		}
496		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
497			adev->virt.is_mm_bw_enabled = true;
498
499		adev->unique_id =
500			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
501		break;
502	default:
503		DRM_ERROR("invalid pf2vf version\n");
504		return -EINVAL;
505	}
506
507	/* correct an interval value that is too large or too small */
508	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
509		adev->virt.vf2pf_update_interval_ms = 2000;
510
511	return 0;
512}
513
514static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
515{
516	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
517	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
518
519	if (adev->virt.fw_reserve.p_vf2pf == NULL)
520		return;
521
522	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
523	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
524	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
525	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
526	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
527	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
528	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
529	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
530	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
531	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
532	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
533	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
534	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos_fw_version);
535	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,      adev->psp.asd_fw_version);
536	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,   adev->psp.ta_ras_ucode_version);
537	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,  adev->psp.ta_xgmi_ucode_version);
538	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
539	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
540	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
541	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
542	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
543}
544
545static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
546{
547	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
548	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
549
550	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
551
552	if (adev->virt.fw_reserve.p_vf2pf == NULL)
553		return -EINVAL;
554
555	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
556
557	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
558	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
559
560#ifdef MODULE
561	if (THIS_MODULE->version != NULL)
562		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
563	else
564#endif
565		strcpy(vf2pf_info->driver_version, "N/A");
566
567	vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all
568	vf2pf_info->driver_cert = 0;
569	vf2pf_info->os_info.all = 0;
570
571	vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
572	vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
573	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
574	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
575
576	amdgpu_virt_populate_vf2pf_ucode_info(adev);
577
578	/* TODO: read dynamic info */
579	vf2pf_info->gfx_usage = 0;
580	vf2pf_info->compute_usage = 0;
581	vf2pf_info->encode_usage = 0;
582	vf2pf_info->decode_usage = 0;
583
584	vf2pf_info->checksum =
585		amd_sriov_msg_checksum(
586		vf2pf_info, vf2pf_info->header.size, 0, 0);
587
588	return 0;
589}
590
591static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
592{
593	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
594	int ret;
595
596	ret = amdgpu_virt_read_pf2vf_data(adev);
597	if (ret)
598		goto out;
599	amdgpu_virt_write_vf2pf_data(adev);
600
601out:
602	schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
603}
604
605void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
606{
607	if (adev->virt.vf2pf_update_interval_ms != 0) {
608		DRM_INFO("clean up the vf2pf work item\n");
609		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
610		adev->virt.vf2pf_update_interval_ms = 0;
611	}
612}
613
614void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
615{
616	uint64_t bp_block_offset = 0;
617	uint32_t bp_block_size = 0;
618	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
619
620	adev->virt.fw_reserve.p_pf2vf = NULL;
621	adev->virt.fw_reserve.p_vf2pf = NULL;
622	adev->virt.vf2pf_update_interval_ms = 0;
623
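	/* The PF2VF and VF2PF exchange structures live at fixed KB offsets
	 * inside the firmware-reserved VRAM region; the << 10 below converts
	 * those KB offsets into byte offsets.
	 */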
624	if (adev->mman.fw_vram_usage_va != NULL) {
625		adev->virt.vf2pf_update_interval_ms = 2000;
626
627		adev->virt.fw_reserve.p_pf2vf =
628			(struct amd_sriov_msg_pf2vf_info_header *)
629			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
630		adev->virt.fw_reserve.p_vf2pf =
631			(struct amd_sriov_msg_vf2pf_info_header *)
632			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
633
634		amdgpu_virt_read_pf2vf_data(adev);
635		amdgpu_virt_write_vf2pf_data(adev);
636
637		/* bad page handling for version 2 */
638		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
639				pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
640
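				/* The bad-page block offset is passed as two 32-bit
				 * halves; reassemble them into one 64-bit byte offset.
				 */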
641				bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
642						((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
643				bp_block_size = pf2vf_v2->bp_block_size;
644
645				if (bp_block_size && !adev->virt.ras_init_done)
646					amdgpu_virt_init_ras_err_handler_data(adev);
647
648				if (adev->virt.ras_init_done)
649					amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
650			}
651	} else if (adev->bios != NULL) {
652		adev->virt.fw_reserve.p_pf2vf =
653			(struct amd_sriov_msg_pf2vf_info_header *)
654			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
655
656		amdgpu_virt_read_pf2vf_data(adev);
657
658		return;
659	}
660
661	if (adev->virt.vf2pf_update_interval_ms != 0) {
662		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
663		schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
664	}
665}
666
667void amdgpu_detect_virtualization(struct amdgpu_device *adev)
668{
669	uint32_t reg;
670
671	switch (adev->asic_type) {
672	case CHIP_TONGA:
673	case CHIP_FIJI:
674		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
675		break;
676	case CHIP_VEGA10:
677	case CHIP_VEGA20:
678	case CHIP_NAVI10:
679	case CHIP_NAVI12:
680	case CHIP_SIENNA_CICHLID:
681	case CHIP_ARCTURUS:
682	case CHIP_ALDEBARAN:
683		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
684		break;
685	default: /* other chips don't support SRIOV */
686		reg = 0;
687		break;
688	}
689
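	/* Bit 0 of the IOV_FUNC_IDENTIFIER register means this function is a VF;
	 * bit 31 means SR-IOV is enabled on the device.
	 */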
690	if (reg & 1)
691		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
692
693	if (reg & 0x80000000)
694		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
695
696	if (!reg) {
697		if (is_virtual_machine())	/* passthrough mode excludes sriov mode */
698			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
699	}
700
701	/* we have the ability to check now */
702	if (amdgpu_sriov_vf(adev)) {
703		switch (adev->asic_type) {
704		case CHIP_TONGA:
705		case CHIP_FIJI:
706			vi_set_virt_ops(adev);
707			break;
708		case CHIP_VEGA10:
709		case CHIP_VEGA20:
710		case CHIP_ARCTURUS:
711		case CHIP_ALDEBARAN:
712			soc15_set_virt_ops(adev);
713			break;
714		case CHIP_NAVI10:
715		case CHIP_NAVI12:
716		case CHIP_SIENNA_CICHLID:
717			nv_set_virt_ops(adev);
718			/* try send GPU_INIT_DATA request to host */
719			amdgpu_virt_request_init_data(adev);
720			break;
721		default: /* other chips don't support SRIOV */
722			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
723			break;
724		}
725	}
726}
727
728static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
729{
730	return amdgpu_sriov_is_debug(adev) ? true : false;
731}
732
733static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
734{
735	return amdgpu_sriov_is_normal(adev) ? true : false;
736}
737
738int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
739{
740	if (!amdgpu_sriov_vf(adev) ||
741	    amdgpu_virt_access_debugfs_is_kiq(adev))
742		return 0;
743
744	if (amdgpu_virt_access_debugfs_is_mmio(adev))
745		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
746	else
747		return -EPERM;
748
749	return 0;
750}
751
752void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
753{
754	if (amdgpu_sriov_vf(adev))
755		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
756}
757
758enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
759{
760	enum amdgpu_sriov_vf_mode mode;
761
762	if (amdgpu_sriov_vf(adev)) {
763		if (amdgpu_sriov_is_pp_one_vf(adev))
764			mode = SRIOV_VF_MODE_ONE_VF;
765		else
766			mode = SRIOV_VF_MODE_MULTI_VF;
767	} else {
768		mode = SRIOV_VF_MODE_BARE_METAL;
769	}
770
771	return mode;
772}
773
774void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
775			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
776			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
777{
778	uint32_t i;
779
780	if (!adev->virt.is_mm_bw_enabled)
781		return;
782
783	if (encode) {
784		for (i = 0; i < encode_array_size; i++) {
785			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
786			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
787			if (encode[i].max_width > 0)
788				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
789			else
790				encode[i].max_height = 0;
791		}
792	}
793
794	if (decode) {
795		for (i = 0; i < decode_array_size; i++) {
796			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
797			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
798			if (decode[i].max_width > 0)
799				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
800			else
801				decode[i].max_height = 0;
802		}
803	}
804}
v4.17
  1/*
  2 * Copyright 2016 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#include "amdgpu.h"
 25#define MAX_KIQ_REG_WAIT	5000 /* in usecs, 5ms */
 26#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
 27#define MAX_KIQ_REG_TRY 20
 28
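/*
 * The static CSA lives at the top of the VM address space: start from the
 * highest GPU VA, back off by AMDGPU_VA_RESERVED_SIZE, and fold addresses
 * past the VA hole into the upper half by OR-ing in AMDGPU_VA_HOLE_END.
 */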
 29uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
 30{
 31	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
 32
 33	addr -= AMDGPU_VA_RESERVED_SIZE;
 34
 35	if (addr >= AMDGPU_VA_HOLE_START)
 36		addr |= AMDGPU_VA_HOLE_END;
 37
 38	return addr;
 39}
 40
 41bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 42{
 43	/* By now all MMIO pages except mailbox are blocked */
 44	/* if blocking is enabled in hypervisor. Choose the */
 45	/* SCRATCH_REG0 to test. */
 46	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 47}
 48
 49int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
 50{
 51	int r;
 52	void *ptr;
 53
 54	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
 55				AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
 56				&adev->virt.csa_vmid0_addr, &ptr);
 57	if (r)
 58		return r;
 59
 60	memset(ptr, 0, AMDGPU_CSA_SIZE);
 61	return 0;
 62}
 63
 64void amdgpu_free_static_csa(struct amdgpu_device *adev) {
 65	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
 66						&adev->virt.csa_vmid0_addr,
 67						NULL);
 68}
 69
 70/*
 71 * amdgpu_map_static_csa should be called during amdgpu_vm_init.
 72 * It maps the virtual address amdgpu_csa_vaddr() into this VM, and each GFX
 73 * command submission should use this virtual address within its META_DATA
 74 * init package to support SRIOV gfx preemption.
 75 */
 76int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 77			  struct amdgpu_bo_va **bo_va)
 78{
 79	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
 80	struct ww_acquire_ctx ticket;
 81	struct list_head list;
 82	struct amdgpu_bo_list_entry pd;
 83	struct ttm_validate_buffer csa_tv;
 84	int r;
 85
 86	INIT_LIST_HEAD(&list);
 87	INIT_LIST_HEAD(&csa_tv.head);
 88	csa_tv.bo = &adev->virt.csa_obj->tbo;
 89	csa_tv.shared = true;
 90
 91	list_add(&csa_tv.head, &list);
 92	amdgpu_vm_get_pd_bo(vm, &list, &pd);
 93
 94	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 95	if (r) {
 96		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
 97		return r;
 98	}
 99
100	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
101	if (!*bo_va) {
102		ttm_eu_backoff_reservation(&ticket, &list);
103		DRM_ERROR("failed to create bo_va for static CSA\n");
104		return -ENOMEM;
105	}
106
107	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
108				AMDGPU_CSA_SIZE);
109	if (r) {
110		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
111		amdgpu_vm_bo_rmv(adev, *bo_va);
112		ttm_eu_backoff_reservation(&ticket, &list);
113		return r;
114	}
115
116	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
117			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
118			     AMDGPU_PTE_EXECUTABLE);
119
120	if (r) {
121		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
122		amdgpu_vm_bo_rmv(adev, *bo_va);
123		ttm_eu_backoff_reservation(&ticket, &list);
124		return r;
125	}
126
127	ttm_eu_backoff_reservation(&ticket, &list);
128	return 0;
129}
130
131void amdgpu_virt_init_setting(struct amdgpu_device *adev)
132{
133	/* enable virtual display */
134	adev->mode_info.num_crtc = 1;
135	adev->enable_virtual_display = true;
136	adev->cg_flags = 0;
137	adev->pg_flags = 0;
138}
139
140uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
141{
142	signed long r, cnt = 0;
143	unsigned long flags;
144	uint32_t seq;
145	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
146	struct amdgpu_ring *ring = &kiq->ring;
147
148	BUG_ON(!ring->funcs->emit_rreg);
149
150	spin_lock_irqsave(&kiq->ring_lock, flags);
151	amdgpu_ring_alloc(ring, 32);
152	amdgpu_ring_emit_rreg(ring, reg);
153	amdgpu_fence_emit_polling(ring, &seq);
154	amdgpu_ring_commit(ring);
155	spin_unlock_irqrestore(&kiq->ring_lock, flags);
156
157	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
158
159	/* don't wait anymore for gpu reset case because this way may
160	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
161	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
162	 * never return if we keep waiting in virt_kiq_rreg, which causes
163	 * gpu_recover() to hang there.
164	 *
165	 * also don't wait anymore for IRQ context
166	 * */
167	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
168		goto failed_kiq_read;
169
170	if (in_interrupt())
171		might_sleep();
172
173	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
174		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
175		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
176	}
177
178	if (cnt > MAX_KIQ_REG_TRY)
179		goto failed_kiq_read;
180
181	return adev->wb.wb[adev->virt.reg_val_offs];
182
183failed_kiq_read:
184	pr_err("failed to read reg:%x\n", reg);
185	return ~0;
186}
187
188void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
189{
190	signed long r, cnt = 0;
191	unsigned long flags;
192	uint32_t seq;
193	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
194	struct amdgpu_ring *ring = &kiq->ring;
195
196	BUG_ON(!ring->funcs->emit_wreg);
197
198	spin_lock_irqsave(&kiq->ring_lock, flags);
199	amdgpu_ring_alloc(ring, 32);
200	amdgpu_ring_emit_wreg(ring, reg, v);
201	amdgpu_fence_emit_polling(ring, &seq);
202	amdgpu_ring_commit(ring);
203	spin_unlock_irqrestore(&kiq->ring_lock, flags);
204
205	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
206
207	/* don't wait anymore for gpu reset case because this way may
208	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
209	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
210	 * never return if we keep waiting in virt_kiq_rreg, which causes
211	 * gpu_recover() to hang there.
212	 *
213	 * also don't wait anymore for IRQ context
214	 * */
215	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
216		goto failed_kiq_write;
217
218	if (in_interrupt())
219		might_sleep();
220
221	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
222
223		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
224		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
225	}
226
227	if (cnt > MAX_KIQ_REG_TRY)
228		goto failed_kiq_write;
229
230	return;
231
232failed_kiq_write:
233	pr_err("failed to write reg:%x\n", reg);
234}
235
236/**
237 * amdgpu_virt_request_full_gpu() - request full gpu access
238 * @adev:	amdgpu device.
239 * @init:	is driver init time.
240 * When starting to init/fini the driver, full GPU access needs to be requested first.
241 * Return: Zero if request success, otherwise will return error.
242 */
243int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
244{
245	struct amdgpu_virt *virt = &adev->virt;
246	int r;
247
248	if (virt->ops && virt->ops->req_full_gpu) {
249		r = virt->ops->req_full_gpu(adev, init);
250		if (r)
251			return r;
252
253		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
254	}
255
256	return 0;
257}
258
259/**
260 * amdgpu_virt_release_full_gpu() - release full gpu access
261 * @adev:	amdgpu device.
262 * @init:	is driver init time.
263 * When finishing driver init/fini, full GPU access needs to be released.
264 * Return: Zero if release success, otherwise will return error.
265 */
266int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
267{
268	struct amdgpu_virt *virt = &adev->virt;
269	int r;
270
271	if (virt->ops && virt->ops->rel_full_gpu) {
272		r = virt->ops->rel_full_gpu(adev, init);
273		if (r)
274			return r;
275
276		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
277	}
278	return 0;
279}
280
281/**
282 * amdgpu_virt_reset_gpu() - reset gpu
283 * @adev:	amdgpu device.
284 * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
285 * Return: Zero if reset success, otherwise will return error.
286 */
287int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
288{
289	struct amdgpu_virt *virt = &adev->virt;
290	int r;
291
292	if (virt->ops && virt->ops->reset_gpu) {
293		r = virt->ops->reset_gpu(adev);
294		if (r)
295			return r;
296
297		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
298	}
299
300	return 0;
301}
302
303/**
304 * amdgpu_virt_wait_reset() - wait for GPU reset to complete
305 * @adev:	amdgpu device.
306 * Wait for the GPU reset to complete.
307 * Return: Zero if reset success, otherwise will return error.
308 */
309int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
310{
311	struct amdgpu_virt *virt = &adev->virt;
312
313	if (!virt->ops || !virt->ops->wait_reset)
314		return -EINVAL;
315
316	return virt->ops->wait_reset(adev);
317}
318
319/**
320 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
321 * @adev:	amdgpu device.
322 * The MM table is used by UVD and VCE for their initialization.
323 * Return: Zero if allocation succeeds.
324 */
325int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
326{
327	int r;
328
329	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
330		return 0;
331
332	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
333				    AMDGPU_GEM_DOMAIN_VRAM,
334				    &adev->virt.mm_table.bo,
335				    &adev->virt.mm_table.gpu_addr,
336				    (void *)&adev->virt.mm_table.cpu_addr);
337	if (r) {
338		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
339		return r;
340	}
341
342	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
343	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
344		 adev->virt.mm_table.gpu_addr,
345		 adev->virt.mm_table.cpu_addr);
346	return 0;
347}
348
349/**
350 * amdgpu_virt_free_mm_table() - free mm table memory
351 * @adev:	amdgpu device.
352 * Free MM table memory
353 */
354void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
355{
356	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
357		return;
358
359	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
360			      &adev->virt.mm_table.gpu_addr,
361			      (void *)&adev->virt.mm_table.cpu_addr);
362	adev->virt.mm_table.gpu_addr = 0;
363}
364
365
366int amdgpu_virt_fw_reserve_get_checksum(void *obj,
367					unsigned long obj_size,
368					unsigned int key,
369					unsigned int chksum)
370{
371	unsigned int ret = key;
372	unsigned long i = 0;
373	unsigned char *pos;
374
375	pos = (char *)obj;
376	/* calculate checksum */
377	for (i = 0; i < obj_size; ++i)
378		ret += *(pos + i);
379	/* minus the chksum itself */
380	pos = (char *)&chksum;
381	for (i = 0; i < sizeof(chksum); ++i)
382		ret -= *(pos + i);
383	return ret;
384}
385
386void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
387{
388	uint32_t pf2vf_size = 0;
389	uint32_t checksum = 0;
390	uint32_t checkval;
391	char *str;
392
393	adev->virt.fw_reserve.p_pf2vf = NULL;
394	adev->virt.fw_reserve.p_vf2pf = NULL;
395
396	if (adev->fw_vram_usage.va != NULL) {
397		adev->virt.fw_reserve.p_pf2vf =
398			(struct amdgim_pf2vf_info_header *)(
399			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
400		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
401		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
402		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
403
404		/* pf2vf message must be in 4K */
405		if (pf2vf_size > 0 && pf2vf_size < 4096) {
406			checkval = amdgpu_virt_fw_reserve_get_checksum(
407				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
408				adev->virt.fw_reserve.checksum_key, checksum);
409			if (checkval == checksum) {
410				adev->virt.fw_reserve.p_vf2pf =
411					((void *)adev->virt.fw_reserve.p_pf2vf +
412					pf2vf_size);
413				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
414					sizeof(amdgim_vf2pf_info));
415				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
416					AMDGPU_FW_VRAM_VF2PF_VER);
417				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
418					sizeof(amdgim_vf2pf_info));
419				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
420					&str);
421#ifdef MODULE
422				if (THIS_MODULE->version != NULL)
423					strcpy(str, THIS_MODULE->version);
424				else
425#endif
426					strcpy(str, "N/A");
427				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
428					0);
429				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
430					amdgpu_virt_fw_reserve_get_checksum(
431					adev->virt.fw_reserve.p_vf2pf,
432					pf2vf_size,
433					adev->virt.fw_reserve.checksum_key, 0));
434			}
435		}
436	}
437}
438
439