/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
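
/*
 * populate_pvinfo_page - fill in the PVINFO MMIO page of a vGPU
 *
 * The PVINFO page is the channel between GVT-g and the paravirtualized
 * guest i915 driver: it advertises the magic number and interface
 * version, the capability bits, and the ballooned (mappable and
 * non-mappable) graphics memory ranges the guest may use.
 */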
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
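
	/*
	 * UINT_MAX marks the cursor hotspot as "not set yet"; these are
	 * expected to be updated before being consumed, e.g. by the
	 * dmabuf display path.
	 */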
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))
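
/*
 * e.g. VGPU_WEIGHT(2) == 8 and VGPU_WEIGHT(8) == 2: the more instances
 * a type allows on one GPU, the smaller each instance's scheduling share.
 */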

static struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;

	/* A vGPU with a weight of 8 will get twice as much GPU time as a
	 * vGPU with a weight of 4 on a contended host; different vGPU types
	 * carry different weights. Legal weights range from 1 to 16.
	 */
	unsigned int weight;
	enum intel_vgpu_edid edid;
	char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
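/* { low GM size, high GM size, fence count, weight, EDID, name suffix } */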
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;

	/* vGPU type name is defined as GVTg_Vx_y, which encodes the
	 * physical GPU generation (e.g. V4 for a BDW server, V5 for a
	 * SKL server).
	 *
	 * Depending on the physical SKU's resources, one might see vGPU
	 * types like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different types
	 * of vGPU can be created on the same physical GPU, depending on
	 * the available resources. Each vGPU type has an "avail_instance"
	 * field indicating how many vGPU instances of this type can still
	 * be created.
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = ARRAY_SIZE(vgpu_types);

	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;

		if (vgpu_types[i].weight < 1 ||
		    vgpu_types[i].weight > VGPU_MAX_WEIGHT) {
			kfree(gvt->types);
			return -EINVAL;
		}

		gvt->types[i].weight = vgpu_types[i].weight;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN(gvt->dev_priv, 8))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
						vgpu_types[i].name);
		else if (IS_GEN(gvt->dev_priv, 9))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
						vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     gvt->types[i].weight,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}
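
/*
 * Worked example with hypothetical numbers: if low_avail is 448MB and
 * high_avail is 3584MB, the "8" type above yields
 * min(448/64, 3584/384) = min(7, 9) = 7 available instances.
 */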

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;

	/* This should depend on the maximum hw resource size, but keep
	 * the static config for now.
	 */
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);

		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
		       i, gvt->types[i].name,
		       gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
		       gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}

/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to activate a virtual GPU.
 *
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->gvt->lock);
}

/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to deactivate a virtual GPU.
 * The virtual GPU will be stopped.
 *
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);

	vgpu->active = false;
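
	/*
	 * Drop vgpu_lock while waiting for in-flight workloads to drain:
	 * the workload completion path needs to take it to retire them.
	 */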
	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to release a virtual GPU.
 * The virtual GPU will be stopped and all of its runtime information
 * will be destroyed.
 *
 */
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
	intel_gvt_deactivate_vgpu(vgpu);

	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&vgpu->vgpu_lock);

	WARN(vgpu->active, "vGPU is still active!\n");
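
	/* Tear down in roughly the reverse order of __intel_gvt_create_vgpu(). */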
	intel_gvt_debugfs_remove_vgpu(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	if (idr_is_empty(&gvt->vgpu_idr))
		intel_gvt_clean_irq(gvt);
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);
}

#define IDLE_VGPU_IDR 0
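
/*
 * The idle vGPU occupies IDR slot 0; the scheduler runs it as a
 * placeholder when no real vGPU has work to submit.
 */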

/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when a user wants to create an idle virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;
	mutex_init(&vgpu->vgpu_lock);

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy an idle virtual GPU.
 *
 */
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_sched_policy(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	vfree(vgpu);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
		GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	mutex_init(&vgpu->vgpu_lock);
	mutex_init(&vgpu->dmabuf_lock);
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init(&vgpu->object_idr);
	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	intel_gvt_debugfs_add_vgpu(vgpu);

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	/* TODO: add support for more platforms */
	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

	return vgpu;
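
	/* Error unwinding: release everything in the reverse order of setup. */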
out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when a user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
				struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.weight = type->weight;
	param.resolution = type->resolution;

	/* XXX the creation params are currently expressed in MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	mutex_lock(&gvt->lock);
	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (!IS_ERR(vgpu))
		/* recalculate the remaining instances for each type */
		intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	return vgpu;
}
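
/*
 * Typical usage (illustrative sketch, not part of this file): a
 * hypervisor backend picks one of the advertised types and creates an
 * instance from it.
 *
 *	struct intel_vgpu *vgpu;
 *
 *	vgpu = intel_gvt_create_vgpu(gvt, &gvt->types[i]);
 *	if (IS_ERR(vgpu))
 *		return PTR_ERR(vgpu);
 */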

/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when a user wants to reset a virtual GPU through
 * a device model reset or a GT reset. The caller should hold the vgpu lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
 * the whole vGPU to the default state, as when it was created. This vGPU
 * function is required both for functionality and for security concerns. The
 * ultimate goal of vGPU FLR is to allow a vGPU instance to be reused by
 * virtual machines. When we assign a vGPU to a virtual machine, we must issue
 * such a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
 * (Render, Blitter, Video, Video Enhancement). They are defined by the GPU
 * Spec. Unlike the FLR, a GT reset only resets the particular resources of a
 * vGPU per the reset request. The guest driver can issue a GT reset by
 * programming the virtual GDRST register to reset specific virtual GPU
 * engines or all engines.
 *
 * The parameter @dmlr identifies whether we will do a DMLR or a GT reset.
 * The parameter @engine_mask specifies the engines that need to be reset.
 * If the value ALL_ENGINES is given for @engine_mask, it means the caller
 * requests a full GT reset in which all virtual GPU engines are reset. For
 * FLR, @engine_mask is ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * The current_vgpu will be set to NULL after stopping the
	 * scheduler when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);
	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		intel_vgpu_invalidate_ppgtt(vgpu);
		/* fence registers are not reset during virtual (GT) reset */
		if (dmlr) {
			intel_vgpu_reset_gtt(vgpu);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);
		intel_vgpu_reset_display(vgpu);

		if (dmlr) {
			intel_vgpu_reset_cfg_space(vgpu);
			/* only reset failsafe mode on a dmlr reset */
			vgpu->failsafe = false;
			vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to reset a virtual GPU.
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->vgpu_lock);
}