/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
#include "page_track.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

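/*
 * Per-vGPU MMIO state: vreg backs the guest-visible (virtual) register
 * values, sreg the shadow register copy kept alongside them.
 */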
struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	bool mapped;
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[I915_MAX_PORTS];
	struct intel_vgpu_sbi sbi;
};

struct vgpu_sched_ctl {
	int weight;
};

enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};

struct intel_vgpu_submission_ops {
	const char *name;
	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
};

struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	struct i915_gem_context *shadow_ctx;
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
	bool active;
};

struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	struct radix_tree_root page_track_tree;
	u32 hws_pga[I915_NUM_ENGINES];

	struct dentry *debugfs;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {
		struct mdev_device *mdev;
		struct vfio_region *region;
		int num_regions;
		struct eventfd_ctx *intx_trigger;
		struct eventfd_ctx *msi_trigger;

		/*
		 * Two caches are used to avoid mapping duplicated pages (e.g.
		 * scratch pages). This helps to reduce dma setup overhead.
		 */
		struct rb_root gfn_cache;
		struct rb_root dma_addr_cache;
		unsigned long nr_cache_entries;
		struct mutex cache_lock;

		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct kvm *kvm;
		struct work_struct release_work;
		atomic_t released;
		struct vfio_device *vfio_device;
	} vdev;
#endif

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;

	struct completion vblank_done;

};

/* validating GM health status */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks. */
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

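/*
 * Per-device MMIO tracking state. mmio_attribute stores one byte of F_*
 * flags per 32-bit register, indexed by (offset >> 2).
 */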
struct intel_gvt_mmio {
	u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
#define F_GMADR (1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK (1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED (1 << 5)
/* This reg can be accessed with an unaligned address */
#define F_UNALIGN (1 << 6)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};

struct intel_gvt {
	struct mutex lock;
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr; /* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;

	struct {
		struct engine_mmio *mmio;
		int ctx_mmio_count[I915_NUM_ENGINES];
	} engine_mmio_list;

	struct dentry *debugfs_root;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling triggered by a timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling triggered by an event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};

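/* Post a service request bit and wake up the GVT service thread. */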
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
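
/*
 * Example (illustrative): the periodic vblank emulation timer posts
 * INTEL_GVT_REQUEST_EMULATE_VBLANK this way, and the service thread then
 * emulates vblank events for the active vGPUs.
 */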

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Size conversion helpers and host-reserved GM/fence sizes */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)

#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)
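
/*
 * Layout encoded by the macros above: the CPU-mappable aperture spans GM
 * addresses [0, gvt_aperture_sz) and the hidden (non-mappable) GM follows
 * immediately after it, ending at gvt_ggtt_gm_sz(gvt) - 1.
 */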

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);

/* Macros for easily accessing vGPU virtual/shadow registers.
   Explicitly separate use for typed MMIO reg or real offset. */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_sreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
#define vgpu_sreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.sreg + (offset)))
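
/*
 * Usage example (illustrative): both forms below touch the same u32 slot
 * in the vGPU's virtual MMIO block; the _t variants take a typed
 * i915_reg_t while the plain variants take a raw byte offset.
 *
 *	vgpu_vreg_t(vgpu, RING_TAIL(base)) = tail;
 *	vgpu_vreg(vgpu, i915_mmio_reg_offset(RING_TAIL(base))) = tail;
 */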

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
		u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32 bits aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * only update bits 31 - 4,
		 * leave bits 3 - 0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}
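
/*
 * Example (illustrative): when a guest sizes a BAR by writing 0xffffffff
 * to the low dword, the helper above updates only address bits 31:4 and
 * preserves bits 3:0, which hold the BAR's memory type and prefetchable
 * flags.
 */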

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	 (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
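
/*
 * The GGTT address translation helpers below rely on these checks to
 * reject guest graphics memory addresses that fall outside the vGPU's
 * allotted aperture/hidden ranges.
 */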

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* We have a 64-bit BAR; read the full 8-byte base address. */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}
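
/*
 * Example (illustrative): intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0)
 * yields the guest physical base of the vGPU's MMIO BAR (BAR0).
 */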

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				 unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				 unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				  unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
					  struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
						      const char *name);
	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
			      struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				     unsigned int);
};


enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};

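/* Bracket direct hardware MMIO access with a runtime PM reference. */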
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

/**
 * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_cmd_access(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed unaligned
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_unalign(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}

/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO register has a mode mask in its higher 16 bits, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_has_mode_mask(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}

int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);


#include "trace.h"
#include "mpt.h"

#endif