Loading...
Note: File does not exist in v3.1.
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Eddie Dong <eddie.dong@intel.com>
25 * Dexuan Cui
26 * Jike Song <jike.song@intel.com>
27 *
28 * Contributors:
29 * Zhi Wang <zhi.a.wang@intel.com>
30 *
31 */
32
33#ifndef _GVT_MPT_H_
34#define _GVT_MPT_H_
35
36/**
37 * DOC: Hypervisor Service APIs for GVT-g Core Logic
38 *
39 * This is the glue layer between specific hypervisor MPT modules and GVT-g core
40 * logic. Each kind of hypervisor MPT module provides a collection of function
41 * callbacks and will be attached to GVT host when the driver is loading.
42 * GVT-g core logic will call these APIs to request specific services from
43 * hypervisor.
44 */
45
46/**
47 * intel_gvt_hypervisor_host_init - init GVT-g host side
48 *
49 * Returns:
50 * Zero on success, negative error code if failed
51 */
52static inline int intel_gvt_hypervisor_host_init(struct device *dev,
53 void *gvt, const void *ops)
54{
55 if (!intel_gvt_host.mpt->host_init)
56 return -ENODEV;
57
58 return intel_gvt_host.mpt->host_init(dev, gvt, ops);
59}
60
61/**
62 * intel_gvt_hypervisor_host_exit - exit GVT-g host side
63 */
64static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
65{
66 /* optional to provide */
67 if (!intel_gvt_host.mpt->host_exit)
68 return;
69
70 intel_gvt_host.mpt->host_exit(dev);
71}
72
73/**
74 * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
75 * related stuffs inside hypervisor.
76 *
77 * Returns:
78 * Zero on success, negative error code if failed.
79 */
80static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
81{
82 /* optional to provide */
83 if (!intel_gvt_host.mpt->attach_vgpu)
84 return 0;
85
86 return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
87}
88
89/**
90 * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
91 * related stuffs inside hypervisor.
92 *
93 * Returns:
94 * Zero on success, negative error code if failed.
95 */
96static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
97{
98 /* optional to provide */
99 if (!intel_gvt_host.mpt->detach_vgpu)
100 return;
101
102 intel_gvt_host.mpt->detach_vgpu(vgpu);
103}
104
/*
 * Offsets of the message control, address and data fields within the
 * PCI MSI capability structure, relative to the capability's start
 * offset in the vGPU's config space.
 */
#define MSI_CAP_CONTROL(offset) ((offset) + 2)
#define MSI_CAP_ADDRESS(offset) ((offset) + 4)
#define MSI_CAP_DATA(offset) ((offset) + 8)
#define MSI_CAP_EN 0x1
109
110/**
111 * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
112 *
113 * Returns:
114 * Zero on success, negative error code if failed.
115 */
116static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
117{
118 unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
119 u16 control, data;
120 u32 addr;
121 int ret;
122
123 control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
124 addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
125 data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
126
127 /* Do not generate MSI if MSIEN is disable */
128 if (!(control & MSI_CAP_EN))
129 return 0;
130
131 if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
132 return -EINVAL;
133
134 trace_inject_msi(vgpu->id, addr, data);
135
136 ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
137 if (ret)
138 return ret;
139 return 0;
140}
141
142/**
143 * intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN
144 * @p: host kernel virtual address
145 *
146 * Returns:
147 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
148 */
149static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
150{
151 return intel_gvt_host.mpt->from_virt_to_mfn(p);
152}
153
154/**
155 * intel_gvt_hypervisor_enable_page_track - track a guest page
156 * @vgpu: a vGPU
157 * @gfn: the gfn of guest
158 *
159 * Returns:
160 * Zero on success, negative error code if failed.
161 */
162static inline int intel_gvt_hypervisor_enable_page_track(
163 struct intel_vgpu *vgpu, unsigned long gfn)
164{
165 return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
166}
167
168/**
169 * intel_gvt_hypervisor_disable_page_track - untrack a guest page
170 * @vgpu: a vGPU
171 * @gfn: the gfn of guest
172 *
173 * Returns:
174 * Zero on success, negative error code if failed.
175 */
176static inline int intel_gvt_hypervisor_disable_page_track(
177 struct intel_vgpu *vgpu, unsigned long gfn)
178{
179 return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
180}
181
182/**
183 * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
184 * @vgpu: a vGPU
185 * @gpa: guest physical address
186 * @buf: host data buffer
187 * @len: data length
188 *
189 * Returns:
190 * Zero on success, negative error code if failed.
191 */
192static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
193 unsigned long gpa, void *buf, unsigned long len)
194{
195 return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
196}
197
198/**
199 * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
200 * @vgpu: a vGPU
201 * @gpa: guest physical address
202 * @buf: host data buffer
203 * @len: data length
204 *
205 * Returns:
206 * Zero on success, negative error code if failed.
207 */
208static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
209 unsigned long gpa, void *buf, unsigned long len)
210{
211 return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
212}
213
214/**
215 * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
216 * @vgpu: a vGPU
217 * @gpfn: guest pfn
218 *
219 * Returns:
220 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
221 */
222static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
223 struct intel_vgpu *vgpu, unsigned long gfn)
224{
225 return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
226}
227
228/**
229 * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
230 * @vgpu: a vGPU
231 * @gfn: guest pfn
232 * @size: page size
233 * @dma_addr: retrieve allocated dma addr
234 *
235 * Returns:
236 * 0 on success, negative error code if failed.
237 */
238static inline int intel_gvt_hypervisor_dma_map_guest_page(
239 struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
240 dma_addr_t *dma_addr)
241{
242 return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
243 dma_addr);
244}
245
246/**
247 * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
248 * @vgpu: a vGPU
249 * @dma_addr: the mapped dma addr
250 */
251static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
252 struct intel_vgpu *vgpu, dma_addr_t dma_addr)
253{
254 intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
255}
256
257/**
258 * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
259 * @vgpu: a vGPU
260 * @dma_addr: guest dma addr
261 *
262 * Returns:
263 * 0 on success, negative error code if failed.
264 */
265static inline int
266intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
267 dma_addr_t dma_addr)
268{
269 return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
270}
271
272/**
273 * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
274 * @vgpu: a vGPU
275 * @gfn: guest PFN
276 * @mfn: host PFN
277 * @nr: amount of PFNs
278 * @map: map or unmap
279 *
280 * Returns:
281 * Zero on success, negative error code if failed.
282 */
283static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
284 struct intel_vgpu *vgpu, unsigned long gfn,
285 unsigned long mfn, unsigned int nr,
286 bool map)
287{
288 /* a MPT implementation could have MMIO mapped elsewhere */
289 if (!intel_gvt_host.mpt->map_gfn_to_mfn)
290 return 0;
291
292 return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
293 map);
294}
295
296/**
297 * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
298 * @vgpu: a vGPU
299 * @start: the beginning of the guest physical address region
300 * @end: the end of the guest physical address region
301 * @map: map or unmap
302 *
303 * Returns:
304 * Zero on success, negative error code if failed.
305 */
306static inline int intel_gvt_hypervisor_set_trap_area(
307 struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
308{
309 /* a MPT implementation could have MMIO trapped elsewhere */
310 if (!intel_gvt_host.mpt->set_trap_area)
311 return 0;
312
313 return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
314}
315
316/**
317 * intel_gvt_hypervisor_set_opregion - Set opregion for guest
318 * @vgpu: a vGPU
319 *
320 * Returns:
321 * Zero on success, negative error code if failed.
322 */
323static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
324{
325 if (!intel_gvt_host.mpt->set_opregion)
326 return 0;
327
328 return intel_gvt_host.mpt->set_opregion(vgpu);
329}
330
331/**
332 * intel_gvt_hypervisor_set_edid - Set EDID region for guest
333 * @vgpu: a vGPU
334 * @port_num: display port number
335 *
336 * Returns:
337 * Zero on success, negative error code if failed.
338 */
339static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
340 int port_num)
341{
342 if (!intel_gvt_host.mpt->set_edid)
343 return 0;
344
345 return intel_gvt_host.mpt->set_edid(vgpu, port_num);
346}
347
348/**
349 * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
350 * @vgpu: a vGPU
351 *
352 * Returns:
353 * Zero on success, negative error code if failed.
354 */
355static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
356{
357 if (!intel_gvt_host.mpt->get_vfio_device)
358 return 0;
359
360 return intel_gvt_host.mpt->get_vfio_device(vgpu);
361}
362
363/**
364 * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
365 * @vgpu: a vGPU
366 *
367 * Returns:
368 * Zero on success, negative error code if failed.
369 */
370static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
371{
372 if (!intel_gvt_host.mpt->put_vfio_device)
373 return;
374
375 intel_gvt_host.mpt->put_vfio_device(vgpu);
376}
377
378/**
379 * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
380 * @vgpu: a vGPU
381 * @gfn: guest PFN
382 *
383 * Returns:
384 * true on valid gfn, false on not.
385 */
386static inline bool intel_gvt_hypervisor_is_valid_gfn(
387 struct intel_vgpu *vgpu, unsigned long gfn)
388{
389 if (!intel_gvt_host.mpt->is_valid_gfn)
390 return true;
391
392 return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
393}
394
395int intel_gvt_register_hypervisor(struct intel_gvt_mpt *);
396void intel_gvt_unregister_hypervisor(void);
397
398#endif /* _GVT_MPT_H_ */