v6.13.7
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_file.h>
#include <drm/drm_managed.h>

#include "virtgpu_drv.h"

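/*
 * Worker for the virtio config-changed interrupt: reads events_read from
 * the device config space, refreshes EDID and display info when the
 * device signals VIRTIO_GPU_EVENT_DISPLAY, and acknowledges the event by
 * writing it back to events_clear.
 */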
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->num_scanouts) {
			if (vgdev->has_edid)
				virtio_gpu_cmd_get_edids(vgdev);
			virtio_gpu_cmd_get_display_info(vgdev);
			virtio_gpu_notify(vgdev);
			drm_helper_hpd_irq_event(vgdev->ddev);
		}
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
			 events_clear, &events_clear);
}

static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

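/*
 * Query capability-set metadata from the device: for each capset index,
 * send GET_CAPSET_INFO and wait up to five seconds for the response that
 * fills in capsets[i].id. A timeout, or an id outside the 1..MAX_CAPSET_ID
 * range defined by the virtio-gpu spec, aborts the whole query and drops
 * the array. Valid ids are recorded in the 64-bit capset_id_mask.
 */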
static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;
	bool invalid_capset_id = false;
	struct drm_device *drm = vgdev->ddev;

	vgdev->capsets = drmm_kcalloc(drm, num_capsets,
				      sizeof(struct virtio_gpu_drv_capset),
				      GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		virtio_gpu_notify(vgdev);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		/*
		 * Capability ids are defined in the virtio-gpu spec and are
		 * between 1 to 63, inclusive.
		 */
		if (!vgdev->capsets[i].id ||
		    vgdev->capsets[i].id > MAX_CAPSET_ID)
			invalid_capset_id = true;

		if (ret == 0)
			DRM_ERROR("timed out waiting for cap set %d\n", i);
		else if (invalid_capset_id)
			DRM_ERROR("invalid capset id %u\n", vgdev->capsets[i].id);

		if (ret == 0 || invalid_capset_id) {
			spin_lock(&vgdev->display_info_lock);
			drmm_kfree(drm, vgdev->capsets);
			vgdev->capsets = NULL;
			spin_unlock(&vgdev->display_info_lock);
			return;
		}

		vgdev->capset_id_mask |= 1LL << vgdev->capsets[i].id;
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}

	vgdev->num_capsets = num_capsets;
}

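/*
 * Probe-time initialization, called from the virtio driver's probe
 * callback: negotiates features, sets up the control and cursor
 * virtqueues, reads the scanout/capset counts from the config space,
 * initializes modeset, and kicks off the initial EDID/display-info
 * queries.
 */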
int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
{
	struct virtqueue_info vqs_info[] = {
		{ "control", virtio_gpu_ctrl_ack },
		{ "cursor", virtio_gpu_cursor_ack },
	};
	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret = 0;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = drmm_kzalloc(dev, sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = vdev;

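	/*
	 * Note: vgdev is a DRM-managed allocation (drmm_kzalloc above), so
	 * it is freed automatically when the drm_device is released; the
	 * error paths below intentionally do not kfree() it.
	 */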
	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->resource_export_lock);
	spin_lock_init(&vgdev->host_visible_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
		vgdev->has_edid = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		vgdev->has_indirect = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
		vgdev->has_resource_assign_uuid = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
		vgdev->has_resource_blob = true;
	}
	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
		if (!devm_request_mem_region(&vgdev->vdev->dev,
					     vgdev->host_visible_region.addr,
					     vgdev->host_visible_region.len,
					     dev_name(&vgdev->vdev->dev))) {
			DRM_ERROR("Could not reserve host visible region\n");
			ret = -EBUSY;
			goto err_vqs;
		}

		DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
			 (unsigned long)vgdev->host_visible_region.addr,
			 (unsigned long)vgdev->host_visible_region.len);
		vgdev->has_host_visible = true;
		drm_mm_init(&vgdev->host_visible_mm,
			    (unsigned long)vgdev->host_visible_region.addr,
			    (unsigned long)vgdev->host_visible_region.len);
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
		vgdev->has_context_init = true;
	}

	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
		 vgdev->has_virgl_3d    ? '+' : '-',
		 vgdev->has_edid        ? '+' : '-',
		 vgdev->has_resource_blob ? '+' : '-',
		 vgdev->has_host_visible ? '+' : '-');

	DRM_INFO("features: %ccontext_init\n",
		 vgdev->has_context_init ? '+' : '-');

	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, vqs_info, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* get display info */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);

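	/*
	 * Headless operation: with CONFIG_DRM_VIRTIO_GPU_KMS disabled, or no
	 * scanouts exposed by the device, drop the modeset/atomic driver
	 * features and skip EDID handling.
	 */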
	if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) {
		DRM_INFO("KMS disabled\n");
		vgdev->num_scanouts = 0;
		vgdev->has_edid = false;
		dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
	} else {
		DRM_INFO("number of scanouts: %d\n", num_scanouts);
	}

	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret) {
		DRM_ERROR("modeset init failed\n");
		goto err_scanouts;
	}

	virtio_device_ready(vgdev->vdev);

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->num_scanouts) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
				   5 * HZ);
	}
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	dev->dev_private = NULL;
	return ret;
}

static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

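/*
 * Device teardown: flush all workers that might still submit commands,
 * then reset the device and delete the virtqueues. Memory owned by the
 * drm_device is released later in virtio_gpu_release().
 */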
void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	virtio_reset_device(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}

void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	if (!vgdev)
		return;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);

	if (vgdev->has_host_visible)
		drm_mm_takedown(&vgdev->host_visible_mm);
}

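/*
 * Per-open file state: with a 3D renderer present, each opener gets a
 * host context id from ctx_id_ida. The ida handle is zero-based and is
 * stored as handle + 1, so ctx_id 0 is never handed out.
 */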
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int handle;

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	mutex_init(&vfpriv->context_lock);

	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
	if (handle < 0) {
		kfree(vfpriv);
		return handle;
	}

	vfpriv->ctx_id = handle + 1;
	file->driver_priv = vfpriv;
	return 0;
}

void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	if (!vgdev->has_virgl_3d)
		return;

	if (vfpriv->context_created) {
		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
		virtio_gpu_notify(vgdev);
	}

	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
	mutex_destroy(&vfpriv->context_lock);
	kfree(vfpriv);
	file->driver_priv = NULL;
}
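
/*
 * For context (not part of this file): a rough, hedged sketch of how the
 * virtio probe callback in virtgpu_drv.c hands off to virtio_gpu_init()
 * above. Names and error labels here are illustrative, not the exact
 * upstream code:
 *
 *	static int virtio_gpu_probe(struct virtio_device *vdev)
 *	{
 *		struct drm_device *dev;
 *		int ret;
 *
 *		dev = drm_dev_alloc(&driver, &vdev->dev);
 *		if (IS_ERR(dev))
 *			return PTR_ERR(dev);
 *		vdev->priv = dev;
 *
 *		ret = virtio_gpu_init(vdev, dev);
 *		if (ret)
 *			goto err_free;
 *
 *		ret = drm_dev_register(dev, 0);
 *		if (ret)
 *			goto err_deinit;
 *		return 0;
 *
 *	err_deinit:
 *		virtio_gpu_deinit(dev);
 *	err_free:
 *		drm_dev_put(dev);
 *		return ret;
 *	}
 */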
v5.14.15
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_file.h>

#include "virtgpu_drv.h"

static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
			 events_clear, &events_clear);
}

static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;

	vgdev->capsets = kcalloc(num_capsets,
				 sizeof(struct virtio_gpu_drv_capset),
				 GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		virtio_gpu_notify(vgdev);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		if (ret == 0) {
			DRM_ERROR("timed out waiting for cap set %d\n", i);
			spin_lock(&vgdev->display_info_lock);
			kfree(vgdev->capsets);
			vgdev->capsets = NULL;
			spin_unlock(&vgdev->display_info_lock);
			return;
		}
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}
	vgdev->num_capsets = num_capsets;
}

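/*
 * Older probe entry point: in v5.14 virtio_gpu_init() takes only the
 * drm_device and recovers the virtio_device via dev_to_virtio(), and
 * virtio_find_vqs() still takes separate callback and name arrays
 * instead of the virtqueue_info array used in the v6.13.7 copy above.
 */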
int virtio_gpu_init(struct drm_device *dev)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char * const names[] = { "control", "cursor" };

	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret = 0;

	if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev_to_virtio(dev->dev);
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->resource_export_lock);
	spin_lock_init(&vgdev->host_visible_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
		vgdev->has_edid = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		vgdev->has_indirect = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
		vgdev->has_resource_assign_uuid = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
		vgdev->has_resource_blob = true;
	}
	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
		if (!devm_request_mem_region(&vgdev->vdev->dev,
					     vgdev->host_visible_region.addr,
					     vgdev->host_visible_region.len,
					     dev_name(&vgdev->vdev->dev))) {
			DRM_ERROR("Could not reserve host visible region\n");
			ret = -EBUSY;
			goto err_vqs;
		}

		DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
			 (unsigned long)vgdev->host_visible_region.addr,
			 (unsigned long)vgdev->host_visible_region.len);
		vgdev->has_host_visible = true;
		drm_mm_init(&vgdev->host_visible_mm,
			    (unsigned long)vgdev->host_visible_region.addr,
			    (unsigned long)vgdev->host_visible_region.len);
	}

	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
		 vgdev->has_virgl_3d    ? '+' : '-',
		 vgdev->has_edid        ? '+' : '-',
		 vgdev->has_resource_blob ? '+' : '-',
		 vgdev->has_host_visible ? '+' : '-');

	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* get display info */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret) {
		DRM_ERROR("modeset init failed\n");
		goto err_scanouts;
	}

	virtio_device_ready(vgdev->vdev);

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->has_edid)
		virtio_gpu_cmd_get_edids(vgdev);
	virtio_gpu_cmd_get_display_info(vgdev);
	virtio_gpu_notify(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	dev->dev_private = NULL;
	kfree(vgdev);
	return ret;
}

static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	vgdev->vdev->config->reset(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}

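/*
 * Unlike the DRM-managed v6.13.7 version above, this release path must
 * free the capsets array and the vgdev structure by hand.
 */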
void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	if (!vgdev)
		return;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);

	if (vgdev->has_host_visible)
		drm_mm_takedown(&vgdev->host_visible_mm);

	kfree(vgdev->capsets);
	kfree(vgdev);
}

int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int handle;

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	mutex_init(&vfpriv->context_lock);

	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
	if (handle < 0) {
		kfree(vfpriv);
		return handle;
	}

	vfpriv->ctx_id = handle + 1;
	file->driver_priv = vfpriv;
	return 0;
}

void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	if (!vgdev->has_virgl_3d)
		return;

	if (vfpriv->context_created) {
		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
		virtio_gpu_notify(vgdev);
	}

	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
	mutex_destroy(&vfpriv->context_lock);
	kfree(vfpriv);
	file->driver_priv = NULL;
}