v6.13.7
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}
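
A side note on the mapping above: DRM fourccs name the channels of a little-endian word while the virtio-gpu enum names the bytes in memory order, which is why DRM_FORMAT_XRGB8888 pairs with VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM. A minimal, hypothetical call:

	uint32_t fmt = virtio_gpu_translate_format(DRM_FORMAT_XRGB8888);
	/* fmt == VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; 0 would trip the WARN_ON */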

static struct
drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
{
	struct virtio_gpu_plane_state *new;

	if (WARN_ON(!plane->state))
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);

	return &new->base;
}
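
This duplicate hook uses the standard DRM pattern of subclassing the core state; a sketch of the definitions assumed from virtgpu_drv.h (the fence member is what prepare_fb/cleanup_fb below manage):

struct virtio_gpu_plane_state {
	struct drm_plane_state base;
	struct virtio_gpu_fence *fence;
};

#define to_virtio_gpu_plane_state(x) \
	container_of(x, struct virtio_gpu_plane_state, base)

Since base is the first member, the &new->base returned above and the plane->state pointer the core hands back later refer to the same allocation.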

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
										 plane);
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	/*
	 * Ignore damage clips if the framebuffer attached to the plane's state
	 * has changed since the last plane update (page-flip). In this case, a
	 * full plane update should happen because uploads are done per-buffer.
	 */
	if (old_plane_state->fb != new_plane_state->fb)
		new_plane_state->ignore_damage_clips = true;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  is_cursor, true);
	return ret;
}
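
Because both scaling limits here are DRM_PLANE_NO_SCALING, the helper rejects any configuration where the 16.16 fixed-point source size differs from the CRTC rectangle. An illustrative check, with values chosen purely for the example:

	/* OK: 1024x768 scanned out 1:1 (src is 16.16 fixed point) */
	new_plane_state->src_w = 1024 << 16;	new_plane_state->crtc_w = 1024;
	/* rejected (-ERANGE): would need 2x horizontal upscaling */
	new_plane_state->src_w = 512 << 16;	new_plane_state->crtc_w = 1024;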

static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}
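
The offset fed to the transfer command above is plain row-major addressing: off = x * cpp + y * pitch. As a worked example with assumed values, an XRGB8888 framebuffer (format->cpp[0] == 4) with pitches[0] == 4096 and a damage rect starting at (16, 32) gives:

	off = 16 * 4 + 32 * 4096;	/* = 131136 bytes into the BO */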

static void virtio_gpu_resource_flush(struct drm_plane *plane,
				      uint32_t x, uint32_t y,
				      uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	vgplane_st = to_virtio_gpu_plane_state(plane->state);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (vgplane_st->fence) {
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, objs,
					      vgplane_st->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
				       msecs_to_jiffies(50));
	} else {
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, NULL, NULL);
		virtio_gpu_notify(vgdev);
	}
}

static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
						(vgdev, output->index, bo,
						 plane->state->fb,
						 plane->state->src_w >> 16,
						 plane->state->src_h >> 16,
						 plane->state->src_x >> 16,
						 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_resource_flush(plane,
				  rect.x1,
				  rect.y1,
				  rect.x2 - rect.x1,
				  rect.y2 - rect.y1);
}
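
For reference, drm_atomic_helper_damage_merged() folds every damage clip on the new state into a single bounding rectangle, so one upload and one flush cover the whole frame. For example, clips (0,0)-(64,64) and (100,100)-(132,132) merge to (0,0)-(132,132); when there is no valid damage the helper returns false and the function above returns before sending anything to the host.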

static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
				       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	vgplane_st = to_virtio_gpu_plane_state(new_state);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
		return 0;

	if (bo->dumb) {
		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
						     vgdev->fence_drv.context,
						     0);
		if (!vgplane_st->fence)
			return -ENOMEM;
	}

	return 0;
}

static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	struct virtio_gpu_plane_state *vgplane_st;

	if (!state->fb)
		return;

	vgplane_st = to_virtio_gpu_plane_state(state);
	if (vgplane_st->fence) {
		dma_fence_put(&vgplane_st->fence->f);
		vgplane_st->fence = NULL;
	}
}

static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		vgplane_st = to_virtio_gpu_plane_state(plane->state);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgplane_st->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait(&vgplane_st->fence->f, true);
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->hotspot_x,
			  plane->state->hotspot_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->hotspot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->hotspot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.prepare_fb		= virtio_gpu_plane_prepare_fb,
	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_plane_prepare_fb,
	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};

struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int nformats;

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}

	plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
					   1 << index, &virtio_gpu_plane_funcs,
					   formats, nformats, NULL, type, NULL);
	if (IS_ERR(plane))
		return plane;

	drm_plane_helper_add(plane, funcs);

	if (type == DRM_PLANE_TYPE_PRIMARY)
		drm_plane_enable_fb_damage_clips(plane);

	return plane;
}
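
A sketch of how a caller might create the planes for scanout index i (example_create_planes and the wiring are assumptions for illustration, not the driver's actual output setup code):

static int example_create_planes(struct virtio_gpu_device *vgdev, int i)
{
	struct drm_plane *primary, *cursor;

	primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, i);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, i);
	if (IS_ERR(cursor))
		return PTR_ERR(cursor);

	/* drmm-managed: both planes are freed with the drm_device */
	return 0;
}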
v5.4
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "virtgpu_drv.h"

static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	return 0;
}

static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb && output->enabled) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
		if (bo->dumb) {
			virtio_gpu_cmd_transfer_to_host_2d
				(vgdev, bo, 0,
				 cpu_to_le32(plane->state->src_w >> 16),
				 cpu_to_le32(plane->state->src_h >> 16),
				 cpu_to_le32(plane->state->src_x >> 16),
				 cpu_to_le32(plane->state->src_y >> 16), NULL);
		}
	} else {
		handle = 0;
	}

	DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
		  plane->state->crtc_w, plane->state->crtc_h,
		  plane->state->crtc_x, plane->state->crtc_y,
		  plane->state->src_w >> 16,
		  plane->state->src_h >> 16,
		  plane->state->src_x >> 16,
		  plane->state->src_y >> 16);
	virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
				   plane->state->src_w >> 16,
				   plane->state->src_h >> 16,
				   plane->state->src_x >> 16,
				   plane->state->src_y >> 16);
	if (handle)
		virtio_gpu_cmd_resource_flush(vgdev, handle,
					      plane->state->src_x >> 16,
					      plane->state->src_y >> 16,
					      plane->state->src_w >> 16,
					      plane->state->src_h >> 16);
}

static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
					struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}

static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!plane->state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}

static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;
	int ret = 0;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, bo, 0,
			 cpu_to_le32(plane->state->crtc_w),
			 cpu_to_le32(plane->state->crtc_h),
			 0, 0, vgfb->fence);
		ret = virtio_gpu_object_reserve(bo, false);
		if (!ret) {
			dma_resv_add_excl_fence(bo->tbo.base.resv,
						&vgfb->fence->f);
			dma_fence_put(&vgfb->fence->f);
			vgfb->fence = NULL;
			virtio_gpu_object_unreserve(bo);
			virtio_gpu_object_wait(bo, false);
		}
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};

struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int ret, nformats;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}
	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &virtio_gpu_plane_funcs,
				       formats, nformats,
				       NULL, type, NULL);
	if (ret)
		goto err_plane_init;

	drm_plane_helper_add(plane, funcs);
	return plane;

err_plane_init:
	kfree(plane);
	return ERR_PTR(ret);
}