drivers/gpu/drm/vkms/vkms_composer.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0+
  2
  3#include <linux/crc32.h>
  4
  5#include <drm/drm_atomic.h>
  6#include <drm/drm_atomic_helper.h>
  7#include <drm/drm_blend.h>
  8#include <drm/drm_fourcc.h>
  9#include <drm/drm_fixed.h>
 10#include <drm/drm_gem_framebuffer_helper.h>
 11#include <drm/drm_vblank.h>
 12#include <linux/minmax.h>
 13
 14#include "vkms_drv.h"
 15
 16static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
 17{
 18	u32 new_color;
 19
 20	new_color = (src * 0xffff + dst * (0xffff - alpha));
 21
 22	return DIV_ROUND_CLOSEST(new_color, 0xffff);
 23}
 24
 25/**
 26 * pre_mul_alpha_blend - alpha blending equation
 27 * @frame_info: Source framebuffer's metadata
 28 * @stage_buffer: The line with the pixels from src_plane
 29 * @output_buffer: A line buffer that receives all the blends output
 30 *
 31 * Using the information from the `frame_info`, this blends only the
 32 * necessary pixels from the `stage_buffer` into the `output_buffer`
 33 * using the premultiplied blend formula.
 34 *
 35 * The current DRM assumption is that pixel color values have already been
 36 * pre-multiplied with the alpha channel values. See
 37 * drm_plane_create_blend_mode_property() for details. Also, this formula assumes a
 38 * completely opaque background.
 39 */
 40static void pre_mul_alpha_blend(struct vkms_frame_info *frame_info,
 41				struct line_buffer *stage_buffer,
 42				struct line_buffer *output_buffer)
 43{
 44	int x_dst = frame_info->dst.x1;
 45	struct pixel_argb_u16 *out = output_buffer->pixels + x_dst;
 46	struct pixel_argb_u16 *in = stage_buffer->pixels;
 47	int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
 48			    stage_buffer->n_pixels);
 49
 50	for (int x = 0; x < x_limit; x++) {
 51		out[x].a = (u16)0xffff;
 52		out[x].r = pre_mul_blend_channel(in[x].r, out[x].r, in[x].a);
 53		out[x].g = pre_mul_blend_channel(in[x].g, out[x].g, in[x].a);
 54		out[x].b = pre_mul_blend_channel(in[x].b, out[x].b, in[x].a);
 55	}
 56}
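
Since the source channels are already premultiplied by alpha, the per-channel arithmetic above is the classic "over" operator, out = src + dst * (1 - alpha), carried out on 16-bit channels. Below is a minimal standalone sketch of the same arithmetic (userspace-only; the helper name, values, and the DIV_ROUND_CLOSEST() stand-in are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's DIV_ROUND_CLOSEST() macro. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

/* Same arithmetic as pre_mul_blend_channel(): out = src + dst * (1 - alpha). */
static uint16_t blend_channel_u16(uint16_t src, uint16_t dst, uint16_t alpha)
{
	uint32_t new_color = src * 0xffffu + dst * (0xffffu - alpha);

	return DIV_ROUND_CLOSEST(new_color, 0xffffu);
}

int main(void)
{
	/* 50%-alpha red (premultiplied, so r = 0x7fff) over an opaque white background. */
	printf("blended red channel: 0x%04x\n",
	       (unsigned)blend_channel_u16(0x7fff, 0xffff, 0x8000));	/* prints 0xfffe */
	return 0;
}

Blending half-transparent red over opaque white leaves the channel near full scale (0xfffe), which is exactly what pre_mul_blend_channel() computes for the same inputs.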
 57
 58static int get_y_pos(struct vkms_frame_info *frame_info, int y)
 59{
 60	if (frame_info->rotation & DRM_MODE_REFLECT_Y)
 61		return drm_rect_height(&frame_info->rotated) - y - 1;
 62
 63	switch (frame_info->rotation & DRM_MODE_ROTATE_MASK) {
 64	case DRM_MODE_ROTATE_90:
 65		return frame_info->rotated.x2 - y - 1;
 66	case DRM_MODE_ROTATE_270:
 67		return y + frame_info->rotated.x1;
 68	default:
 69		return y;
 70	}
 71}
 72
 73static bool check_limit(struct vkms_frame_info *frame_info, int pos)
 74{
 75	if (drm_rotation_90_or_270(frame_info->rotation)) {
 76		if (pos >= 0 && pos < drm_rect_width(&frame_info->rotated))
 77			return true;
 78	} else {
 79		if (pos >= frame_info->rotated.y1 && pos < frame_info->rotated.y2)
 80			return true;
 81	}
 82
 83	return false;
 84}
 85
 86static void fill_background(const struct pixel_argb_u16 *background_color,
 87			    struct line_buffer *output_buffer)
 88{
 89	for (size_t i = 0; i < output_buffer->n_pixels; i++)
 90		output_buffer->pixels[i] = *background_color;
 91}
 92
 93// lerp(a, b, t) = a + (b - a) * t
 94static u16 lerp_u16(u16 a, u16 b, s64 t)
 95{
 96	s64 a_fp = drm_int2fixp(a);
 97	s64 b_fp = drm_int2fixp(b);
 98
 99	s64 delta = drm_fixp_mul(b_fp - a_fp,  t);
100
101	return drm_fixp2int(a_fp + delta);
102}
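
drm_int2fixp(), drm_fixp_mul() and drm_fixp2int() come from <drm/drm_fixed.h> and operate on signed 32.32 fixed-point values, so `t` here is a fraction in [0, 1) scaled by 2^32. A rough userspace sketch of the same interpolation, with a hand-rolled 32.32 helper standing in for the DRM ones (names and values are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled 32.32 helper standing in for drm_int2fixp(). */
static int64_t int2fixp(int a)
{
	return (int64_t)a << 32;
}

/* Same idea as lerp_u16(): a + (b - a) * t, with t in 32.32 fixed point. */
static uint16_t lerp_u16_sketch(uint16_t a, uint16_t b, int64_t t)
{
	/*
	 * Because a and b are small integers, drm_fixp_mul(b_fp - a_fp, t)
	 * reduces to (b - a) * t here, with no overflow concerns.
	 */
	int64_t delta = (int64_t)(b - a) * t;

	return (uint16_t)((int2fixp(a) + delta) >> 32);
}

int main(void)
{
	int64_t half = int2fixp(1) >> 1;	/* t = 0.5 in 32.32 */

	/* Halfway between 0x1000 and 0x2000 is 0x1800. */
	printf("0x%04x\n", (unsigned)lerp_u16_sketch(0x1000, 0x2000, half));
	return 0;
}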
103
104static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
105{
106	s64 color_channel_fp = drm_int2fixp(channel_value);
107
108	return drm_fixp_mul(color_channel_fp, lut->channel_value2index_ratio);
109}
110
111/*
112 * This enum is related to the positions of the variables inside
113 * `struct drm_color_lut`, so the order of both needs to be the same.
114 */
115enum lut_channel {
116	LUT_RED = 0,
117	LUT_GREEN,
118	LUT_BLUE,
119	LUT_RESERVED
120};
121
122static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value,
123				      enum lut_channel channel)
124{
125	s64 lut_index = get_lut_index(lut, channel_value);
126	u16 *floor_lut_value, *ceil_lut_value;
127	u16 floor_channel_value, ceil_channel_value;
128
129	/*
130	 * This checks if `struct drm_color_lut` has any gap added by the compiler
131	 * between the struct fields.
132	 */
133	static_assert(sizeof(struct drm_color_lut) == sizeof(__u16) * 4);
134
135	floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
136	if (drm_fixp2int(lut_index) == (lut->lut_length - 1))
137		/* We're at the end of the LUT array, use same value for ceil and floor */
138		ceil_lut_value = floor_lut_value;
139	else
140		ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
141
142	floor_channel_value = floor_lut_value[channel];
143	ceil_channel_value = ceil_lut_value[channel];
144
145	return lerp_u16(floor_channel_value, ceil_channel_value,
146			lut_index & DRM_FIXED_DECIMAL_MASK);
147}
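
A worked example may help here; the numbers below assume a hypothetical 256-entry gamma LUT (lut_length = 256), which is not something this file mandates:

/*
 * channel_value2index_ratio = (256 - 1) / 0xffff      ~ 0.00389 in 32.32
 * channel_value             = 0x8000 (32768)
 * lut_index                 = 32768 * 255 / 65535     ~ 127.502
 *
 * so drm_fixp2int(lut_index) = 127, drm_fixp2int_ceil(lut_index) = 128, and
 * the returned value is lerp_u16(lut[127].red, lut[128].red, ~0.502) when
 * channel == LUT_RED, and likewise for the green and blue fields.
 */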
148
149static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buffer *output_buffer)
150{
151	if (!crtc_state->gamma_lut.base)
152		return;
153
154	if (!crtc_state->gamma_lut.lut_length)
155		return;
156
157	for (size_t x = 0; x < output_buffer->n_pixels; x++) {
158		struct pixel_argb_u16 *pixel = &output_buffer->pixels[x];
159
160		pixel->r = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->r, LUT_RED);
161		pixel->g = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->g, LUT_GREEN);
162		pixel->b = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->b, LUT_BLUE);
163	}
164}
165
166/**
167 * blend - blend the pixels from all planes and compute crc
168 * @wb: The writeback frame buffer metadata
169 * @crtc_state: The crtc state
170 * @crc32: The crc output of the final frame
171 * @output_buffer: A buffer of a row that will receive the result of the blend(s)
172 * @stage_buffer: The line with the pixels from the plane being blended into the output
173 * @row_size: The size, in bytes, of a single row
174 *
175 * This function blends the pixels (Using the `pre_mul_alpha_blend`)
176 * from all planes, calculates the crc32 of the output from the former step,
177 * and, if necessary, converts and stores the output in the writeback buffer.
178 */
179static void blend(struct vkms_writeback_job *wb,
180		  struct vkms_crtc_state *crtc_state,
181		  u32 *crc32, struct line_buffer *stage_buffer,
182		  struct line_buffer *output_buffer, size_t row_size)
183{
184	struct vkms_plane_state **plane = crtc_state->active_planes;
185	u32 n_active_planes = crtc_state->num_active_planes;
186	int y_pos;
187
188	const struct pixel_argb_u16 background_color = { .a = 0xffff };
189
190	size_t crtc_y_limit = crtc_state->base.mode.vdisplay;
191
192	/*
193	 * The planes are composed line-by-line to avoid heavy memory usage. This added
194	 * complexity is necessary to avoid poor blending performance.
195	 *
196	 * The function vkms_compose_row() is used to read a line, pixel-by-pixel, into the staging
197	 * buffer.
198	 */
199	for (size_t y = 0; y < crtc_y_limit; y++) {
200		fill_background(&background_color, output_buffer);
201
202		/* The active planes are composed associatively in z-order. */
203		for (size_t i = 0; i < n_active_planes; i++) {
204			y_pos = get_y_pos(plane[i]->frame_info, y);
205
206			if (!check_limit(plane[i]->frame_info, y_pos))
207				continue;
208
209			vkms_compose_row(stage_buffer, plane[i], y_pos);
210			pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
211					    output_buffer);
212		}
213
214		apply_lut(crtc_state, output_buffer);
215
216		*crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);
217
218		if (wb)
219			vkms_writeback_row(wb, output_buffer, y_pos);
220	}
221}
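
In outline, each frame is produced by the following per-scanline pipeline (an illustrative summary of blend() above, not additional kernel code):

/*
 * for each scanline y in [0, vdisplay):
 *     output_buffer = opaque background
 *     for each active plane, in z-order:
 *         if the plane covers this scanline:
 *             stage_buffer  = plane's pixels for this line (vkms_compose_row)
 *             output_buffer = pre_mul_alpha_blend(stage_buffer, output_buffer)
 *     apply_lut(output_buffer)
 *     crc32 = crc32_le(crc32, output_buffer, row_size)
 *     if a writeback job is pending:
 *         vkms_writeback_row(wb, output_buffer)
 */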
222
223static int check_format_funcs(struct vkms_crtc_state *crtc_state,
224			      struct vkms_writeback_job *active_wb)
225{
226	struct vkms_plane_state **planes = crtc_state->active_planes;
227	u32 n_active_planes = crtc_state->num_active_planes;
228
229	for (size_t i = 0; i < n_active_planes; i++)
230		if (!planes[i]->pixel_read)
231			return -1;
232
233	if (active_wb && !active_wb->pixel_write)
234		return -1;
235
236	return 0;
237}
238
239static int check_iosys_map(struct vkms_crtc_state *crtc_state)
240{
241	struct vkms_plane_state **plane_state = crtc_state->active_planes;
242	u32 n_active_planes = crtc_state->num_active_planes;
243
244	for (size_t i = 0; i < n_active_planes; i++)
245		if (iosys_map_is_null(&plane_state[i]->frame_info->map[0]))
246			return -1;
247
248	return 0;
249}
250
251static int compose_active_planes(struct vkms_writeback_job *active_wb,
252				 struct vkms_crtc_state *crtc_state,
253				 u32 *crc32)
254{
255	size_t line_width, pixel_size = sizeof(struct pixel_argb_u16);
256	struct line_buffer output_buffer, stage_buffer;
257	int ret = 0;
258
259	/*
260	 * This check exists so we can call `crc32_le` for the entire line
261	 * instead of doing it for each channel of each pixel, in case
262	 * `struct pixel_argb_u16` had any gap added by the compiler
263	 * between the struct fields.
264	 */
265	static_assert(sizeof(struct pixel_argb_u16) == 8);
266
267	if (WARN_ON(check_iosys_map(crtc_state)))
268		return -EINVAL;
269
270	if (WARN_ON(check_format_funcs(crtc_state, active_wb)))
271		return -EINVAL;
272
273	line_width = crtc_state->base.mode.hdisplay;
274	stage_buffer.n_pixels = line_width;
275	output_buffer.n_pixels = line_width;
276
277	stage_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
278	if (!stage_buffer.pixels) {
279		DRM_ERROR("Cannot allocate memory for the staging line buffer");
280		return -ENOMEM;
281	}
282
283	output_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
284	if (!output_buffer.pixels) {
285		DRM_ERROR("Cannot allocate memory for the output line buffer");
286		ret = -ENOMEM;
287		goto free_stage_buffer;
288	}
289
290	blend(active_wb, crtc_state, crc32, &stage_buffer,
291	      &output_buffer, line_width * pixel_size);
292
293	kvfree(output_buffer.pixels);
294free_stage_buffer:
295	kvfree(stage_buffer.pixels);
296
297	return ret;
298}
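
For a sense of scale, a worked example with a made-up mode:

/*
 * With hdisplay = 1024, each line buffer holds
 *     1024 * sizeof(struct pixel_argb_u16) = 1024 * 8 = 8192 bytes,
 * and blend() feeds exactly row_size = 8192 bytes per scanline into
 * crc32_le(). The static_assert above is what guarantees the struct has no
 * compiler padding, so hashing the raw buffer equals hashing the channels.
 */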
299
300/**
301 * vkms_composer_worker - ordered work_struct to compute CRC
302 *
303 * @work: work_struct
304 *
305 * Work handler for composing and computing CRCs. work_struct scheduled in
306 * an ordered workqueue that's periodically scheduled to run by
307 * vkms_vblank_simulate() and flushed at vkms_atomic_commit_tail().
308 */
309void vkms_composer_worker(struct work_struct *work)
310{
311	struct vkms_crtc_state *crtc_state = container_of(work,
312						struct vkms_crtc_state,
313						composer_work);
314	struct drm_crtc *crtc = crtc_state->base.crtc;
315	struct vkms_writeback_job *active_wb = crtc_state->active_writeback;
316	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
317	bool crc_pending, wb_pending;
318	u64 frame_start, frame_end;
319	u32 crc32 = 0;
320	int ret;
321
322	spin_lock_irq(&out->composer_lock);
323	frame_start = crtc_state->frame_start;
324	frame_end = crtc_state->frame_end;
325	crc_pending = crtc_state->crc_pending;
326	wb_pending = crtc_state->wb_pending;
327	crtc_state->frame_start = 0;
328	crtc_state->frame_end = 0;
329	crtc_state->crc_pending = false;
330
331	if (crtc->state->gamma_lut) {
332		s64 max_lut_index_fp;
333		s64 u16_max_fp = drm_int2fixp(0xffff);
334
335		crtc_state->gamma_lut.base = (struct drm_color_lut *)crtc->state->gamma_lut->data;
336		crtc_state->gamma_lut.lut_length =
337			crtc->state->gamma_lut->length / sizeof(struct drm_color_lut);
338		max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length  - 1);
339		crtc_state->gamma_lut.channel_value2index_ratio = drm_fixp_div(max_lut_index_fp,
340									       u16_max_fp);
341
342	} else {
343		crtc_state->gamma_lut.base = NULL;
344	}
345
346	spin_unlock_irq(&out->composer_lock);
347
348	/*
349	 * We raced with the vblank hrtimer and previous work already computed
350	 * the crc, nothing to do.
351	 */
352	if (!crc_pending)
353		return;
354
355	if (wb_pending)
356		ret = compose_active_planes(active_wb, crtc_state, &crc32);
357	else
358		ret = compose_active_planes(NULL, crtc_state, &crc32);
359
360	if (ret)
361		return;
362
363	if (wb_pending) {
364		drm_writeback_signal_completion(&out->wb_connector, 0);
365		spin_lock_irq(&out->composer_lock);
366		crtc_state->wb_pending = false;
367		spin_unlock_irq(&out->composer_lock);
368	}
369
370	/*
371	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
372	 */
373	while (frame_start <= frame_end)
374		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
375}
376
377static const char * const pipe_crc_sources[] = {"auto"};
378
379const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
380					size_t *count)
381{
382	*count = ARRAY_SIZE(pipe_crc_sources);
383	return pipe_crc_sources;
384}
385
386static int vkms_crc_parse_source(const char *src_name, bool *enabled)
387{
388	int ret = 0;
389
390	if (!src_name) {
391		*enabled = false;
392	} else if (strcmp(src_name, "auto") == 0) {
393		*enabled = true;
394	} else {
395		*enabled = false;
396		ret = -EINVAL;
397	}
398
399	return ret;
400}
401
402int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
403			   size_t *values_cnt)
404{
405	bool enabled;
406
407	if (vkms_crc_parse_source(src_name, &enabled) < 0) {
408		DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
409		return -EINVAL;
410	}
411
412	*values_cnt = 1;
413
414	return 0;
415}
416
417void vkms_set_composer(struct vkms_output *out, bool enabled)
418{
419	bool old_enabled;
420
421	if (enabled)
422		drm_crtc_vblank_get(&out->crtc);
423
424	spin_lock_irq(&out->lock);
425	old_enabled = out->composer_enabled;
426	out->composer_enabled = enabled;
427	spin_unlock_irq(&out->lock);
428
429	if (old_enabled)
430		drm_crtc_vblank_put(&out->crtc);
431}
432
433int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
434{
435	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
436	bool enabled = false;
437	int ret = 0;
438
439	ret = vkms_crc_parse_source(src_name, &enabled);
440
441	vkms_set_composer(out, enabled);
442
443	return ret;
444}
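
These CRC hooks plug into DRM's generic debugfs CRC interface, so the composer can be exercised from userspace. The sketch below is illustrative only: it assumes debugfs is mounted at /sys/kernel/debug and that the vkms device is DRM minor 0 with its CRTC exposed as crtc-0; adjust the paths for the actual setup.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char line[256];
	ssize_t n;
	int fd;

	/* Select the only CRC source vkms exposes ("auto"). */
	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "auto", strlen("auto")) < 0) {
		close(fd);
		return 1;
	}
	close(fd);

	/* Opening crc/data starts capture; reads block until a frame is composed. */
	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, line, sizeof(line) - 1);
	if (n > 0) {
		line[n] = '\0';
		printf("%s", line);	/* frame counter plus the crc32 value */
	}
	close(fd);
	return 0;
}

Writing "auto" reaches vkms_set_crc_source() via the DRM core, and each line read from crc/data carries a frame number together with the crc32 computed by vkms_composer_worker().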
drivers/gpu/drm/vkms/vkms_composer.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0+
  2
  3#include <linux/crc32.h>
  4
  5#include <drm/drm_atomic.h>
  6#include <drm/drm_atomic_helper.h>
  7#include <drm/drm_fourcc.h>
  8#include <drm/drm_gem_framebuffer_helper.h>
  9#include <drm/drm_gem_shmem_helper.h>
 10#include <drm/drm_vblank.h>
 11
 12#include "vkms_drv.h"
 13
 14static u32 get_pixel_from_buffer(int x, int y, const u8 *buffer,
 15				 const struct vkms_composer *composer)
 16{
 17	u32 pixel;
 18	int src_offset = composer->offset + (y * composer->pitch)
 19				      + (x * composer->cpp);
 20
 21	pixel = *(u32 *)&buffer[src_offset];
 22
 23	return pixel;
 24}
 25
 26/**
 27 * compute_crc - Compute CRC value on output frame
 28 *
 29 * @vaddr: address of the final framebuffer
 30 * @composer: framebuffer's metadata
 31 *
 32 * Returns the CRC value computed using crc32 on the visible portion of
 33 * the final framebuffer at @vaddr.
 34 */
 35static uint32_t compute_crc(const u8 *vaddr,
 36			    const struct vkms_composer *composer)
 37{
 38	int x, y;
 39	u32 crc = 0, pixel = 0;
 40	int x_src = composer->src.x1 >> 16;
 41	int y_src = composer->src.y1 >> 16;
 42	int h_src = drm_rect_height(&composer->src) >> 16;
 43	int w_src = drm_rect_width(&composer->src) >> 16;
 44
 45	for (y = y_src; y < y_src + h_src; ++y) {
 46		for (x = x_src; x < x_src + w_src; ++x) {
 47			pixel = get_pixel_from_buffer(x, y, vaddr, composer);
 48			crc = crc32_le(crc, (void *)&pixel, sizeof(u32));
 49		}
 50	}
 51
 52	return crc;
 53}
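
The `>> 16` shifts are there because the src rectangle is kept in 16.16 fixed point (as in struct drm_plane_state), while dst coordinates are whole pixels. A worked example with made-up values:

/*
 * For a 64x64 plane scanned out from its top-left corner,
 *     src = { .x1 = 0 << 16, .y1 = 0 << 16, .x2 = 64 << 16, .y2 = 64 << 16 },
 * so x_src = y_src = 0 and w_src = h_src = 64. get_pixel_from_buffer() then
 * reads pixel (x, y) at offset + y * pitch + x * cpp (cpp = 4 for
 * [AX]RGB8888), and compute_crc() feeds those 64 * 64 u32 values into
 * crc32_le().
 */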
 54
 55static u8 blend_channel(u8 src, u8 dst, u8 alpha)
 56{
 57	u32 pre_blend;
 58	u8 new_color;
 59
 60	pre_blend = (src * 255 + dst * (255 - alpha));
 61
 62	/* Faster div by 255 */
 63	new_color = ((pre_blend + ((pre_blend + 257) >> 8)) >> 8);
 64
 65	return new_color;
 66}
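
The "faster div by 255" expression is not an approximation: for every value below 65535, and in particular for anything blend_channel() can produce from premultiplied inputs (at most 255 * 255), it equals integer division by 255. A throwaway userspace check, not part of the kernel:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	for (uint32_t x = 0; x < 65535; x++) {
		/* Same expression as in blend_channel(). */
		uint32_t fast = (x + ((x + 257) >> 8)) >> 8;

		assert(fast == x / 255);
	}
	return 0;
}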
 67
 68/**
 69 * alpha_blend - alpha blending equation
 70 * @argb_src: src pixel in premultiplied alpha mode
 71 * @argb_dst: dst pixel, assumed completely opaque
 72 *
 73 * Blend pixels using the premultiplied blend formula. The current DRM
 74 * assumption is that pixel color values have already been pre-multiplied with
 75 * the alpha channel values. See drm_plane_create_blend_mode_property() for
 76 * details. Also, this formula assumes a completely opaque background.
 77 */
 78static void alpha_blend(const u8 *argb_src, u8 *argb_dst)
 79{
 80	u8 alpha;
 81
 82	alpha = argb_src[3];
 83	argb_dst[0] = blend_channel(argb_src[0], argb_dst[0], alpha);
 84	argb_dst[1] = blend_channel(argb_src[1], argb_dst[1], alpha);
 85	argb_dst[2] = blend_channel(argb_src[2], argb_dst[2], alpha);
 86}
 87
 88/**
 89 * x_blend - blending equation that ignores the pixel alpha
 90 *
 91 * Overwrites the RGB color values of the dst pixel with those of the src pixel.
 92 */
 93static void x_blend(const u8 *xrgb_src, u8 *xrgb_dst)
 94{
 95	memcpy(xrgb_dst, xrgb_src, sizeof(u8) * 3);
 96}
 97
 98/**
 99 * blend - blend value at vaddr_src with value at vaddr_dst
100 * @vaddr_dst: destination address
101 * @vaddr_src: source address
102 * @dst_composer: destination framebuffer's metadata
103 * @src_composer: source framebuffer's metadata
104 * @pixel_blend: blending equation based on plane format
105 *
106 * Blend the vaddr_src value with the vaddr_dst value using a pixel blend
107 * equation according to the supported plane formats DRM_FORMAT_(A/XRGB8888)
108 * and clearing the alpha channel to a completely opaque background. This function
109 * uses the buffers' metadata to locate the new composite values at vaddr_dst.
110 *
111 * TODO: completely clear the primary plane (a = 0xff) before starting to blend
112 * pixel color values
113 */
114static void blend(void *vaddr_dst, void *vaddr_src,
115		  struct vkms_composer *dst_composer,
116		  struct vkms_composer *src_composer,
117		  void (*pixel_blend)(const u8 *, u8 *))
118{
119	int i, j, j_dst, i_dst;
120	int offset_src, offset_dst;
121	u8 *pixel_dst, *pixel_src;
122
123	int x_src = src_composer->src.x1 >> 16;
124	int y_src = src_composer->src.y1 >> 16;
125
126	int x_dst = src_composer->dst.x1;
127	int y_dst = src_composer->dst.y1;
128	int h_dst = drm_rect_height(&src_composer->dst);
129	int w_dst = drm_rect_width(&src_composer->dst);
130
131	int y_limit = y_src + h_dst;
132	int x_limit = x_src + w_dst;
133
134	for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
135		for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
136			offset_dst = dst_composer->offset
137				     + (i_dst * dst_composer->pitch)
138				     + (j_dst++ * dst_composer->cpp);
139			offset_src = src_composer->offset
140				     + (i * src_composer->pitch)
141				     + (j * src_composer->cpp);
142
143			pixel_src = (u8 *)(vaddr_src + offset_src);
144			pixel_dst = (u8 *)(vaddr_dst + offset_dst);
145			pixel_blend(pixel_src, pixel_dst);
146			/* clearing alpha channel (0xff) */
147			pixel_dst[3] = 0xff;
148		}
149		i_dst++;
150	}
151}
152
153static void compose_plane(struct vkms_composer *primary_composer,
154			  struct vkms_composer *plane_composer,
155			  void *vaddr_out)
156{
157	struct drm_gem_object *plane_obj;
158	struct drm_gem_shmem_object *plane_shmem_obj;
159	struct drm_framebuffer *fb = &plane_composer->fb;
160	void (*pixel_blend)(const u8 *p_src, u8 *p_dst);
161
162	plane_obj = drm_gem_fb_get_obj(&plane_composer->fb, 0);
163	plane_shmem_obj = to_drm_gem_shmem_obj(plane_obj);
164
165	if (WARN_ON(!plane_shmem_obj->vaddr))
166		return;
167
168	if (fb->format->format == DRM_FORMAT_ARGB8888)
169		pixel_blend = &alpha_blend;
170	else
171		pixel_blend = &x_blend;
172
173	blend(vaddr_out, plane_shmem_obj->vaddr, primary_composer,
174	      plane_composer, pixel_blend);
175}
176
177static int compose_active_planes(void **vaddr_out,
178				 struct vkms_composer *primary_composer,
179				 struct vkms_crtc_state *crtc_state)
180{
181	struct drm_framebuffer *fb = &primary_composer->fb;
182	struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
183	struct drm_gem_shmem_object *shmem_obj = to_drm_gem_shmem_obj(gem_obj);
184	int i;
185
186	if (!*vaddr_out) {
187		*vaddr_out = kzalloc(shmem_obj->base.size, GFP_KERNEL);
188		if (!*vaddr_out) {
189			DRM_ERROR("Cannot allocate memory for output frame.");
190			return -ENOMEM;
191		}
192	}
193
194	if (WARN_ON(!shmem_obj->vaddr))
195		return -EINVAL;
196
197	memcpy(*vaddr_out, shmem_obj->vaddr, shmem_obj->base.size);
198
199	/* If there are other planes besides primary, we consider the active
200	 * planes should be in z-order and compose them associatively:
201	 * ((primary <- overlay) <- cursor)
202	 */
203	for (i = 1; i < crtc_state->num_active_planes; i++)
204		compose_plane(primary_composer,
205			      crtc_state->active_planes[i]->composer,
206			      *vaddr_out);
207
208	return 0;
209}
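
For contrast with the per-scanline approach in the newer version above, this is the whole-frame flow (an illustrative summary, not additional kernel code):

/*
 * vaddr_out = copy of the primary plane's framebuffer (memcpy above)
 * for each remaining active plane, in z-order (overlay, then cursor):
 *     blend the plane's visible rectangle on top of vaddr_out
 * compute_crc(vaddr_out) then hashes the visible region of the result
 */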
210
211/**
212 * vkms_composer_worker - ordered work_struct to compute CRC
213 *
214 * @work: work_struct
215 *
216 * Work handler for composing and computing CRCs. work_struct scheduled in
217 * an ordered workqueue that's periodically scheduled to run by
218 * _vblank_handle() and flushed at vkms_atomic_crtc_destroy_state().
219 */
220void vkms_composer_worker(struct work_struct *work)
221{
222	struct vkms_crtc_state *crtc_state = container_of(work,
223						struct vkms_crtc_state,
224						composer_work);
225	struct drm_crtc *crtc = crtc_state->base.crtc;
226	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
227	struct vkms_composer *primary_composer = NULL;
228	struct vkms_plane_state *act_plane = NULL;
229	bool crc_pending, wb_pending;
230	void *vaddr_out = NULL;
231	u32 crc32 = 0;
232	u64 frame_start, frame_end;
233	int ret;
234
235	spin_lock_irq(&out->composer_lock);
236	frame_start = crtc_state->frame_start;
237	frame_end = crtc_state->frame_end;
238	crc_pending = crtc_state->crc_pending;
239	wb_pending = crtc_state->wb_pending;
240	crtc_state->frame_start = 0;
241	crtc_state->frame_end = 0;
242	crtc_state->crc_pending = false;
243	spin_unlock_irq(&out->composer_lock);
244
245	/*
246	 * We raced with the vblank hrtimer and previous work already computed
247	 * the crc, nothing to do.
248	 */
249	if (!crc_pending)
250		return;
251
252	if (crtc_state->num_active_planes >= 1) {
253		act_plane = crtc_state->active_planes[0];
254		if (act_plane->base.plane->type == DRM_PLANE_TYPE_PRIMARY)
255			primary_composer = act_plane->composer;
256	}
257
258	if (!primary_composer)
259		return;
260
261	if (wb_pending)
262		vaddr_out = crtc_state->active_writeback;
263
264	ret = compose_active_planes(&vaddr_out, primary_composer,
265				    crtc_state);
266	if (ret) {
267		if (ret == -EINVAL && !wb_pending)
268			kfree(vaddr_out);
269		return;
270	}
271
272	crc32 = compute_crc(vaddr_out, primary_composer);
273
274	if (wb_pending) {
275		drm_writeback_signal_completion(&out->wb_connector, 0);
276		spin_lock_irq(&out->composer_lock);
277		crtc_state->wb_pending = false;
278		spin_unlock_irq(&out->composer_lock);
279	} else {
280		kfree(vaddr_out);
281	}
282
283	/*
284	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
285	 */
286	while (frame_start <= frame_end)
287		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
288}
289
290static const char * const pipe_crc_sources[] = {"auto"};
291
292const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
293					size_t *count)
294{
295	*count = ARRAY_SIZE(pipe_crc_sources);
296	return pipe_crc_sources;
297}
298
299static int vkms_crc_parse_source(const char *src_name, bool *enabled)
300{
301	int ret = 0;
302
303	if (!src_name) {
304		*enabled = false;
305	} else if (strcmp(src_name, "auto") == 0) {
306		*enabled = true;
307	} else {
308		*enabled = false;
309		ret = -EINVAL;
310	}
311
312	return ret;
313}
314
315int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
316			   size_t *values_cnt)
317{
318	bool enabled;
319
320	if (vkms_crc_parse_source(src_name, &enabled) < 0) {
321		DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
322		return -EINVAL;
323	}
324
325	*values_cnt = 1;
326
327	return 0;
328}
329
330void vkms_set_composer(struct vkms_output *out, bool enabled)
331{
332	bool old_enabled;
333
334	if (enabled)
335		drm_crtc_vblank_get(&out->crtc);
336
337	spin_lock_irq(&out->lock);
338	old_enabled = out->composer_enabled;
339	out->composer_enabled = enabled;
340	spin_unlock_irq(&out->lock);
341
342	if (old_enabled)
343		drm_crtc_vblank_put(&out->crtc);
344}
345
346int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
347{
348	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
349	bool enabled = false;
350	int ret = 0;
351
352	ret = vkms_crc_parse_source(src_name, &enabled);
353
354	vkms_set_composer(out, enabled);
355
356	return ret;
357}