// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Noralf Trønnes
 */

#include <linux/lz4.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>

#include "gud_internal.h"

/*
 * Some userspace rendering loops run all displays in the same loop.
 * This means that a fast display will have to wait for a slow one.
 * For this reason gud does flushing asynchronously by default.
 * The downside is that in e.g. a single-display setup userspace thinks
 * the display is insanely fast since the driver reports back immediately
 * that the flush/pageflip is done. This wastes CPU and power.
 * Such users might want to set this module parameter to false.
 */
static bool gud_async_flush = true;
module_param_named(async_flush, gud_async_flush, bool, 0644);
MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=true]");
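
/*
 * With perm 0644 the parameter can also be changed at runtime, e.g.:
 *
 *   echo 0 > /sys/module/gud/parameters/async_flush
 *
 * to make flushing synchronous, or gud.async_flush=0 on the kernel
 * command line.
 */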

/*
 * FIXME: The driver is probably broken on Big Endian machines.
 * See discussion:
 * https://lore.kernel.org/dri-devel/CAKb7UvihLX0hgBOP3VBG7O+atwZcUVCPVuBdfmDMpg0NjXe-cQ@mail.gmail.com/
 */

static bool gud_is_big_endian(void)
{
#if defined(__BIG_ENDIAN)
	return true;
#else
	return false;
#endif
}

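/*
 * Convert an XRGB8888 rect to a packed R1/R2/R4 greyscale buffer (dst).
 * Pixels are packed MSB-first: for R1 the leftmost pixel of each byte
 * lands in bit 7. The pixel value is taken from the high bits of the
 * 8-bit grey level, so for R1 a grey level >= 0x80 becomes 1.
 * Returns the number of bytes in the destination rect, or 0 on
 * allocation failure.
 */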
static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format,
				   void *src, struct drm_framebuffer *fb,
				   struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	unsigned int x, y, width, height;
	u8 pix, *pix8, *block = dst; /* Assign to silence compiler warning */
	struct iosys_map dst_map, vmap;
	size_t len;
	void *buf;

	WARN_ON_ONCE(format->char_per_block[0] != 1);

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	height = drm_rect_height(rect);
	len = drm_format_info_min_pitch(format, 0, width) * height;

	buf = kmalloc(width * height, GFP_KERNEL);
	if (!buf)
		return 0;

	iosys_map_set_vaddr(&dst_map, buf);
	iosys_map_set_vaddr(&vmap, src);
	drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect);
	pix8 = buf;

	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			pix = (*pix8++) >> (8 - bits_per_pixel);
			*block |= pix << pixshift;
		}
	}

	kfree(buf);

	return len;
}

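/*
 * Convert an XRGB8888 rect to a packed component format (dst). Currently
 * only GUD_DRM_FORMAT_XRGB1111 is handled: each pixel is reduced to one
 * bit per colour channel (the MSB of each 8-bit component), packed as
 * 0rgb in a nibble with two pixels per byte, leftmost pixel in the high
 * nibble. Returns the number of bytes in the destination rect.
 */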
static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *format,
				    void *src, struct drm_framebuffer *fb,
				    struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	u8 r, g, b, pix, *block = dst; /* Assign to silence compiler warning */
	unsigned int x, y, width;
	__le32 *sbuf32;
	u32 pix32;
	size_t len;

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	len = drm_format_info_min_pitch(format, 0, width) * drm_rect_height(rect);

	for (y = rect->y1; y < rect->y2; y++) {
		sbuf32 = src + (y * fb->pitches[0]);
		sbuf32 += rect->x1;

		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			pix32 = le32_to_cpu(*sbuf32++);
			r = pix32 >> 16;
			g = pix32 >> 8;
			b = pix32;

			switch (format->format) {
			case GUD_DRM_FORMAT_XRGB1111:
				pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
				break;
			default:
				WARN_ON_ONCE(1);
				return len;
			}

			*block |= pix << pixshift;
		}
	}

	return len;
}

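/*
 * Prepare a flush: convert or copy the damaged rect from the framebuffer
 * into the transfer buffer, optionally LZ4 compressing it into
 * gdrm->bulk_buf, and fill in the set_buffer request that describes the
 * update to the device.
 */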
static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct drm_format_info *format, struct drm_rect *rect,
			  struct gud_set_buffer_req *req)
{
	struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
	u8 compression = gdrm->compression;
	struct iosys_map map[DRM_FORMAT_MAX_PLANES];
	struct iosys_map map_data[DRM_FORMAT_MAX_PLANES];
	struct iosys_map dst;
	void *vaddr, *buf;
	size_t pitch, len;
	int ret = 0;

	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
	len = pitch * drm_rect_height(rect);
	if (len > gdrm->bulk_len)
		return -E2BIG;

	ret = drm_gem_fb_vmap(fb, map, map_data);
	if (ret)
		return ret;

	vaddr = map_data[0].vaddr;

	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		goto vunmap;
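	/*
	 * If LZ4 fails to shrink the data below, compression is cleared and
	 * we jump back here to redo the conversion straight into the bulk
	 * buffer so the update can be sent uncompressed.
	 */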
retry:
	if (compression)
		buf = gdrm->compress_buf;
	else
		buf = gdrm->bulk_buf;
	iosys_map_set_vaddr(&dst, buf);

	/*
	 * Imported buffers are assumed to be write-combined and thus uncached
	 * with slow reads (at least on ARM).
	 */
	if (format != fb->format) {
		if (format->format == GUD_DRM_FORMAT_R1) {
			len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
			if (!len) {
				ret = -ENOMEM;
				goto end_cpu_access;
			}
		} else if (format->format == DRM_FORMAT_R8) {
			drm_fb_xrgb8888_to_gray8(&dst, NULL, map_data, fb, rect);
		} else if (format->format == DRM_FORMAT_RGB332) {
			drm_fb_xrgb8888_to_rgb332(&dst, NULL, map_data, fb, rect);
		} else if (format->format == DRM_FORMAT_RGB565) {
			drm_fb_xrgb8888_to_rgb565(&dst, NULL, map_data, fb, rect,
						  gud_is_big_endian());
		} else if (format->format == DRM_FORMAT_RGB888) {
			drm_fb_xrgb8888_to_rgb888(&dst, NULL, map_data, fb, rect);
		} else {
			len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
		}
	} else if (gud_is_big_endian() && format->cpp[0] > 1) {
		drm_fb_swab(&dst, NULL, map_data, fb, rect, !import_attach);
	} else if (compression && !import_attach && pitch == fb->pitches[0]) {
		/* can compress directly from the framebuffer */
		buf = vaddr + rect->y1 * pitch;
	} else {
		drm_fb_memcpy(&dst, NULL, map_data, fb, rect);
	}

	memset(req, 0, sizeof(*req));
	req->x = cpu_to_le32(rect->x1);
	req->y = cpu_to_le32(rect->y1);
	req->width = cpu_to_le32(drm_rect_width(rect));
	req->height = cpu_to_le32(drm_rect_height(rect));
	req->length = cpu_to_le32(len);

	if (compression & GUD_COMPRESSION_LZ4) {
		int complen;

		complen = LZ4_compress_default(buf, gdrm->bulk_buf, len, len, gdrm->lz4_comp_mem);
		if (complen <= 0) {
			compression = 0;
			goto retry;
		}

		req->compression = GUD_COMPRESSION_LZ4;
		req->compressed_length = cpu_to_le32(complen);
	}

end_cpu_access:
	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
vunmap:
	drm_gem_fb_vunmap(fb, map);

	return ret;
}

struct gud_usb_bulk_context {
	struct timer_list timer;
	struct usb_sg_request sgr;
};

static void gud_usb_bulk_timeout(struct timer_list *t)
{
	struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer);

	usb_sg_cancel(&ctx->sgr);
}

static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
{
	struct gud_usb_bulk_context ctx;
	int ret;

	ret = usb_sg_init(&ctx.sgr, gud_to_usb_device(gdrm), gdrm->bulk_pipe, 0,
			  gdrm->bulk_sgt.sgl, gdrm->bulk_sgt.nents, len, GFP_KERNEL);
	if (ret)
		return ret;

	timer_setup_on_stack(&ctx.timer, gud_usb_bulk_timeout, 0);
	mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000));

	usb_sg_wait(&ctx.sgr);

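	/*
	 * usb_sg_wait() has no timeout of its own, hence the 3 second timer
	 * above. If del_timer_sync() returns false the timer already fired
	 * and cancelled the transfer, so report it as a timeout.
	 */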
	if (!del_timer_sync(&ctx.timer))
		ret = -ETIMEDOUT;
	else if (ctx.sgr.status < 0)
		ret = ctx.sgr.status;
	else if (ctx.sgr.bytes != len)
		ret = -EIO;

	destroy_timer_on_stack(&ctx.timer);

	return ret;
}

static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct drm_format_info *format, struct drm_rect *rect)
{
	struct gud_set_buffer_req req;
	size_t len, trlen;
	int ret;

	drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));

	ret = gud_prep_flush(gdrm, fb, format, rect, &req);
	if (ret)
		return ret;

	len = le32_to_cpu(req.length);

	if (req.compression)
		trlen = le32_to_cpu(req.compressed_length);
	else
		trlen = len;

	gdrm->stats_length += len;
	/* Did it wrap around? */
	if (gdrm->stats_length <= len && gdrm->stats_actual_length) {
		gdrm->stats_length = len;
		gdrm->stats_actual_length = 0;
	}
	gdrm->stats_actual_length += trlen;

	if (!(gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE) || gdrm->prev_flush_failed) {
		ret = gud_usb_set(gdrm, GUD_REQ_SET_BUFFER, 0, &req, sizeof(req));
		if (ret)
			return ret;
	}

	ret = gud_usb_bulk(gdrm, trlen);
	if (ret)
		gdrm->stats_num_errors++;

	return ret;
}

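/*
 * An "empty" damage rect is represented by the inverted rect below
 * (x1/y1 at INT_MAX, x2/y2 at 0) so that the min/max merging in
 * gud_add_damage() works without a separate "is empty" flag.
 */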
void gud_clear_damage(struct gud_device *gdrm)
{
	gdrm->damage.x1 = INT_MAX;
	gdrm->damage.y1 = INT_MAX;
	gdrm->damage.x2 = 0;
	gdrm->damage.y2 = 0;
}

static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
{
	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
}

static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
				   struct drm_rect *damage)
{
	/*
	 * pipe_update waits for the worker when the display mode is going to change.
	 * This ensures that the width and height are still the same, making it safe
	 * to add back the damage.
	 */

	mutex_lock(&gdrm->damage_lock);
	if (!gdrm->fb) {
		drm_framebuffer_get(fb);
		gdrm->fb = fb;
	}
	gud_add_damage(gdrm, damage);
	mutex_unlock(&gdrm->damage_lock);

	/* Retry only once to avoid a possible storm in case of continuous errors. */
	if (!gdrm->prev_flush_failed)
		queue_work(system_long_wq, &gdrm->work);
	gdrm->prev_flush_failed = true;
}

void gud_flush_work(struct work_struct *work)
{
	struct gud_device *gdrm = container_of(work, struct gud_device, work);
	const struct drm_format_info *format;
	struct drm_framebuffer *fb;
	struct drm_rect damage;
	unsigned int i, lines;
	int idx, ret = 0;
	size_t pitch;

	if (!drm_dev_enter(&gdrm->drm, &idx))
		return;

	mutex_lock(&gdrm->damage_lock);
	fb = gdrm->fb;
	gdrm->fb = NULL;
	damage = gdrm->damage;
	gud_clear_damage(gdrm);
	mutex_unlock(&gdrm->damage_lock);

	if (!fb)
		goto out;

	format = fb->format;
	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	/* Split update if it's too big */
	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(&damage));
	lines = drm_rect_height(&damage);

	if (gdrm->bulk_len < lines * pitch)
		lines = gdrm->bulk_len / pitch;

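	/*
	 * Example: a 600 line damage rect with room for 250 lines per bulk
	 * transfer is flushed as three chunks of 250, 250 and 100 lines.
	 */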
	for (i = 0; i < DIV_ROUND_UP(drm_rect_height(&damage), lines); i++) {
		struct drm_rect rect = damage;

		rect.y1 += i * lines;
		rect.y2 = min_t(u32, rect.y1 + lines, damage.y2);

		ret = gud_flush_rect(gdrm, fb, format, &rect);
		if (ret) {
			if (ret != -ENODEV && ret != -ECONNRESET &&
			    ret != -ESHUTDOWN && ret != -EPROTO) {
				bool prev_flush_failed = gdrm->prev_flush_failed;

				gud_retry_failed_flush(gdrm, fb, &damage);
				if (!prev_flush_failed)
					dev_err_ratelimited(fb->dev->dev,
							    "Failed to flush framebuffer: error=%d\n", ret);
			}
			break;
		}

		gdrm->prev_flush_failed = false;
	}

	drm_framebuffer_put(fb);
out:
	drm_dev_exit(idx);
}

static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
				struct drm_rect *damage)
{
	struct drm_framebuffer *old_fb = NULL;

	mutex_lock(&gdrm->damage_lock);

	if (fb != gdrm->fb) {
		old_fb = gdrm->fb;
		drm_framebuffer_get(fb);
		gdrm->fb = fb;
	}

	gud_add_damage(gdrm, damage);

	mutex_unlock(&gdrm->damage_lock);

	queue_work(system_long_wq, &gdrm->work);

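	/* Drop the reference to a replaced framebuffer outside the lock */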
	if (old_fb)
		drm_framebuffer_put(old_fb);
}

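/*
 * Atomic check: build a gud_state_req describing the new display state
 * (mode, transfer format, connector and property values) and send it to
 * the device with GUD_REQ_SET_STATE_CHECK so the device can reject it.
 * The matching GUD_REQ_SET_STATE_COMMIT is sent from gud_pipe_update().
 */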
int gud_pipe_check(struct drm_simple_display_pipe *pipe,
		   struct drm_plane_state *new_plane_state,
		   struct drm_crtc_state *new_crtc_state)
{
	struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
	struct drm_plane_state *old_plane_state = pipe->plane.state;
	const struct drm_display_mode *mode = &new_crtc_state->mode;
	struct drm_atomic_state *state = new_plane_state->state;
	struct drm_framebuffer *old_fb = old_plane_state->fb;
	struct drm_connector_state *connector_state = NULL;
	struct drm_framebuffer *fb = new_plane_state->fb;
	const struct drm_format_info *format;
	struct drm_connector *connector;
	unsigned int i, num_properties;
	struct gud_state_req *req;
	int idx, ret;
	size_t len;

	if (WARN_ON_ONCE(!fb))
		return -EINVAL;

	/* Only dereference fb after the NULL check above */
	format = fb->format;

	if (old_plane_state->rotation != new_plane_state->rotation)
		new_crtc_state->mode_changed = true;

	if (old_fb && old_fb->format != format)
		new_crtc_state->mode_changed = true;

	if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
		return 0;

	/* Only one connector is supported */
	if (hweight32(new_crtc_state->connector_mask) != 1)
		return -EINVAL;

	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc)
			break;
	}

	/*
	 * DRM_IOCTL_MODE_OBJ_SETPROPERTY on the rotation property will not have
	 * the connector included in the state.
	 */
	if (!connector_state) {
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->state->crtc) {
				connector_state = connector->state;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	if (WARN_ON_ONCE(!connector_state))
		return -ENOENT;

	len = struct_size(req, properties,
			  GUD_PROPERTIES_MAX_NUM + GUD_CONNECTOR_PROPERTIES_MAX_NUM);
	req = kzalloc(len, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	gud_from_display_mode(&req->mode, mode);

	req->format = gud_from_fourcc(format->format);
	if (WARN_ON_ONCE(!req->format)) {
		ret = -EINVAL;
		goto out;
	}

	req->connector = drm_connector_index(connector_state->connector);

	ret = gud_connector_fill_properties(connector_state, req->properties);
	if (ret < 0)
		goto out;

	num_properties = ret;
	for (i = 0; i < gdrm->num_properties; i++) {
		u16 prop = gdrm->properties[i];
		u64 val;

		switch (prop) {
		case GUD_PROPERTY_ROTATION:
			/* DRM UAPI matches the protocol so use value directly */
			val = new_plane_state->rotation;
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		/* Append after the connector properties filled in above */
		req->properties[num_properties].prop = cpu_to_le16(prop);
		req->properties[num_properties].val = cpu_to_le64(val);
		num_properties++;
	}

	if (drm_dev_enter(fb->dev, &idx)) {
		len = struct_size(req, properties, num_properties);
		ret = gud_usb_set(gdrm, GUD_REQ_SET_STATE_CHECK, 0, req, len);
		drm_dev_exit(idx);
	} else {
		ret = -ENODEV;
	}
out:
	kfree(req);

	return ret;
}

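/*
 * Atomic update: enables the controller on the first modeset, commits a
 * previously checked state, toggles the display on DPMS changes and
 * queues damage for the flush worker. With async_flush=0 the worker is
 * flushed here so the pageflip doesn't complete before the device has
 * the pixels.
 */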
void gud_pipe_update(struct drm_simple_display_pipe *pipe,
		     struct drm_plane_state *old_state)
{
	struct drm_device *drm = pipe->crtc.dev;
	struct gud_device *gdrm = to_gud_device(drm);
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_framebuffer *fb = state->fb;
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_rect damage;
	int idx;

	if (crtc->state->mode_changed || !crtc->state->enable) {
		cancel_work_sync(&gdrm->work);
		mutex_lock(&gdrm->damage_lock);
		if (gdrm->fb) {
			drm_framebuffer_put(gdrm->fb);
			gdrm->fb = NULL;
		}
		gud_clear_damage(gdrm);
		mutex_unlock(&gdrm->damage_lock);
	}

	if (!drm_dev_enter(drm, &idx))
		return;

	if (!old_state->fb)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);

	if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed))
		gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);

	if (crtc->state->active_changed)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);

	if (drm_atomic_helper_damage_merged(old_state, state, &damage)) {
		if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
			drm_rect_init(&damage, 0, 0, fb->width, fb->height);
		gud_fb_queue_damage(gdrm, fb, &damage);
		if (!gud_async_flush)
			flush_work(&gdrm->work);
	}

	if (!crtc->state->enable)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);

	drm_dev_exit(idx);
}