// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

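/*
 * Private state for the single CTM block. @fifo records which CRTC FIFO
 * the CTM is currently bound to (1-based); 0 means the CTM is disabled.
 */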
struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

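/*
 * Global load bookkeeping summed over all planes: @hvs_load approximates
 * HVS cycles per second and @membus_load memory bus bytes per second,
 * both accumulated from the per-plane estimates in vc4_plane_state.
 */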
struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

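/*
 * Look up the CTM private state for this atomic update, taking
 * ctm_state_lock so concurrent updates touching the CTM serialize
 * against each other.
 */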
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
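/*
 * Worked example (illustrative): 0.5 in S31.32 is 0x0000000080000000.
 * The sign bit (63) and integer bits (62:32) are clear, so we take the
 * top nine fractional bits (31:23), giving 0x100, which is 0.5 in S0.9.
 */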
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most significant fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

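/*
 * Latch the committed CTM into the HVS OLED matrix registers. Writing a
 * FIFO of 0 to SCALER_OLEDOFFS disables the matrix, so the coefficient
 * registers only need updating when a FIFO is selected.
 */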
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

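/*
 * Run the tail of an atomic commit: mask HVS underrun interrupts for
 * the affected channels, step through the standard helper commit
 * sequence, then release the async_modeset semaphore.
 */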
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc;
	int i;

	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
			continue;

		vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
		vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
	}

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

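/* Worker for nonblocking commits, queued on system_unbound_wq. */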
static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails. Nonblocking commits are handed off to a worker, so
 * the commit itself completes asynchronously in that case.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	state->legacy_cursor_update = false;
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

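/*
 * Wrap drm_gem_fb_create() so that framebuffers created without an
 * explicit modifier inherit the tiling state previously set on the BO
 * via vc4_set_tiling_ioctl().
 */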
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			/* fifo is 1-based since 0 disables CTM. */
			int fifo = to_vc4_crtc(crtc)->channel + 1;
			int j;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits. Use a separate index here so
			 * we don't clobber the CRTC iterator above.
			 */
			ctm = new_crtc_state->ctm->data;
			for (j = 0; j < ARRAY_SIZE(ctm->matrix); j++) {
				u64 val = ctm->matrix[j];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

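/*
 * Recompute the global HVS and memory-bus load estimates from the
 * per-plane contributions, then reject the update if either budget
 * would be exceeded.
 */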
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2 Gbytes/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* The HVS clock is supposed to run at 250 MHz, so let's take a margin
	 * and consider the maximum number of cycles to be 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}


static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

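/*
 * Top-level atomic check: validate the CTM constraints first, then run
 * the core helper checks, and finally the load tracker.
 */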
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

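/*
 * One-time KMS setup: initialize vblank handling, the mode_config limits
 * and hooks, and the CTM and load tracker private state objects.
 */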
int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	struct vc4_load_tracker_state *load_state;
	int ret;

	/* Start with the load tracker enabled. Can be disabled through the
	 * debugfs load_tracker file.
	 */
	vc4->load_tracker_enabled = true;

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	dev->irq_enabled = true;
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state) {
		drm_atomic_private_obj_fini(&vc4->ctm_manager);
		return -ENOMEM;
	}

	drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
				    &vc4_load_tracker_state_funcs);

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}
564}