/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include "drm_crtc.h"
#include "drm_atomic.h"
#include "drm_atomic_helper.h"
#include "drm_crtc_helper.h"
#include "drm_plane_helper.h"
#include "drm_fb_cma_helper.h"
#include "vc4_drv.h"

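/*
 * .output_poll_changed hook: forward connector hotplug events to the CMA
 * fbdev helper so the fbdev framebuffer console can react to them.
 */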
static void vc4_output_poll_changed(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_fbdev_cma_hotplug_event(vc4->fbdev);
}

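/*
 * Tracks one in-flight atomic commit: the device, the state being
 * committed, and the seqno callback that fires once rendering to the
 * new framebuffers has completed.
 */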
struct vc4_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct vc4_seqno_cb cb;
};

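/*
 * Performs the hardware side of the commit: disables, plane updates and
 * enables, then a vblank wait so scanout is done with the old display
 * lists before releasing the state, the modeset semaphore and the
 * commit structure.
 */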
static void
vc4_atomic_complete_commit(struct vc4_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Make sure that drm_atomic_helper_wait_for_vblanks()
	 * actually waits for vblank.  If we're doing a full atomic
	 * modeset (as opposed to a vc4_update_plane() short circuit),
	 * then we need to wait for scanout to be done with our
	 * display lists before we free them and potentially reallocate
	 * and overwrite the dlist memory with a new modeset.
	 */
	state->legacy_cursor_update = false;

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);

	kfree(c);
}

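/* Seqno callback trampoline: recover the vc4_commit and finish it. */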
static void
vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
{
	struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);

	vc4_atomic_complete_commit(c);
}

static struct vc4_commit *commit_init(struct drm_atomic_state *state)
{
	struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;
	c->dev = state->dev;
	c->state = state;

	return c;
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails. Nonblocking commits are completed from a seqno
 * callback once rendering to the new framebuffers has finished.
 *
 * Returns:
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;
	int i;
	uint64_t wait_seqno = 0;
	struct vc4_commit *c;
	struct drm_plane *plane;
	struct drm_plane_state *new_state;

	c = commit_init(state);
	if (!c)
		return -ENOMEM;

	/* Make sure that any outstanding modesets have finished. */
	if (nonblock) {
		struct drm_crtc *crtc;
		struct drm_crtc_state *crtc_state;
		unsigned long flags;
		bool busy = false;

		/*
		 * If there's an undispatched event to send then we're
		 * obviously still busy.  If there isn't, then we can
		 * unconditionally wait for the semaphore because it
		 * shouldn't be contended (for long).
		 *
		 * This is to prevent a race where queuing a new flip
		 * from userspace immediately on receipt of an event
		 * beats our clean-up and returns EBUSY.
		 */
		spin_lock_irqsave(&dev->event_lock, flags);
		for_each_crtc_in_state(state, crtc, crtc_state, i)
			busy |= vc4_event_pending(crtc);
		spin_unlock_irqrestore(&dev->event_lock, flags);
		if (busy) {
			kfree(c);
			return -EBUSY;
		}
	}
	ret = down_interruptible(&vc4->async_modeset);
	if (ret) {
		kfree(c);
		return ret;
	}

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		kfree(c);
		up(&vc4->async_modeset);
		return ret;
	}

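	/*
	 * Track the most recent rendering seqno among the new
	 * framebuffers, so the commit below can be deferred until the
	 * GPU has finished drawing into all of them.
	 */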
	for_each_plane_in_state(state, plane, new_state, i) {
		if ((plane->state->fb != new_state->fb) && new_state->fb) {
			struct drm_gem_cma_object *cma_bo =
				drm_fb_cma_get_gem_obj(new_state->fb, 0);
			struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);

			wait_seqno = max(bo->seqno, wait_seqno);
		}
	}

	/*
	 * This is the point of no return: everything below never fails
	 * except when the hardware misbehaves badly, which means we can
	 * commit the new state on the software side now.
	 */

	drm_atomic_helper_swap_state(state, true);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: it must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update, which is important since compositors need to figure out
	 * the composition of the next frame right after having submitted
	 * the current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock) {
		vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
				   vc4_atomic_complete_commit_seqno_cb);
	} else {
		vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
		vc4_atomic_complete_commit(c);
	}

	return 0;
}

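/* Mode-config hooks: stock atomic check, our commit, CMA framebuffers. */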
static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.output_poll_changed = vc4_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = drm_fb_cma_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	sema_init(&vc4->async_modeset, 1);

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

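	/*
	 * 2048x2048 is presumably the largest size the scanout hardware
	 * supports; the core rejects modes and framebuffers beyond these
	 * limits.
	 */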
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	drm_mode_config_reset(dev);

	vc4->fbdev = drm_fbdev_cma_init(dev, 32,
					dev->mode_config.num_crtc,
					dev->mode_config.num_connector);
	if (IS_ERR(vc4->fbdev))
		vc4->fbdev = NULL;

	drm_kms_helper_poll_init(dev);

	return 0;
}
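
/*
 * Illustrative sketch only, not part of this file: a driver bind path
 * would be expected to invoke this loader roughly as follows (the error
 * label is hypothetical):
 *
 *	ret = vc4_kms_load(drm);
 *	if (ret < 0)
 *		goto err_unbind;
 */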