/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
#include "msm_fence.h"

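/*
 * Tracks a single in-flight atomic commit: 'work' runs the (potentially)
 * asynchronous tail of the commit on priv->atomic_wq, and 'crtc_mask'
 * records which CRTCs the commit holds busy (see start_atomic() /
 * end_atomic() below).
 */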
struct msm_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct work_struct work;
	uint32_t crtc_mask;
};

static void commit_worker(struct work_struct *work);

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	spin_lock(&priv->pending_crtcs_event.lock);
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		priv->pending_crtcs |= crtc_mask;
	}
	spin_unlock(&priv->pending_crtcs_event.lock);

	return ret;
}

/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}
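
/*
 * A minimal sketch of how the two halves pair up (the real sequence is
 * spread across msm_atomic_commit() and commit_destroy() below):
 *
 *	ret = start_atomic(priv, crtc_mask);	// blocks out overlapping commits
 *	if (ret)
 *		return ret;			// interrupted by a signal
 *	...program the update...
 *	end_atomic(priv, crtc_mask);		// wakes any waiting committers
 */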

static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->dev = state->dev;
	c->state = state;

	INIT_WORK(&c->work, commit_worker);

	return c;
}

static void commit_destroy(struct msm_commit *c)
{
	end_atomic(c->dev->dev_private, c->crtc_mask);
	kfree(c);
}

static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct msm_drm_private *priv = old_state->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
	}
}

/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c, bool async)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	drm_atomic_helper_wait_for_fences(dev, state, false);

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs.  So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fbs
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq).  We need some way to poll for pipe shut
	 * down, or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be a critical path).
	 */

	msm_atomic_wait_for_commit_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_state_put(state);

	commit_destroy(c);
}

static void commit_worker(struct work_struct *work)
{
	complete_commit(container_of(work, struct msm_commit, work), true);
}
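
/*
 * In the nonblocking case, msm_atomic_commit() queues c->work on
 * priv->atomic_wq and returns immediately, so complete_commit() runs
 * from this worker rather than from the caller's context.  Either way,
 * the final drm_atomic_state_put() and commit_destroy() happen inside
 * complete_commit().
 */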

/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check().  It can still fail when, e.g., the framebuffer
 * reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool nonblock)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_commit *c;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/*
	 * Note that plane->atomic_async_check() should fail if we need
	 * to re-assign hwpipe or anything that touches global atomic
	 * state, so we'll never go down the async update path in those
	 * cases.
	 */
	if (state->async_update) {
		drm_atomic_helper_async_commit(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);
		return 0;
	}

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		c->crtc_mask |= drm_crtc_mask(crtc);

	/*
	 * Figure out what fence to wait for:
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		if ((new_plane_state->fb != old_plane_state->fb) && new_plane_state->fb) {
			struct drm_gem_object *obj = msm_framebuffer_bo(new_plane_state->fb, 0);
			struct msm_gem_object *msm_obj = to_msm_bo(obj);
			struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);

			drm_atomic_set_fence_for_plane(new_plane_state, fence);
		}
	}
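
	/*
	 * The exclusive fence on the BO's reservation object tracks the
	 * last GPU write to the buffer (implicit sync).  Attaching it to
	 * the plane state above lets drm_atomic_helper_wait_for_fences()
	 * in complete_commit() block until rendering into the new fb has
	 * finished before it is scanned out.
	 */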

	/*
	 * Wait for pending updates on any of the same CRTCs, and then
	 * mark our set of CRTCs as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret)
		goto err_free;

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 *
	 * swap driver private state while still holding state_lock
	 */
	if (to_kms_state(state)->state)
		priv->kms->funcs->swap_state(priv->kms, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update, which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock) {
		queue_work(priv->atomic_wq, &c->work);
		return 0;
	}

	complete_commit(c, false);

	return 0;

err_free:
	kfree(c);
error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}

struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
{
	struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
		kfree(state);
		return NULL;
	}

	return &state->base;
}

void msm_atomic_state_clear(struct drm_atomic_state *s)
{
	struct msm_kms_state *state = to_kms_state(s);

	drm_atomic_state_default_clear(&state->base);
	kfree(state->state);
	state->state = NULL;
}

void msm_atomic_state_free(struct drm_atomic_state *state)
{
	kfree(to_kms_state(state)->state);
	drm_atomic_state_default_release(state);
	kfree(state);
}
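
/*
 * For reference, a sketch (not part of this file) of how these entry
 * points are typically wired into the device's mode_config.  The actual
 * table lives in msm_drv.c; the .fb_create hook shown here is an
 * assumption about that wiring:
 *
 *	static const struct drm_mode_config_funcs mode_config_funcs = {
 *		.fb_create = msm_framebuffer_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = msm_atomic_commit,
 *		.atomic_state_alloc = msm_atomic_state_alloc,
 *		.atomic_state_clear = msm_atomic_state_clear,
 *		.atomic_state_free = msm_atomic_state_free,
 *	};
 */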