// SPDX-License-Identifier: GPL-2.0+

#include <linux/crc32.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_fixed.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include <linux/minmax.h>

#include "vkms_drv.h"

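/*
 * Blend a single 16-bit color channel using the premultiplied alpha
 * equation out = src + dst * (1 - alpha), with all operands in the
 * [0, 0xffff] range. Because the source color is premultiplied,
 * alpha == 0 implies src == 0, so the destination passes through
 * unchanged, while alpha == 0xffff yields the source value. For example,
 * src = 0x4000 at alpha = 0x8000 over dst = 0x8000 gives roughly 0x8000.
 */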
static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
{
        u32 new_color;

        new_color = (src * 0xffff + dst * (0xffff - alpha));

        return DIV_ROUND_CLOSEST(new_color, 0xffff);
}

/**
 * pre_mul_alpha_blend - alpha blending equation
 * @frame_info: Source framebuffer's metadata
 * @stage_buffer: The line with the pixels from src_plane
 * @output_buffer: A line buffer that receives the blended output
 *
 * Using the information from the `frame_info`, this blends only the
 * necessary pixels from the `stage_buffer` into the `output_buffer`
 * using the premultiplied blend formula.
 *
 * The current DRM assumption is that pixel color values have already been
 * pre-multiplied with the alpha channel values. See
 * drm_plane_create_blend_mode_property() for more information. Also, this
 * formula assumes a completely opaque background.
 */
static void pre_mul_alpha_blend(struct vkms_frame_info *frame_info,
                                struct line_buffer *stage_buffer,
                                struct line_buffer *output_buffer)
{
        int x_dst = frame_info->dst.x1;
        struct pixel_argb_u16 *out = output_buffer->pixels + x_dst;
        struct pixel_argb_u16 *in = stage_buffer->pixels;
        int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
                            stage_buffer->n_pixels);

        for (int x = 0; x < x_limit; x++) {
                out[x].a = (u16)0xffff;
                out[x].r = pre_mul_blend_channel(in[x].r, out[x].r, in[x].a);
                out[x].g = pre_mul_blend_channel(in[x].g, out[x].g, in[x].a);
                out[x].b = pre_mul_blend_channel(in[x].b, out[x].b, in[x].a);
        }
}

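/*
 * Map the output row @y to the corresponding row of the plane, taking the
 * plane rotation/reflection into account: DRM_MODE_REFLECT_Y mirrors the
 * row inside the rotated rectangle, while 90/270 degree rotations index
 * into the rotated rectangle's horizontal extent.
 */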
static int get_y_pos(struct vkms_frame_info *frame_info, int y)
{
        if (frame_info->rotation & DRM_MODE_REFLECT_Y)
                return drm_rect_height(&frame_info->rotated) - y - 1;

        switch (frame_info->rotation & DRM_MODE_ROTATE_MASK) {
        case DRM_MODE_ROTATE_90:
                return frame_info->rotated.x2 - y - 1;
        case DRM_MODE_ROTATE_270:
                return y + frame_info->rotated.x1;
        default:
                return y;
        }
}

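/*
 * Check that the computed position falls inside the plane's rotated
 * rectangle, using the width for 90/270 degree rotations and the vertical
 * range otherwise.
 */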
static bool check_limit(struct vkms_frame_info *frame_info, int pos)
{
        if (drm_rotation_90_or_270(frame_info->rotation)) {
                if (pos >= 0 && pos < drm_rect_width(&frame_info->rotated))
                        return true;
        } else {
                if (pos >= frame_info->rotated.y1 && pos < frame_info->rotated.y2)
                        return true;
        }

        return false;
}

static void fill_background(const struct pixel_argb_u16 *background_color,
                            struct line_buffer *output_buffer)
{
        for (size_t i = 0; i < output_buffer->n_pixels; i++)
                output_buffer->pixels[i] = *background_color;
}

// lerp(a, b, t) = a + (b - a) * t
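// For instance, with t = 0.5 expressed in the 32.32 fixed-point format used
// by drm_fixp_mul(), lerp_u16(0x0000, 0xffff, t) returns roughly 0x7fff
// (illustrative values).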
static u16 lerp_u16(u16 a, u16 b, s64 t)
{
        s64 a_fp = drm_int2fixp(a);
        s64 b_fp = drm_int2fixp(b);

        s64 delta = drm_fixp_mul(b_fp - a_fp, t);

        return drm_fixp2int(a_fp + delta);
}

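/*
 * Convert a 16-bit channel value into a fixed-point index into the gamma
 * LUT. channel_value2index_ratio is precomputed in vkms_composer_worker()
 * as (lut_length - 1) / 0xffff, so a channel value of 0xffff maps to the
 * last LUT entry.
 */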
static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
{
        s64 color_channel_fp = drm_int2fixp(channel_value);

        return drm_fixp_mul(color_channel_fp, lut->channel_value2index_ratio);
}

/*
 * This enum is related to the positions of the variables inside
 * `struct drm_color_lut`, so the order of both needs to be the same.
 */
enum lut_channel {
        LUT_RED = 0,
        LUT_GREEN,
        LUT_BLUE,
        LUT_RESERVED
};

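/*
 * Look up a channel value in the gamma LUT and linearly interpolate between
 * the two adjacent entries selected by the fractional part of the
 * fixed-point LUT index.
 */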
static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value,
                                      enum lut_channel channel)
{
        s64 lut_index = get_lut_index(lut, channel_value);
        u16 *floor_lut_value, *ceil_lut_value;
        u16 floor_channel_value, ceil_channel_value;

        /*
         * This checks if `struct drm_color_lut` has any gap added by the compiler
         * between the struct fields.
         */
        static_assert(sizeof(struct drm_color_lut) == sizeof(__u16) * 4);

        floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
        if (drm_fixp2int(lut_index) == (lut->lut_length - 1))
                /* We're at the end of the LUT array, use same value for ceil and floor */
                ceil_lut_value = floor_lut_value;
        else
                ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];

        floor_channel_value = floor_lut_value[channel];
        ceil_channel_value = ceil_lut_value[channel];

        return lerp_u16(floor_channel_value, ceil_channel_value,
                        lut_index & DRM_FIXED_DECIMAL_MASK);
}

static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buffer *output_buffer)
{
        if (!crtc_state->gamma_lut.base)
                return;

        if (!crtc_state->gamma_lut.lut_length)
                return;

        for (size_t x = 0; x < output_buffer->n_pixels; x++) {
                struct pixel_argb_u16 *pixel = &output_buffer->pixels[x];

                pixel->r = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->r, LUT_RED);
                pixel->g = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->g, LUT_GREEN);
                pixel->b = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->b, LUT_BLUE);
        }
}

/**
 * blend - blend the pixels from all planes and compute the CRC
 * @wb: The writeback frame buffer metadata
 * @crtc_state: The crtc state
 * @crc32: The crc output of the final frame
 * @output_buffer: A buffer of a row that will receive the result of the blend(s)
 * @stage_buffer: The line with the pixels from the plane being blended into the output
 * @row_size: The size, in bytes, of a single row
 *
 * This function blends the pixels (using pre_mul_alpha_blend()) from all
 * planes, calculates the crc32 of the output from the former step, and, if
 * necessary, converts and stores the output to the writeback buffer.
 */
static void blend(struct vkms_writeback_job *wb,
                  struct vkms_crtc_state *crtc_state,
                  u32 *crc32, struct line_buffer *stage_buffer,
                  struct line_buffer *output_buffer, size_t row_size)
{
        struct vkms_plane_state **plane = crtc_state->active_planes;
        u32 n_active_planes = crtc_state->num_active_planes;
        int y_pos;

        const struct pixel_argb_u16 background_color = { .a = 0xffff };

        size_t crtc_y_limit = crtc_state->base.mode.vdisplay;

        /*
         * The planes are composed line-by-line to avoid heavy memory usage. It is a necessary
         * complexity to avoid poor blending performance.
         *
         * The function vkms_compose_row() is used to read a line, pixel-by-pixel, into the staging
         * buffer.
         */
        for (size_t y = 0; y < crtc_y_limit; y++) {
                fill_background(&background_color, output_buffer);

                /* The active planes are composed associatively in z-order. */
                for (size_t i = 0; i < n_active_planes; i++) {
                        y_pos = get_y_pos(plane[i]->frame_info, y);

                        if (!check_limit(plane[i]->frame_info, y_pos))
                                continue;

                        vkms_compose_row(stage_buffer, plane[i], y_pos);
                        pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
                                            output_buffer);
                }

                apply_lut(crtc_state, output_buffer);

                *crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);

                if (wb)
                        vkms_writeback_row(wb, output_buffer, y_pos);
        }
}

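/*
 * Reject the composition early if any active plane lacks a pixel_read()
 * conversion helper, or if the writeback job lacks a pixel_write() helper
 * for its format.
 */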
static int check_format_funcs(struct vkms_crtc_state *crtc_state,
                              struct vkms_writeback_job *active_wb)
{
        struct vkms_plane_state **planes = crtc_state->active_planes;
        u32 n_active_planes = crtc_state->num_active_planes;

        for (size_t i = 0; i < n_active_planes; i++)
                if (!planes[i]->pixel_read)
                        return -1;

        if (active_wb && !active_wb->pixel_write)
                return -1;

        return 0;
}

static int check_iosys_map(struct vkms_crtc_state *crtc_state)
{
        struct vkms_plane_state **plane_state = crtc_state->active_planes;
        u32 n_active_planes = crtc_state->num_active_planes;

        for (size_t i = 0; i < n_active_planes; i++)
                if (iosys_map_is_null(&plane_state[i]->frame_info->map[0]))
                        return -1;

        return 0;
}

static int compose_active_planes(struct vkms_writeback_job *active_wb,
                                 struct vkms_crtc_state *crtc_state,
                                 u32 *crc32)
{
        size_t line_width, pixel_size = sizeof(struct pixel_argb_u16);
        struct line_buffer output_buffer, stage_buffer;
        int ret = 0;

        /*
         * This check exists so we can call `crc32_le` for the entire line
         * instead of doing it for each channel of each pixel in case
         * `struct pixel_argb_u16` had any gap added by the compiler
         * between the struct fields.
         */
        static_assert(sizeof(struct pixel_argb_u16) == 8);

        if (WARN_ON(check_iosys_map(crtc_state)))
                return -EINVAL;

        if (WARN_ON(check_format_funcs(crtc_state, active_wb)))
                return -EINVAL;

        line_width = crtc_state->base.mode.hdisplay;
        stage_buffer.n_pixels = line_width;
        output_buffer.n_pixels = line_width;

        stage_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
        if (!stage_buffer.pixels) {
                DRM_ERROR("Cannot allocate memory for the intermediate line buffer");
                return -ENOMEM;
        }

        output_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
        if (!output_buffer.pixels) {
                DRM_ERROR("Cannot allocate memory for the output line buffer");
                ret = -ENOMEM;
                goto free_stage_buffer;
        }

        blend(active_wb, crtc_state, crc32, &stage_buffer,
              &output_buffer, line_width * pixel_size);

        kvfree(output_buffer.pixels);
free_stage_buffer:
        kvfree(stage_buffer.pixels);

        return ret;
}

/**
 * vkms_composer_worker - ordered work_struct to compute CRC
 *
 * @work: work_struct
 *
 * Work handler for composing and computing CRCs. work_struct scheduled in
 * an ordered workqueue that's periodically scheduled to run by
 * vkms_vblank_simulate() and flushed at vkms_atomic_commit_tail().
 */
void vkms_composer_worker(struct work_struct *work)
{
        struct vkms_crtc_state *crtc_state = container_of(work,
                                                          struct vkms_crtc_state,
                                                          composer_work);
        struct drm_crtc *crtc = crtc_state->base.crtc;
        struct vkms_writeback_job *active_wb = crtc_state->active_writeback;
        struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
        bool crc_pending, wb_pending;
        u64 frame_start, frame_end;
        u32 crc32 = 0;
        int ret;

        spin_lock_irq(&out->composer_lock);
        frame_start = crtc_state->frame_start;
        frame_end = crtc_state->frame_end;
        crc_pending = crtc_state->crc_pending;
        wb_pending = crtc_state->wb_pending;
        crtc_state->frame_start = 0;
        crtc_state->frame_end = 0;
        crtc_state->crc_pending = false;

        if (crtc->state->gamma_lut) {
                s64 max_lut_index_fp;
                s64 u16_max_fp = drm_int2fixp(0xffff);

                crtc_state->gamma_lut.base = (struct drm_color_lut *)crtc->state->gamma_lut->data;
                crtc_state->gamma_lut.lut_length =
                        crtc->state->gamma_lut->length / sizeof(struct drm_color_lut);
                max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length - 1);
                crtc_state->gamma_lut.channel_value2index_ratio = drm_fixp_div(max_lut_index_fp,
                                                                               u16_max_fp);

        } else {
                crtc_state->gamma_lut.base = NULL;
        }

        spin_unlock_irq(&out->composer_lock);

        /*
         * We raced with the vblank hrtimer and previous work already computed
         * the crc, nothing to do.
         */
        if (!crc_pending)
                return;

        if (wb_pending)
                ret = compose_active_planes(active_wb, crtc_state, &crc32);
        else
                ret = compose_active_planes(NULL, crtc_state, &crc32);

        if (ret)
                return;

        if (wb_pending) {
                drm_writeback_signal_completion(&out->wb_connector, 0);
                spin_lock_irq(&out->composer_lock);
                crtc_state->wb_pending = false;
                spin_unlock_irq(&out->composer_lock);
        }

        /*
         * The worker can fall behind the vblank hrtimer, make sure we catch up.
         */
        while (frame_start <= frame_end)
                drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}

static const char * const pipe_crc_sources[] = {"auto"};

const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
                                        size_t *count)
{
        *count = ARRAY_SIZE(pipe_crc_sources);
        return pipe_crc_sources;
}

static int vkms_crc_parse_source(const char *src_name, bool *enabled)
{
        int ret = 0;

        if (!src_name) {
                *enabled = false;
        } else if (strcmp(src_name, "auto") == 0) {
                *enabled = true;
        } else {
                *enabled = false;
                ret = -EINVAL;
        }

        return ret;
}

int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
                           size_t *values_cnt)
{
        bool enabled;

        if (vkms_crc_parse_source(src_name, &enabled) < 0) {
                DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
                return -EINVAL;
        }

        *values_cnt = 1;

        return 0;
}

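/*
 * Enable or disable the composer. While enabled, a vblank reference is held
 * so composition keeps being scheduled from the vblank handler; disabling
 * drops the reference taken when the composer was enabled.
 */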
void vkms_set_composer(struct vkms_output *out, bool enabled)
{
        bool old_enabled;

        if (enabled)
                drm_crtc_vblank_get(&out->crtc);

        spin_lock_irq(&out->lock);
        old_enabled = out->composer_enabled;
        out->composer_enabled = enabled;
        spin_unlock_irq(&out->lock);

        if (old_enabled)
                drm_crtc_vblank_put(&out->crtc);
}

int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
        struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
        bool enabled = false;
        int ret = 0;

        ret = vkms_crc_parse_source(src_name, &enabled);

        vkms_set_composer(out, enabled);

        return ret;
}
// SPDX-License-Identifier: GPL-2.0+

#include <linux/crc32.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

/**
 * compute_crc - Compute CRC value on output frame
 *
 * @vaddr_out: address of the final framebuffer
 * @composer: framebuffer's metadata
 *
 * Returns the CRC value computed using crc32 on the visible portion of
 * the final framebuffer at vaddr_out.
 */
static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
{
        int i, j, src_offset;
        int x_src = composer->src.x1 >> 16;
        int y_src = composer->src.y1 >> 16;
        int h_src = drm_rect_height(&composer->src) >> 16;
        int w_src = drm_rect_width(&composer->src) >> 16;
        u32 crc = 0;

        for (i = y_src; i < y_src + h_src; ++i) {
                for (j = x_src; j < x_src + w_src; ++j) {
                        src_offset = composer->offset
                                     + (i * composer->pitch)
                                     + (j * composer->cpp);
                        /* XRGB format ignores Alpha channel */
                        memset(vaddr_out + src_offset + 24, 0, 8);
                        crc = crc32_le(crc, vaddr_out + src_offset,
                                       sizeof(u32));
                }
        }

        return crc;
}

44
45/**
46 * blend - belnd value at vaddr_src with value at vaddr_dst
47 * @vaddr_dst: destination address
48 * @vaddr_src: source address
49 * @dest_composer: destination framebuffer's metadata
50 * @src_composer: source framebuffer's metadata
51 *
52 * Blend value at vaddr_src with value at vaddr_dst.
53 * Currently, this function write value at vaddr_src on value
54 * at vaddr_dst using buffer's metadata to locate the new values
55 * from vaddr_src and their distenation at vaddr_dst.
56 *
57 * Todo: Use the alpha value to blend vaddr_src with vaddr_dst
58 * instead of overwriting it.
59 */
60static void blend(void *vaddr_dst, void *vaddr_src,
61 struct vkms_composer *dest_composer,
62 struct vkms_composer *src_composer)
63{
64 int i, j, j_dst, i_dst;
65 int offset_src, offset_dst;
66
67 int x_src = src_composer->src.x1 >> 16;
68 int y_src = src_composer->src.y1 >> 16;
69
70 int x_dst = src_composer->dst.x1;
71 int y_dst = src_composer->dst.y1;
72 int h_dst = drm_rect_height(&src_composer->dst);
73 int w_dst = drm_rect_width(&src_composer->dst);
74
75 int y_limit = y_src + h_dst;
76 int x_limit = x_src + w_dst;
77
78 for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
79 for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
80 offset_dst = dest_composer->offset
81 + (i_dst * dest_composer->pitch)
82 + (j_dst++ * dest_composer->cpp);
83 offset_src = src_composer->offset
84 + (i * src_composer->pitch)
85 + (j * src_composer->cpp);
86
87 memcpy(vaddr_dst + offset_dst,
88 vaddr_src + offset_src, sizeof(u32));
89 }
90 i_dst++;
91 }
92}
93
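/*
 * Compose the cursor plane on top of the primary plane by copying the
 * cursor pixels from its GEM object into the output snapshot.
 */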
static void compose_cursor(struct vkms_composer *cursor_composer,
                           struct vkms_composer *primary_composer,
                           void *vaddr_out)
{
        struct drm_gem_object *cursor_obj;
        struct vkms_gem_object *cursor_vkms_obj;

        cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
        cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);

        if (WARN_ON(!cursor_vkms_obj->vaddr))
                return;

        blend(vaddr_out, cursor_vkms_obj->vaddr,
              primary_composer, cursor_composer);
}

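/*
 * Take a snapshot of the primary framebuffer, compose the cursor over it if
 * one is active, and compute the CRC of the visible region of the result.
 */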
static uint32_t _vkms_get_crc(struct vkms_composer *primary_composer,
                              struct vkms_composer *cursor_composer)
{
        struct drm_framebuffer *fb = &primary_composer->fb;
        struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
        void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
        u32 crc = 0;

        if (!vaddr_out) {
                DRM_ERROR("Failed to allocate memory for output frame.");
                return 0;
        }

        if (WARN_ON(!vkms_obj->vaddr)) {
                kfree(vaddr_out);
                return crc;
        }

        memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);

        if (cursor_composer)
                compose_cursor(cursor_composer, primary_composer, vaddr_out);

        crc = compute_crc(vaddr_out, primary_composer);

        kfree(vaddr_out);

        return crc;
}

141
142/**
143 * vkms_composer_worker - ordered work_struct to compute CRC
144 *
145 * @work: work_struct
146 *
147 * Work handler for composing and computing CRCs. work_struct scheduled in
148 * an ordered workqueue that's periodically scheduled to run by
149 * _vblank_handle() and flushed at vkms_atomic_crtc_destroy_state().
150 */
151void vkms_composer_worker(struct work_struct *work)
152{
153 struct vkms_crtc_state *crtc_state = container_of(work,
154 struct vkms_crtc_state,
155 composer_work);
156 struct drm_crtc *crtc = crtc_state->base.crtc;
157 struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
158 struct vkms_composer *primary_composer = NULL;
159 struct vkms_composer *cursor_composer = NULL;
160 u32 crc32 = 0;
161 u64 frame_start, frame_end;
162 bool crc_pending;
163
164 spin_lock_irq(&out->composer_lock);
165 frame_start = crtc_state->frame_start;
166 frame_end = crtc_state->frame_end;
167 crc_pending = crtc_state->crc_pending;
168 crtc_state->frame_start = 0;
169 crtc_state->frame_end = 0;
170 crtc_state->crc_pending = false;
171 spin_unlock_irq(&out->composer_lock);
172
173 /*
174 * We raced with the vblank hrtimer and previous work already computed
175 * the crc, nothing to do.
176 */
177 if (!crc_pending)
178 return;
179
180 if (crtc_state->num_active_planes >= 1)
181 primary_composer = crtc_state->active_planes[0]->composer;
182
183 if (crtc_state->num_active_planes == 2)
184 cursor_composer = crtc_state->active_planes[1]->composer;
185
186 if (primary_composer)
187 crc32 = _vkms_get_crc(primary_composer, cursor_composer);
188
189 /*
190 * The worker can fall behind the vblank hrtimer, make sure we catch up.
191 */
192 while (frame_start <= frame_end)
193 drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
194}

static const char * const pipe_crc_sources[] = {"auto"};

const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
                                        size_t *count)
{
        *count = ARRAY_SIZE(pipe_crc_sources);
        return pipe_crc_sources;
}

static int vkms_crc_parse_source(const char *src_name, bool *enabled)
{
        int ret = 0;

        if (!src_name) {
                *enabled = false;
        } else if (strcmp(src_name, "auto") == 0) {
                *enabled = true;
        } else {
                *enabled = false;
                ret = -EINVAL;
        }

        return ret;
}

int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
                           size_t *values_cnt)
{
        bool enabled;

        if (vkms_crc_parse_source(src_name, &enabled) < 0) {
                DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
                return -EINVAL;
        }

        *values_cnt = 1;

        return 0;
}

int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
        struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
        bool enabled = false;
        int ret = 0;

        ret = vkms_crc_parse_source(src_name, &enabled);

        spin_lock_irq(&out->lock);
        out->composer_enabled = enabled;
        spin_unlock_irq(&out->lock);

        return ret;
}