/**************************************************************************
 *
 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#include <drm/ttm/ttm_placement.h>

#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"

#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)

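/**
 * A single overlay stream: the buffer currently bound to the stream,
 * whether the stream has been claimed, and the arguments saved so a
 * paused stream can be resumed later.
 */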
struct vmw_stream {
	struct vmw_dma_buffer *buf;
	bool claimed;
	bool paused;
	struct drm_vmw_control_stream_arg saved;
};

/**
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

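/**
 * Return the per-device overlay state, or NULL if overlays have not been
 * set up for this device.
 */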
static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	return dev_priv ? dev_priv->overlay_priv : NULL;
}

struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};

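/**
 * Fill out an escape command header for a VMware-namespace escape whose
 * payload is @size bytes.
 */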
static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}

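/**
 * Fill out an escape command that flushes the overlay stream identified
 * by @stream_id.
 */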
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}

/**
 * Send put command to hw.
 *
 * Returns
 * 0 on success, -ENOMEM if the fifo reservation fails.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct vmw_escape_video_flush *flush;
	size_t fifo_size;
	bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
	int i, num_items;
	SVGAGuestPtr ptr;

	struct {
		struct vmw_escape_header escape;
		struct {
			uint32_t cmdType;
			uint32_t streamId;
		} header;
	} *cmds;
	struct {
		uint32_t registerId;
		uint32_t value;
	} *items;

	/* the defines are register indices, so count is highest index + 1 */
	if (have_so)
		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
	else
		num_items = SVGA_VIDEO_PITCH_3 + 1;

	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;

	cmds = vmw_fifo_reserve(dev_priv, fifo_size);
	/* hardware has hung, can't do anything here */
	if (!cmds)
		return -ENOMEM;

	items = (typeof(items))&cmds[1];
	flush = (struct vmw_escape_video_flush *)&items[num_items];

	/* the size is header + number of items */
	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));

	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->header.streamId = arg->stream_id;

	/* the IDs are neatly numbered */
	for (i = 0; i < num_items; i++)
		items[i].registerId = i;

	vmw_bo_get_guest_ptr(&buf->base, &ptr);
	ptr.offset += arg->offset;

	items[SVGA_VIDEO_ENABLED].value = true;
	items[SVGA_VIDEO_FLAGS].value = arg->flags;
	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
	items[SVGA_VIDEO_FORMAT].value = arg->format;
	items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
	items[SVGA_VIDEO_SIZE].value = arg->size;
	items[SVGA_VIDEO_WIDTH].value = arg->width;
	items[SVGA_VIDEO_HEIGHT].value = arg->height;
	items[SVGA_VIDEO_SRC_X].value = arg->src.x;
	items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
	items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
	items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
	items[SVGA_VIDEO_DST_X].value = arg->dst.x;
	items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
	items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
	items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
	items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
	items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
	items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
	if (have_so) {
		items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId;
		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
	}

	fill_flush(flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, fifo_size);

	return 0;
}

/**
 * Send stop command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;
	fill_flush(&cmds->flush, stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/**
 * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
 *
 * With the introduction of screen objects, buffers can now be placed in
 * GMRs instead of being locked to vram.
 */
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *buf,
				   bool pin, bool inter)
{
	if (!pin)
		return vmw_dmabuf_unpin(dev_priv, buf, inter);

	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);

	return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
}

/**
 * Stop or pause a stream.
 *
 * If the stream is paused, the no-evict flag is removed from the buffer
 * but the buffer itself is left in vram. This allows, for instance,
 * mode_set to evict it should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* no buffer attached, so the stream is already completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag so no -ENOMEM */
		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
					      interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}

/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and the stream is not paused, just
		 * send the put command; no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
		       != 0);
		return ret;
	}

	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;
	/* stream is no longer stopped/paused */
	stream->paused = false;

	return 0;
}

/**
 * Stop all streams.
 *
 * Used by the fb code when starting.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->buf)
			continue;

		ret = vmw_overlay_stop(dev_priv, i, false, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Pauses all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

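/**
 * Check that overlays have been set up and that the fifo exposes both the
 * video and escape capabilities required to drive them.
 */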
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
	return (dev_priv->overlay_priv != NULL &&
		((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
		  VMW_OVERLAY_CAP_MASK));
}

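/**
 * Ioctl entry point for controlling an overlay stream. Stops the stream
 * when @arg->enabled is false, otherwise looks up the backing buffer and
 * updates the stream with the new arguments.
 *
 * Takes the overlay lock.
 */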
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!vmw_overlay_available(dev_priv))
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}

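/**
 * Return the total number of overlay streams, or 0 if overlays are not
 * available on this device.
 */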
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
	if (!vmw_overlay_available(dev_priv))
		return 0;

	return VMW_MAX_NUM_STREAMS;
}

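/**
 * Return the number of overlay streams that have not yet been claimed.
 *
 * Takes the overlay lock.
 */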
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, k;

	if (!vmw_overlay_available(dev_priv))
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
		if (!overlay->stream[i].claimed)
			k++;

	mutex_unlock(&overlay->mutex);

	return k;
}

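/**
 * Claim a free stream and return its id in @out.
 *
 * Returns -ESRCH if all streams are already claimed.
 *
 * Takes the overlay lock.
 */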
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		mutex_unlock(&overlay->mutex);
		return 0;
	}

	mutex_unlock(&overlay->mutex);
	return -ESRCH;
}

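/**
 * Stop the stream and release the claim on it.
 *
 * Takes the overlay lock.
 */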
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}

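/**
 * Allocate and initialize the overlay state at device init time.
 *
 * Returns -EINVAL if already initialized, -ENOMEM on allocation failure.
 */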
int vmw_overlay_init(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay;
	int i;

	if (dev_priv->overlay_priv)
		return -EINVAL;

	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

	mutex_init(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		overlay->stream[i].buf = NULL;
		overlay->stream[i].paused = false;
		overlay->stream[i].claimed = false;
	}

	dev_priv->overlay_priv = overlay;

	return 0;
}

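/**
 * Tear down the overlay state at device takedown, stopping any streams
 * that still have a buffer attached.
 */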
int vmw_overlay_close(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	bool forgotten_buffer = false;
	int i;

	if (!overlay)
		return -ENOSYS;

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].buf) {
			forgotten_buffer = true;
			vmw_overlay_stop(dev_priv, i, false, false);
		}
	}

	WARN_ON(forgotten_buffer);

	dev_priv->overlay_priv = NULL;
	kfree(overlay);

	return 0;
}