v6.13.7
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_devcaps.h"

#include <drm/ttm/ttm_placement.h>

#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

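/**
 * vmw_supports_3d - Check whether the device and current driver setup
 * support 3D command submission.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * For guest-backed-object devices this queries the SVGA3D_DEVCAP_3D devcap;
 * for older devices it checks the 3D hardware version advertised through the
 * extended FIFO. Returns true if 3D is usable.
 */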
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	BUG_ON(vmw_is_svga_v3(dev_priv));

	fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = vmw_fifo_mem_read(dev_priv,
				      ((fifo->capabilities &
					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
					       SVGA_FIFO_3D_HWVERSION_REVISED :
					       SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}

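/**
 * vmw_fifo_have_pitchlock - Check whether the device supports the FIFO
 * pitchlock capability.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns true if the extended FIFO advertises SVGA_FIFO_CAP_PITCHLOCK.
 */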
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

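/**
 * vmw_fifo_create - Allocate and initialize the legacy command FIFO state.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Allocates the bounce buffer used for reservations that cannot be made in
 * place, programs the FIFO MIN/MAX/NEXT_CMD/STOP registers and signals the
 * device that configuration is done.
 *
 * Returns the new FIFO state, NULL if the device has no FIFO memory, or an
 * ERR_PTR() on failure.
 */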
struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
	struct vmw_fifo_state *fifo;
	uint32_t max;
	uint32_t min;

	if (!dev_priv->fifo_mem)
		return NULL;

	fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return ERR_PTR(-ENOMEM);
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL)) {
		kfree(fifo);
		return ERR_PTR(-ENOMEM);
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);
	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
	wmb();
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);

	drm_info(&dev_priv->drm,
		 "Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	if (unlikely(min >= max)) {
		drm_warn(&dev_priv->drm,
			 "FIFO memory is not usable. Driver failed to initialize.");
		/* Free the state we just allocated instead of leaking it. */
		vfree(fifo->static_buffer);
		kfree(fifo);
		return ERR_PTR(-ENXIO);
	}

	return fifo;
}

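/**
 * vmw_fifo_ping_host - Prod the device to start processing FIFO commands.
 *
 * @dev_priv: Pointer to the device private structure.
 * @reason: The sync reason written to the SVGA_REG_SYNC register.
 *
 * The cmpxchg on SVGA_FIFO_BUSY avoids redundant sync register writes while
 * the device is already marked busy.
 */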
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->fifo_mem;

	if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}

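/**
 * vmw_fifo_destroy - Free the FIFO state and its bounce buffers.
 *
 * @dev_priv: Pointer to the device private structure.
 */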
void vmw_fifo_destroy(struct vmw_private *dev_priv)
{
	struct vmw_fifo_state *fifo = dev_priv->fifo;

	if (!fifo)
		return;

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
	kfree(fifo);
	dev_priv->fifo = NULL;
}

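/**
 * vmw_fifo_is_full - Check whether @bytes of command space is unavailable.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes of FIFO space needed.
 *
 * Returns true if the free space in the ring, computed from the NEXT_CMD
 * and STOP offsets, is at most @bytes.
 */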
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

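/**
 * vmw_fifo_wait_noirq - Wait for FIFO space by polling.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the wait can be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Used when the device cannot raise FIFO-progress interrupts. Returns 0 on
 * success, -EBUSY on timeout and -ERESTARTSYS if interrupted by a signal.
 */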
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

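/**
 * vmw_fifo_wait - Wait until at least @bytes of FIFO space is available.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the wait can be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host to drain the FIFO and then either polls or, if the device
 * supports interrupt masking, sleeps on the fifo_queue waitqueue until
 * enough space is available. Returns 0 on success or a negative error code.
 */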
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}

/*
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) in two cases:
 * if it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 *   Pointer to the reserved fifo space, or NULL on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	u32  *fifo_mem = dev_priv->fifo_mem;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_fifo_mem_write(dev_priv,
							   SVGA_FIFO_RESERVED,
							   bytes);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}

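/**
 * vmw_cmd_ctx_reserve - Reserve command space on behalf of a context.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 * @ctx_id: The hardware context id, or SVGA3D_INVALID_ID for none.
 *
 * Uses the command buffer manager if one is available, otherwise falls back
 * to the legacy FIFO. Returns a pointer to the reserved space, or NULL on
 * failure.
 */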
void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	else if (ctx_id == SVGA3D_INVALID_ID)
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	else {
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	}
	if (IS_ERR_OR_NULL(ret))
		return NULL;

	return ret;
}

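/**
 * vmw_fifo_res_copy - Copy bounce-buffered commands into the FIFO using the
 * reservation mechanism.
 *
 * @fifo_state: The FIFO state.
 * @vmw: Pointer to device private structure.
 * @next_cmd: Current NEXT_CMD offset.
 * @max: FIFO max offset.
 * @min: FIFO min offset.
 * @bytes: Number of bytes to copy.
 *
 * Marks @bytes as reserved and copies the bounce buffer into the ring,
 * wrapping around to @min if the copy crosses @max.
 */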
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      struct vmw_private *vmw,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	u32 *fifo_mem = vmw->fifo_mem;
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

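/**
 * vmw_fifo_slow_copy - Copy bounce-buffered commands into the FIFO one
 * 32-bit word at a time.
 *
 * @fifo_state: The FIFO state.
 * @vmw: Pointer to device private structure.
 * @next_cmd: Current NEXT_CMD offset.
 * @max: FIFO max offset.
 * @min: FIFO min offset.
 * @bytes: Number of bytes to copy.
 *
 * Fallback for devices without SVGA_FIFO_CAP_RESERVE: each word is written
 * and NEXT_CMD is advanced past it before the next word is copied.
 */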
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       struct vmw_private *vmw,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

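/**
 * vmw_local_fifo_commit - Commit a previously reserved range to the FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit; must not exceed the reserved size.
 *
 * If a bounce buffer was used for the reservation, the data is copied into
 * the ring first. NEXT_CMD is then advanced, the reservation is cleared and
 * the host is pinged to process the new commands.
 */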
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, dev_priv,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, dev_priv,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
	}

	if (reserveable)
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

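/**
 * vmw_cmd_commit - Commit previously reserved command space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Commits through the command buffer manager if one is available, otherwise
 * through the legacy FIFO.
 */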
void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}


/**
 * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_cmd_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptibly if the function needs to sleep.
 */
int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
	else
		return 0;
}

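/**
 * vmw_cmd_send_fence - Emit a fence command and return its sequence number.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Location to return the new fence sequence number.
 *
 * Reserves space for an SVGA_CMD_FENCE command, allocates the next non-zero
 * sequence number and commits the command with a flush. If the device does
 * not support fences, the command is dropped and the irq code emulates fence
 * completion. Returns 0 on success or -ENOMEM if no command space could be
 * reserved.
 */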
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_CMD_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!vmw_has_fences(dev_priv)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_cmd_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_cmd_commit_flush(dev_priv, bytes);
	vmw_update_seqno(dev_priv);

out_err:
	return ret;
}

/**
 * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_cmd_emit_dummy_query documentation.
 */
static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->resource->mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
	} else {
		cmd->body.guestResult.gmrId = bo->resource->start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_cmd_emit_dummy_query documentation.
 */
static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
				       uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->resource->start;
	cmd->body.offset = 0;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}


/**
 * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It provides a query barrier: once this query has finished, all
 * preceding queries are known to have finished as well.
 *
 * A query result structure must have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);

	return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
}


/**
 * vmw_cmd_supported - returns true if the given device supports
 * command queues.
 *
 * @vmw: The device private structure.
 *
 * Returns true if we can issue commands.
 */
bool vmw_cmd_supported(struct vmw_private *vmw)
{
	bool has_cmdbufs =
		(vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
				      SVGA_CAP_CMD_BUFFERS_2)) != 0;
	if (vmw_is_svga_v3(vmw))
		return (has_cmdbufs &&
			(vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
	/*
	 * We have FIFO commands.
	 */
	return has_cmdbufs || vmw->fifo_mem != NULL;
}