Linux Audio

Check our new training course

Loading...
v5.4
  1// SPDX-License-Identifier: GPL-2.0 OR MIT
  2/**************************************************************************
  3 *
  4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
#include <linux/kernel.h>

#include <drm/ttm/ttm_bo_driver.h>

#include "vmwgfx_drv.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 31
 32#define VMW_PPN_SIZE (sizeof(unsigned long))
 33/* A future safe maximum remap size. */
 34#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
 35#define DMA_ADDR_INVALID ((dma_addr_t) 0)
 36#define DMA_PAGE_INVALID 0UL
 37
 38static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 39			 struct vmw_piter *iter,
 40			 unsigned long num_pages,
 41			 int gmr_id)
 42{
 43	SVGAFifoCmdDefineGMR2 define_cmd;
 44	SVGAFifoCmdRemapGMR2 remap_cmd;
 45	uint32_t *cmd;
 46	uint32_t *cmd_orig;
 47	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
 48	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
 49	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
 50	uint32_t remap_pos = 0;
 51	uint32_t cmd_size = define_size + remap_size;
 52	uint32_t i;
 53
 54	cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
 55	if (unlikely(cmd == NULL))
 56		return -ENOMEM;
 57
 58	define_cmd.gmrId = gmr_id;
 59	define_cmd.numPages = num_pages;
 60
 61	*cmd++ = SVGA_CMD_DEFINE_GMR2;
 62	memcpy(cmd, &define_cmd, sizeof(define_cmd));
 63	cmd += sizeof(define_cmd) / sizeof(*cmd);
 64
 65	/*
 66	 * Need to split the command if there are too many
 67	 * pages that goes into the gmr.
 68	 */
 69
 70	remap_cmd.gmrId = gmr_id;
 71	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
 72		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
 73
 74	while (num_pages > 0) {
 75		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
 76
 77		remap_cmd.offsetPages = remap_pos;
 78		remap_cmd.numPages = nr;
 79
 80		*cmd++ = SVGA_CMD_REMAP_GMR2;
 81		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
 82		cmd += sizeof(remap_cmd) / sizeof(*cmd);
 83
 84		for (i = 0; i < nr; ++i) {
 85			if (VMW_PPN_SIZE <= 4)
 86				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
 87			else
 88				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
 89					PAGE_SHIFT;
 90
 91			cmd += VMW_PPN_SIZE / sizeof(*cmd);
 92			vmw_piter_next(iter);
 
 
 
 
 
 
 93		}
 94
 95		num_pages -= nr;
 96		remap_pos += nr;
 97	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 98
 99	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
 
 
100
101	vmw_fifo_commit(dev_priv, cmd_size);
 
102
103	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104}
105
106static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
107			    int gmr_id)
108{
109	SVGAFifoCmdDefineGMR2 define_cmd;
110	uint32_t define_size = sizeof(define_cmd) + 4;
111	uint32_t *cmd;
112
113	cmd = VMW_FIFO_RESERVE(dev_priv, define_size);
114	if (unlikely(cmd == NULL))
115		return;
116
117	define_cmd.gmrId = gmr_id;
118	define_cmd.numPages = 0;
119
120	*cmd++ = SVGA_CMD_DEFINE_GMR2;
121	memcpy(cmd, &define_cmd, sizeof(define_cmd));
 
 
 
 
 
 
122
123	vmw_fifo_commit(dev_priv, define_size);
124}
125
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126
127int vmw_gmr_bind(struct vmw_private *dev_priv,
128		 const struct vmw_sg_table *vsgt,
129		 unsigned long num_pages,
130		 int gmr_id)
131{
132	struct vmw_piter data_iter;
 
133
134	vmw_piter_start(&data_iter, vsgt, 0);
135
136	if (unlikely(!vmw_piter_next(&data_iter)))
137		return 0;
138
139	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
 
140		return -EINVAL;
141
142	return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
 
 
 
 
 
 
 
 
 
143}
144
145
146void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
147{
148	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
149		vmw_gmr2_unbind(dev_priv, gmr_id);
 
 
 
 
150}
v3.1
 
  1/**************************************************************************
  2 *
  3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
 
 
 28#include "vmwgfx_drv.h"
 29#include "drmP.h"
 30#include "ttm/ttm_bo_driver.h"
 31
 32/**
 33 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 34 * the number of used descriptors.
 35 */
 36
 37static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
 38				     struct page *pages[],
 39				     unsigned long num_pages)
 40{
 41	struct page *page, *next;
 42	struct svga_guest_mem_descriptor *page_virtual = NULL;
 43	struct svga_guest_mem_descriptor *desc_virtual = NULL;
 44	unsigned int desc_per_page;
 45	unsigned long prev_pfn;
 46	unsigned long pfn;
 47	int ret;
 48
 49	desc_per_page = PAGE_SIZE /
 50	    sizeof(struct svga_guest_mem_descriptor) - 1;
 51
 52	while (likely(num_pages != 0)) {
 53		page = alloc_page(__GFP_HIGHMEM);
 54		if (unlikely(page == NULL)) {
 55			ret = -ENOMEM;
 56			goto out_err;
 57		}
 58
 59		list_add_tail(&page->lru, desc_pages);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 60
 61		/*
 62		 * Point previous page terminating descriptor to this
 63		 * page before unmapping it.
 64		 */
 65
 66		if (likely(page_virtual != NULL)) {
 67			desc_virtual->ppn = page_to_pfn(page);
 68			kunmap_atomic(page_virtual, KM_USER0);
 69		}
 70
 71		page_virtual = kmap_atomic(page, KM_USER0);
 72		desc_virtual = page_virtual - 1;
 73		prev_pfn = ~(0UL);
 74
 75		while (likely(num_pages != 0)) {
 76			pfn = page_to_pfn(*pages);
 77
 78			if (pfn != prev_pfn + 1) {
 79
 80				if (desc_virtual - page_virtual ==
 81				    desc_per_page - 1)
 82					break;
 83
 84				(++desc_virtual)->ppn = cpu_to_le32(pfn);
 85				desc_virtual->num_pages = cpu_to_le32(1);
 86			} else {
 87				uint32_t tmp =
 88				    le32_to_cpu(desc_virtual->num_pages);
 89				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
 90			}
 91			prev_pfn = pfn;
 92			--num_pages;
 93			++pages;
 94		}
 95
 96		(++desc_virtual)->ppn = cpu_to_le32(0);
 97		desc_virtual->num_pages = cpu_to_le32(0);
 98	}
 99
100	if (likely(page_virtual != NULL))
101		kunmap_atomic(page_virtual, KM_USER0);
102
103	return 0;
104out_err:
105	list_for_each_entry_safe(page, next, desc_pages, lru) {
106		list_del_init(&page->lru);
107		__free_page(page);
108	}
109	return ret;
110}
111
112static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
113{
114	struct page *page, *next;
115
116	list_for_each_entry_safe(page, next, desc_pages, lru) {
117		list_del_init(&page->lru);
118		__free_page(page);
119	}
120}
121
122static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
123				     int gmr_id, struct list_head *desc_pages)
124{
125	struct page *page;
 
 
126
127	if (unlikely(list_empty(desc_pages)))
 
128		return;
129
130	page = list_entry(desc_pages->next, struct page, lru);
 
131
132	mutex_lock(&dev_priv->hw_mutex);
133
134	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
135	wmb();
136	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
137	mb();
138
139	mutex_unlock(&dev_priv->hw_mutex);
140
 
141}
142
143/**
144 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
145 * the number of used descriptors.
146 */
147
/**
 * vmw_gmr_count_descriptors - Count descriptors needed for @pages.
 *
 * @pages:     Array of guest pages.
 * @num_pages: Number of entries in @pages.
 *
 * Each run of physically contiguous pages collapses into a single
 * descriptor, so the count equals the number of pfn-run breaks.
 */
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
					unsigned long num_pages)
{
	unsigned long last_pfn = ~(0UL);
	unsigned long count = 0;
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		unsigned long cur_pfn = page_to_pfn(pages[i]);

		/* A new descriptor starts wherever the pfn run breaks. */
		if (cur_pfn != last_pfn + 1)
			++count;
		last_pfn = cur_pfn;
	}

	return count;
}
164
165int vmw_gmr_bind(struct vmw_private *dev_priv,
166		 struct page *pages[],
167		 unsigned long num_pages,
168		 int gmr_id)
169{
170	struct list_head desc_pages;
171	int ret;
172
173	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
174		return -EINVAL;
 
 
175
176	if (vmw_gmr_count_descriptors(pages, num_pages) >
177	    dev_priv->max_gmr_descriptors)
178		return -EINVAL;
179
180	INIT_LIST_HEAD(&desc_pages);
181
182	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
183	if (unlikely(ret != 0))
184		return ret;
185
186	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
187	vmw_gmr_free_descriptors(&desc_pages);
188
189	return 0;
190}
191
192
193void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
194{
195	mutex_lock(&dev_priv->hw_mutex);
196	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
197	wmb();
198	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
199	mb();
200	mutex_unlock(&dev_priv->hw_mutex);
201}