Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2014-2018 Etnaviv Project
 
 
 
 
 
 
 
 
 
 
 
 
  4 */
  5
  6#include <linux/bitops.h>
  7#include <linux/dma-mapping.h>
  8#include <linux/platform_device.h>
  9#include <linux/sizes.h>
 10#include <linux/slab.h>
 
 
 11
 12#include "etnaviv_gpu.h"
 13#include "etnaviv_mmu.h"
 
 14#include "state_hi.xml.h"
 15
 16#define PT_SIZE		SZ_2M
 17#define PT_ENTRIES	(PT_SIZE / sizeof(u32))
 18
 19#define GPU_MEM_START	0x80000000
 20
/*
 * MMUv1 context: the generic iommu context plus the single flat 2MB
 * page table (CPU mapping and DMA address) backing it.
 */
struct etnaviv_iommuv1_context {
	struct etnaviv_iommu_context base;
	u32 *pgtable_cpu;	/* kernel virtual address of the page table */
	dma_addr_t pgtable_dma;	/* bus address programmed into the MC registers */
};
 26
 27static struct etnaviv_iommuv1_context *
 28to_v1_context(struct etnaviv_iommu_context *context)
 
 
 
 
 
 
 
 
 29{
 30	return container_of(context, struct etnaviv_iommuv1_context, base);
 31}
 32
/*
 * Tear down the MMUv1 context: release the drm_mm address space
 * manager, free the 2MB page table, clear the global shared-context
 * pointer and free the context itself.
 */
static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

	drm_mm_takedown(&context->mm);

	dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
		    v1_context->pgtable_dma);

	/* MMUv1 has a single shared context; it is gone now. */
	context->global->v1.shared_context = NULL;

	kfree(v1_context);
}
 46
 47static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
 48			       unsigned long iova, phys_addr_t paddr,
 49			       size_t size, int prot)
 50{
 51	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 52	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
 53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 54	if (size != SZ_4K)
 55		return -EINVAL;
 56
 57	v1_context->pgtable_cpu[index] = paddr;
 
 
 58
 59	return 0;
 60}
 61
/*
 * Remove a single 4K mapping by pointing its entry back at the bad
 * page. Returns the unmapped size (SZ_4K).
 *
 * NOTE(review): on a size mismatch this returns -EINVAL through a
 * size_t return type, which becomes a huge positive value — callers
 * presumably only ever pass SZ_4K; confirm before relying on it.
 */
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
	unsigned long iova, size_t size)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	/* Unmapped entries point at the scratch page, not at zero. */
	v1_context->pgtable_cpu[index] = context->global->bad_page_dma;

	return SZ_4K;
}
 75
/* A dump always covers the entire flat 2MB page table. */
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
{
	return PT_SIZE;
}
 80
 81static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
 82				 void *buf)
 83{
 84	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 85
 86	memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
 87}
 88
/*
 * Make @context current on the GPU: swap the GPU's context reference
 * over, then program the MC base address and per-engine page table
 * pointer registers.
 */
static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	u32 pgtable;

	/* Drop the old reference before taking one on the new context. */
	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);

	/* set page table address in MC */
	pgtable = (u32)v1_context->pgtable_dma;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
115
116
/* MMUv1 backend operations, used by the common etnaviv MMU code. */
const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
	.free = etnaviv_iommuv1_free,
	.map = etnaviv_iommuv1_map,
	.unmap = etnaviv_iommuv1_unmap,
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
	.restore = etnaviv_iommuv1_restore,
};
125
/*
 * Get the MMUv1 iommu context for @global, creating it on first use.
 *
 * MMUv1 supports only one context, so every caller shares the same one
 * and merely gains an extra reference after the first allocation.
 * Returns NULL on allocation failure.
 */
struct etnaviv_iommu_context *
etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
{
	struct etnaviv_iommuv1_context *v1_context;
	struct etnaviv_iommu_context *context;

	mutex_lock(&global->lock);

	/*
	 * MMUv1 does not support switching between different contexts without
	 * a stop the world operation, so we only support a single shared
	 * context with this version.
	 */
	if (global->v1.shared_context) {
		context = global->v1.shared_context;
		etnaviv_iommu_context_get(context);
		mutex_unlock(&global->lock);
		return context;
	}

	v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
	if (!v1_context) {
		mutex_unlock(&global->lock);
		return NULL;
	}

	v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
					       &v1_context->pgtable_dma,
					       GFP_KERNEL);
	if (!v1_context->pgtable_cpu)
		goto out_free;

	/* Start with every entry pointing at the bad (scratch) page. */
	memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);

	context = &v1_context->base;
	context->global = global;
	kref_init(&context->refcount);
	mutex_init(&context->lock);
	INIT_LIST_HEAD(&context->mappings);
	drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
	context->global->v1.shared_context = context;

	mutex_unlock(&global->lock);

	return context;

out_free:
	mutex_unlock(&global->lock);
	kfree(v1_context);
	return NULL;
}
v4.6
 
  1/*
  2 * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms of the GNU General Public License version 2 as published by
  6 * the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful, but WITHOUT
  9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 11 * more details.
 12 *
 13 * You should have received a copy of the GNU General Public License along with
 14 * this program.  If not, see <http://www.gnu.org/licenses/>.
 15 */
 16
 17#include <linux/iommu.h>
 
 18#include <linux/platform_device.h>
 19#include <linux/sizes.h>
 20#include <linux/slab.h>
 21#include <linux/dma-mapping.h>
 22#include <linux/bitops.h>
 23
 24#include "etnaviv_gpu.h"
 25#include "etnaviv_mmu.h"
 26#include "etnaviv_iommu.h"
 27#include "state_hi.xml.h"
 28
 29#define PT_SIZE		SZ_2M
 30#define PT_ENTRIES	(PT_SIZE / sizeof(u32))
 31
 32#define GPU_MEM_START	0x80000000
 33
/* A flat page table: CPU mapping plus its DMA (bus) address. */
struct etnaviv_iommu_domain_pgtable {
	u32 *pgtable;		/* one u32 entry per 4K page */
	dma_addr_t paddr;	/* DMA address programmed into the MC */
};
 38
/* Etnaviv MMUv1 domain state wrapping the generic iommu_domain. */
struct etnaviv_iommu_domain {
	struct iommu_domain domain;	/* embedded generic domain */
	struct device *dev;		/* device used for DMA allocations */
	void *bad_page_cpu;		/* CPU address of the scratch page */
	dma_addr_t bad_page_dma;	/* DMA address of the scratch page */
	struct etnaviv_iommu_domain_pgtable pgtable;
	spinlock_t map_lock;		/* serializes page table updates */
};
 47
 48static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
 49{
 50	return container_of(domain, struct etnaviv_iommu_domain, domain);
 51}
 52
/*
 * Allocate the DMA-coherent memory backing a page table.
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): this passes a NULL struct device to dma_alloc_coherent;
 * a NULL device is not valid on all architectures — verify, or pass the
 * GPU device as done for the bad page in __etnaviv_iommu_init().
 */
static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
			 size_t size)
{
	pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
	if (!pgtable->pgtable)
		return -ENOMEM;

	return 0;
}
 62
/*
 * Free the DMA-coherent memory allocated by pgtable_alloc().
 *
 * NOTE(review): NULL device, matching the allocation — see the note on
 * pgtable_alloc() about NULL-device validity.
 */
static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
			 size_t size)
{
	dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
}
 68
 69static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
 70			   unsigned long iova)
 71{
 72	/* calcuate index into page table */
 73	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
 74	phys_addr_t paddr;
 75
 76	paddr = pgtable->pgtable[index];
 77
 78	return paddr;
 79}
 80
 81static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
 82			  unsigned long iova, phys_addr_t paddr)
 
 83{
 84	/* calcuate index into page table */
 85	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
 86
 87	pgtable->pgtable[index] = paddr;
 88}
 89
/*
 * Allocate the DMA structures backing the domain: a 4K "bad page"
 * filled with a recognizable marker pattern, and the 2MB page table
 * with every entry initially pointing at that bad page.
 *
 * Returns 0 on success or a negative errno; on failure all partial
 * allocations are released.
 */
static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
{
	u32 *p;
	int ret, i;

	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
						  SZ_4K,
						  &etnaviv_domain->bad_page_dma,
						  GFP_KERNEL);
	if (!etnaviv_domain->bad_page_cpu)
		return -ENOMEM;

	/* Fill the bad page with a marker so stray accesses are obvious. */
	p = etnaviv_domain->bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
	if (ret < 0) {
		/* Undo the bad page allocation on failure. */
		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
				  etnaviv_domain->bad_page_cpu,
				  etnaviv_domain->bad_page_dma);
		return ret;
	}

	/* Initially map every entry to the bad page. */
	for (i = 0; i < PT_ENTRIES; i++)
		etnaviv_domain->pgtable.pgtable[i] =
			etnaviv_domain->bad_page_dma;

	spin_lock_init(&etnaviv_domain->map_lock);

	return 0;
}
122
/* iommu_domain destructor: free page table, bad page and the domain. */
static void etnaviv_domain_free(struct iommu_domain *domain)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);

	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
			  etnaviv_domain->bad_page_cpu,
			  etnaviv_domain->bad_page_dma);

	kfree(etnaviv_domain);
}
135
136static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
137	   phys_addr_t paddr, size_t size, int prot)
138{
139	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
140
141	if (size != SZ_4K)
142		return -EINVAL;
143
144	spin_lock(&etnaviv_domain->map_lock);
145	pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
146	spin_unlock(&etnaviv_domain->map_lock);
147
148	return 0;
149}
150
/*
 * Remove a single 4K mapping by pointing its entry back at the bad
 * page, under the map lock. Returns the unmapped size (SZ_4K).
 *
 * NOTE(review): on a size mismatch this returns -EINVAL through a
 * size_t return type, which becomes a huge positive value — callers
 * presumably only ever pass SZ_4K; confirm before relying on it.
 */
static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
	unsigned long iova, size_t size)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	if (size != SZ_4K)
		return -EINVAL;

	spin_lock(&etnaviv_domain->map_lock);
	pgtable_write(&etnaviv_domain->pgtable, iova,
		      etnaviv_domain->bad_page_dma);
	spin_unlock(&etnaviv_domain->map_lock);

	return SZ_4K;
}
166
167static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
168	dma_addr_t iova)
169{
170	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
171
172	return pgtable_read(&etnaviv_domain->pgtable, iova);
173}
174
/* A dump always covers the entire flat 2MB page table. */
static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
{
	return PT_SIZE;
}
179
180static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
 
181{
182	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
183
184	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
185}
186
/* Generic iommu ops plus etnaviv-specific dump callbacks for MMUv1. */
static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
	.ops = {
		.domain_free = etnaviv_domain_free,
		.map = etnaviv_iommuv1_map,
		.unmap = etnaviv_iommuv1_unmap,
		.iova_to_phys = etnaviv_iommu_iova_to_phys,
		.pgsize_bitmap = SZ_4K,	/* only 4K pages are supported */
	},
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
};
198
/*
 * Program the per-engine MC page table registers to point at this
 * domain's page table.
 */
void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
	struct iommu_domain *domain)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
	u32 pgtable;

	/* set page table address in MC */
	pgtable = (u32)etnaviv_domain->pgtable.paddr;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
214
/*
 * Allocate and initialize the MMUv1 iommu domain for @gpu.
 *
 * Sets up the iommu_domain ops and the aperture covered by the 2MB
 * linear page table, then allocates the backing DMA memory. Returns
 * the embedded struct iommu_domain, or NULL on allocation failure.
 */
struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommu_domain *etnaviv_domain;
	int ret;

	etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
	if (!etnaviv_domain)
		return NULL;

	etnaviv_domain->dev = gpu->dev;

	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
	/* Aperture: GPU_MEM_START up to the last byte the table can map. */
	etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
	etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;

	ret = __etnaviv_iommu_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->domain;

out_free:
	kfree(etnaviv_domain);
	return NULL;
}