// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"

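/*
 * Wrapper around a conventional iommu_domain.  'pagetables' counts the
 * GPU-private TTBR0 pagetables (see struct msm_iommu_pagetable below)
 * currently attached on top of this domain.
 */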
struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
	atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

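/*
 * A GPU-private pagetable allocated on top of a parent msm_mmu.  It owns
 * its own io-pgtable ops and records the physical address of the pagetable
 * (ttbr) plus the ASID, which are exposed via msm_iommu_pagetable_params().
 */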
struct msm_iommu_pagetable {
	struct msm_mmu base;
	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	phys_addr_t ttbr;
	u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}

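/*
 * Unmap one 4K page at a time, then flush the TLB via the parent domain;
 * the private pagetable's own flush ops are no-ops (see null_tlb_ops
 * below).
 */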
static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	size_t unmapped = 0, requested = size;

	/* Unmap the block one page at a time */
	while (size) {
		unmapped += ops->unmap(ops, iova, 4096, NULL);
		iova += 4096;
		size -= 4096;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	/*
	 * 'size' was decremented to zero above, so compare against the
	 * originally requested length:
	 */
	return (unmapped == requested) ? 0 : -EINVAL;
}

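/*
 * Map an sg_table into the private pagetable one 4K page at a time,
 * unwinding any partially completed mapping on failure.
 */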
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	size_t mapped = 0;
	u64 addr = iova;
	unsigned int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		/* Map the block one page at a time */
		while (size) {
			if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
				msm_iommu_pagetable_unmap(mmu, iova, mapped);
				return -EINVAL;
			}

			phys += 4096;
			addr += 4096;
			size -= 4096;
			mapped += 4096;
		}
	}

	return 0;
}

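/*
 * Tear down a private pagetable.  When the last pagetable attached to the
 * parent domain goes away, ask the arm-smmu driver to disable TTBR0 again.
 */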
static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0)
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

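/*
 * Report the physical address of the pagetable (TTBR) and its ASID so that
 * the caller can program a switch to this pagetable.
 */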
int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}

static const struct msm_mmu_funcs pagetable_funcs = {
	.map = msm_iommu_pagetable_map,
	.unmap = msm_iommu_pagetable_unmap,
	.destroy = msm_iommu_pagetable_destroy,
};

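/*
 * The private pagetables are not directly attached to the SMMU, so there is
 * nothing useful for the io-pgtable TLB hooks to do; invalidation happens
 * against the parent domain instead (see msm_iommu_pagetable_unmap()).
 */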
static void msm_iommu_tlb_flush_all(void *cookie)
{
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
}

static const struct iommu_flush_ops null_tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg);

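/*
 * Create a GPU-private pagetable on top of @parent.  The parent's TTBR1
 * io-pgtable configuration is cloned and adapted for TTBR0, and the first
 * pagetable created is handed back to the arm-smmu driver as the trigger to
 * enable TTBR0 (split-pagetable) operation.
 */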
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
	if (!ttbr1_cfg)
		return ERR_PTR(-ENODEV);

	/*
	 * Defer setting the fault handler until we have a valid adreno_smmu
	 * to avoid accidentally installing a GPU specific fault handler for
	 * the display's iommu
	 */
	iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
	ttbr0_cfg.tlb = &null_tlb_ops;

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, iommu->domain);

	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		/* Enable stall on iommu fault: */
		adreno_smmu->set_stall(adreno_smmu->cookie, true);

		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation.  But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want.  So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}

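/*
 * Gather any Adreno-specific fault information, then forward the fault to
 * the handler installed via msm_mmu, if there is one.
 */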
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
	struct adreno_smmu_fault_info info, *ptr = NULL;

	if (adreno_smmu->get_fault_info) {
		adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
		ptr = &info;
	}

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags, ptr);

	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
	return 0;
}

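/*
 * With stall-on-fault enabled (see msm_iommu_pagetable_create()), the SMMU
 * stalls the faulting transaction so GPU state can be inspected; this
 * resumes it, with 'true' requesting that the transaction be terminated.
 */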
static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);

	adreno_smmu->resume_translation(adreno_smmu->cookie, true);
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

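/*
 * The parent domain uses the regular IOMMU API.  Note the sign extension:
 * the SMMU treats bit 48 as a sign bit, so for example an iova of
 * 0x0001000000000000 is extended to 0xffff000000000000 before being handed
 * to the IOMMU API.
 */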
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
	.detach = msm_iommu_detach,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.destroy = msm_iommu_destroy,
	.resume_translation = msm_iommu_resume_translation,
};

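/*
 * Wrap @domain in a msm_mmu and attach it to @dev.  On failure an ERR_PTR
 * is returned and the caller remains responsible for freeing @domain.
 */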
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
	struct msm_iommu *iommu;
	int ret;

	if (!domain)
		return ERR_PTR(-ENODEV);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}