Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
 
 
 
 
 
 
 
 
 
 
 
 
  3 *
  4 * Copyright (C) 2013 Freescale Semiconductor, Inc.
  5 * Author: Varun Sethi <varun.sethi@freescale.com>
 
  6 */
  7
  8#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
  9
 10#include "fsl_pamu_domain.h"
 11
 12#include <linux/platform_device.h>
 13#include <sysdev/fsl_pci.h>
 14
/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

/* Slab caches for the domain and per-device tracking structures. */
static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
/* Protects the per-device iommu private pointer and devinfo lifetime. */
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */
 26
/* Convert the generic iommu_domain handle to the embedding fsl_dma_domain. */
static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}
 31
 32static int __init iommu_init_mempool(void)
 33{
 34	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
 35						  sizeof(struct fsl_dma_domain),
 36						  0,
 37						  SLAB_HWCACHE_ALIGN,
 38						  NULL);
 39	if (!fsl_pamu_domain_cache) {
 40		pr_debug("Couldn't create fsl iommu_domain cache\n");
 41		return -ENOMEM;
 42	}
 43
 44	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
 45						sizeof(struct device_domain_info),
 46						0,
 47						SLAB_HWCACHE_ALIGN,
 48						NULL);
 49	if (!iommu_devinfo_cache) {
 50		pr_debug("Couldn't create devinfo cache\n");
 51		kmem_cache_destroy(fsl_pamu_domain_cache);
 52		return -ENOMEM;
 53	}
 54
 55	return 0;
 56}
 57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 58static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
 59			      u32 val)
 60{
 61	int ret = 0;
 62	unsigned long flags;
 63
 64	spin_lock_irqsave(&iommu_lock, flags);
 65	ret = pamu_update_paace_stash(liodn, val);
 66	if (ret) {
 67		pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
 68		spin_unlock_irqrestore(&iommu_lock, flags);
 69		return ret;
 
 
 
 
 
 
 
 
 
 
 70	}
 71
 72	spin_unlock_irqrestore(&iommu_lock, flags);
 73
 74	return ret;
 75}
 76
/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	u32 omi_index = ~(u32)0;
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	spin_lock_irqsave(&iommu_lock, flags);
	/* Disable the LIODN before reprogramming its PAACE entries. */
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
	/* First pass: program the entry with no access permissions. */
	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	/* Second pass: grant read (QUERY) and write (UPDATE) access. */
	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d\n",
			 liodn);
	}
	return ret;
}
109
/*
 * Unlink @info from its domain's device list, disable the LIODN in
 * the PAMU, clear the device's iommu private pointer and free the
 * tracking structure.
 *
 * Called with the owning domain's domain_lock held (see detach_device()).
 */
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
123
/*
 * Detach @dev from @dma_domain; a NULL @dev detaches every device
 * on the domain's list.
 */
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
137
138static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
139{
140	struct device_domain_info *info, *old_domain_info;
141	unsigned long flags;
142
143	spin_lock_irqsave(&device_domain_lock, flags);
144	/*
145	 * Check here if the device is already attached to domain or not.
146	 * If the device is already attached to a domain detach it.
147	 */
148	old_domain_info = dev_iommu_priv_get(dev);
149	if (old_domain_info && old_domain_info->domain != dma_domain) {
150		spin_unlock_irqrestore(&device_domain_lock, flags);
151		detach_device(dev, old_domain_info->domain);
152		spin_lock_irqsave(&device_domain_lock, flags);
153	}
154
155	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
156
157	info->dev = dev;
158	info->liodn = liodn;
159	info->domain = dma_domain;
160
161	list_add(&info->link, &dma_domain->devices);
162	/*
163	 * In case of devices with multiple LIODNs just store
164	 * the info for the first LIODN as all
165	 * LIODNs share the same domain
166	 */
167	if (!dev_iommu_priv_get(dev))
168		dev_iommu_priv_set(dev, info);
169	spin_unlock_irqrestore(&device_domain_lock, flags);
170}
171
172static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
173					 dma_addr_t iova)
174{
 
 
175	if (iova < domain->geometry.aperture_start ||
176	    iova > domain->geometry.aperture_end)
177		return 0;
178	return iova;
 
179}
180
181static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
182{
183	return cap == IOMMU_CAP_CACHE_COHERENCY;
184}
185
/* Release a domain: detach every device still on it, then free it. */
static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
194
195static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
196{
197	struct fsl_dma_domain *dma_domain;
198
199	/*
200	 * FIXME: This isn't creating an unmanaged domain since the
201	 * default_domain_ops do not have any map/unmap function it doesn't meet
202	 * the requirements for __IOMMU_DOMAIN_PAGING. The only purpose seems to
203	 * allow drivers/soc/fsl/qbman/qman_portal.c to do
204	 * fsl_pamu_configure_l1_stash()
205	 */
206	if (type != IOMMU_DOMAIN_UNMANAGED)
207		return NULL;
208
209	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
210	if (!dma_domain)
 
211		return NULL;
212
213	dma_domain->stash_id = ~(u32)0;
214	INIT_LIST_HEAD(&dma_domain->devices);
215	spin_lock_init(&dma_domain->domain_lock);
216
217	/* default geometry 64 GB i.e. maximum system address */
218	dma_domain->iommu_domain. geometry.aperture_start = 0;
219	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
220	dma_domain->iommu_domain.geometry.force_aperture = true;
221
222	return &dma_domain->iommu_domain;
223}
224
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
225/* Update stash destination for all LIODNs associated with the domain */
226static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
227{
228	struct device_domain_info *info;
229	int ret = 0;
230
231	list_for_each_entry(info, &dma_domain->devices, link) {
232		ret = update_liodn_stash(info->liodn, dma_domain, val);
233		if (ret)
234			break;
235	}
236
237	return ret;
238}
239
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Attach @dev to an unmanaged domain: read its "fsl,liodn" property
 * (for PCI devices, the hosting controller's), then validate,
 * record, configure and enable each LIODN for this domain.
 *
 * Returns 0 on success or a negative errno.
 */
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -ENODEV;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* The property is an array of cells; process each LIODN in turn. */
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -ENODEV;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}
292
/*
 * FIXME: fsl/pamu is completely broken in terms of how it works with the iommu
 * API. Immediately after probe the HW is left in an IDENTITY translation and
 * the driver provides a non-working UNMANAGED domain that it can switch over
 * to. However it cannot switch back to an IDENTITY translation, instead it
 * switches to what looks like BLOCKING.
 */
static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain,
				    struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct fsl_dma_domain *dma_domain;
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Hack to keep things working as they always have, only leaving an
	 * UNMANAGED domain makes it BLOCKING.
	 */
	if (domain == platform_domain || !domain ||
	    domain->type != IOMMU_DOMAIN_UNMANAGED)
		return 0;

	dma_domain = to_fsl_dma_domain(domain);

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	/* Always returns 0: a missing property is only logged. */
	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
	return 0;
}
342
/* Ops for the platform (default) domain: attach == leave UNMANAGED domain. */
static struct iommu_domain_ops fsl_pamu_platform_ops = {
	.attach_dev = fsl_pamu_platform_attach,
};

/* Singleton default domain handed to the IOMMU core for every device. */
static struct iommu_domain fsl_pamu_platform_domain = {
	.type = IOMMU_DOMAIN_PLATFORM,
	.ops = &fsl_pamu_platform_ops,
};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
351
/* Set the domain stash attribute */
/*
 * Public entry point (used by drivers/soc/fsl/qbman): resolve the L1
 * cache stash id for @cpu and program it into every LIODN attached
 * to @domain. Returns 0 on success or a negative errno.
 */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* ~(u32)0 from get_stash_id() indicates a failed lookup. */
	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}
	/* Push the new stash id to every LIODN on this domain. */
	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
371
 
 
 
 
 
 
 
 
 
 
 
372static  bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
373{
374	u32 version;
375
376	/* Check the PCI controller version number by readding BRR1 register */
377	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
378	version &= PCI_FSL_BRR1_VER;
379	/* If PCI controller version is >= 0x204 we can partition endpoints */
380	return version >= 0x204;
381}
382
/*
 * Group assignment policy: platform devices each get their own
 * group; PCI endpoints get per-device groups when the controller can
 * partition endpoints, otherwise every device behind the controller
 * shares the controller's group.
 */
static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group;
	struct pci_dev *pdev;

	/*
	 * For platform devices we allocate a separate group for each of the
	 * devices.
	 */
	if (!dev_is_pci(dev))
		return generic_device_group(dev);

	/*
	 * We can partition PCIe devices so assign device group to the device
	 */
	pdev = to_pci_dev(dev);
	if (check_pci_ctl_endpt_part(pci_bus_to_host(pdev->bus)))
		return pci_device_group(&pdev->dev);

	/*
	 * All devices connected to the controller will share the same device
	 * group.
	 *
	 * Due to ordering between fsl_pamu_init() and fsl_pci_init() it is
	 * guaranteed that the pci_ctl->parent platform_device will have the
	 * iommu driver bound and will already have a group set. So we just
	 * re-use this group as the group for every device in the hose.
	 */
	group = iommu_group_get(pci_bus_to_host(pdev->bus)->parent);
	if (WARN_ON(!group))
		return ERR_PTR(-EINVAL);
	return group;
}
416
417static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
418{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
419	/*
420	 * uboot must fill the fsl,liodn for platform devices to be supported by
421	 * the iommu.
422	 */
423	if (!dev_is_pci(dev) &&
424	    !of_property_present(dev->of_node, "fsl,liodn"))
425		return ERR_PTR(-ENODEV);
 
 
426
427	return &pamu_iommu;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
428}
429
/* PAMU iommu_ops: UNMANAGED domains plus the platform default domain. */
static const struct iommu_ops fsl_pamu_ops = {
	.default_domain = &fsl_pamu_platform_domain,
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.probe_device	= fsl_pamu_probe_device,
	.device_group   = fsl_pamu_device_group,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= fsl_pamu_attach_device,
		.iova_to_phys	= fsl_pamu_iova_to_phys,
		.free		= fsl_pamu_domain_free,
	}
};
442
/*
 * Driver init: create the slab caches, then register the PAMU with
 * the IOMMU core (sysfs node "iommu0" plus iommu_device_register()).
 *
 * NOTE(review): the caches created by iommu_init_mempool() are not
 * destroyed on the failure paths below — confirm whether this
 * one-shot __init path intentionally leaks them.
 */
int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
	}

	return ret;
}
v4.17
 
   1/*
   2 * This program is free software; you can redistribute it and/or modify
   3 * it under the terms of the GNU General Public License, version 2, as
   4 * published by the Free Software Foundation.
   5 *
   6 * This program is distributed in the hope that it will be useful,
   7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
   8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   9 * GNU General Public License for more details.
  10 *
  11 * You should have received a copy of the GNU General Public License
  12 * along with this program; if not, write to the Free Software
  13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  14 *
  15 * Copyright (C) 2013 Freescale Semiconductor, Inc.
  16 * Author: Varun Sethi <varun.sethi@freescale.com>
  17 *
  18 */
  19
  20#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
  21
  22#include "fsl_pamu_domain.h"
  23
 
  24#include <sysdev/fsl_pci.h>
  25
  26/*
  27 * Global spinlock that needs to be held while
  28 * configuring PAMU.
  29 */
  30static DEFINE_SPINLOCK(iommu_lock);
  31
  32static struct kmem_cache *fsl_pamu_domain_cache;
  33static struct kmem_cache *iommu_devinfo_cache;
  34static DEFINE_SPINLOCK(device_domain_lock);
  35
  36struct iommu_device pamu_iommu;	/* IOMMU core code handle */
  37
/* Convert the generic iommu_domain handle to the embedding fsl_dma_domain. */
static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}
  42
/* Create the slab caches used for domain and per-device structures. */
static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		/* Roll back the first cache on partial failure. */
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}
  68
/*
 * Translate @iova to a physical address through the domain's DMA
 * window table. Returns 0 when the windows/geometry are not yet
 * configured or the covering window is not valid.
 */
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
	u32 win_cnt = dma_domain->win_cnt;
	struct dma_window *win_ptr = &dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;

	geom = &dma_domain->iommu_domain.geometry;

	if (!win_cnt || !dma_domain->geom_size) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
		return 0;
	}

	if (win_cnt > 1) {
		u64 subwin_size;
		dma_addr_t subwin_iova;
		u32 wnd;

		/* Select the subwindow covering @iova; sizes are powers of two. */
		subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
		subwin_iova = iova & ~(subwin_size - 1);
		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
		win_ptr = &dma_domain->win_arr[wnd];
	}

	if (win_ptr->valid)
		return win_ptr->paddr + (iova & (win_ptr->size - 1));

	return 0;
}
  98
  99static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
 100{
 101	struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
 102	int i, ret;
 103	unsigned long rpn, flags;
 104
 105	for (i = 0; i < dma_domain->win_cnt; i++) {
 106		if (sub_win_ptr[i].valid) {
 107			rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
 108			spin_lock_irqsave(&iommu_lock, flags);
 109			ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
 110						 sub_win_ptr[i].size,
 111						 ~(u32)0,
 112						 rpn,
 113						 dma_domain->snoop_id,
 114						 dma_domain->stash_id,
 115						 (i > 0) ? 1 : 0,
 116						 sub_win_ptr[i].prot);
 117			spin_unlock_irqrestore(&iommu_lock, flags);
 118			if (ret) {
 119				pr_debug("SPAACE configuration failed for liodn %d\n",
 120					 liodn);
 121				return ret;
 122			}
 123		}
 124	}
 125
 126	return ret;
 127}
 128
/* Program the primary PAACE entry with the domain's single DMA window. */
static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
	phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_config_ppaace(liodn, wnd_addr,
				 wnd->size,
				 ~(u32)0,
				 wnd->paddr >> PAMU_PAGE_SHIFT,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 0, wnd->prot);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAACE configuration failed for liodn %d\n", liodn);

	return ret;
}
 149
 150/* Map the DMA window corresponding to the LIODN */
 151static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
 152{
 153	if (dma_domain->win_cnt > 1)
 154		return map_subwins(liodn, dma_domain);
 155	else
 156		return map_win(liodn, dma_domain);
 157}
 158
/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (dma_domain->win_cnt > 1) {
		/* Multiple windows: reprogram the matching SPAACE entry. */
		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id,
					 dma_domain->stash_id,
					 (wnd_nr > 0) ? 1 : 0,
					 wnd->prot);
		if (ret)
			pr_debug("Subwindow reconfiguration failed for liodn %d\n",
				 liodn);
	} else {
		/* Single window: reprogram the primary PAACE entry. */
		phys_addr_t wnd_addr;

		wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;

		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id, dma_domain->stash_id,
					 0, wnd->prot);
		if (ret)
			pr_debug("Window reconfiguration failed for liodn %d\n",
				 liodn);
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}
 199
/*
 * Reprogram the stash destination for every window's PAACE/SPAACE
 * field of @liodn. Fails with -EINVAL if windows are unconfigured.
 */
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0, i;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
			 liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < dma_domain->win_cnt; i++) {
		ret = pamu_update_paace_stash(liodn, i, val);
		if (ret) {
			pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
				 i, liodn);
			spin_unlock_irqrestore(&iommu_lock, flags);
			return ret;
		}
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}
 228
/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
			  struct fsl_dma_domain *dma_domain,
			  struct iommu_domain_geometry *geom_attr,
			  u32 win_cnt)
{
	phys_addr_t window_addr, window_size;
	phys_addr_t subwin_size;
	int ret = 0, i;
	u32 omi_index = ~(u32)0;
	unsigned long flags;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	window_addr = geom_attr->aperture_start;
	window_size = dma_domain->geom_size;

	/* Disable the LIODN while its primary PAACE entry is reprogrammed. */
	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
					 0, dma_domain->snoop_id,
					 dma_domain->stash_id, win_cnt, 0);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
			 liodn, win_cnt);
		return ret;
	}

	if (win_cnt > 1) {
		/* Carve the window into win_cnt equal power-of-two subwindows. */
		subwin_size = window_size >> ilog2(win_cnt);
		for (i = 0; i < win_cnt; i++) {
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_disable_spaace(liodn, i);
			if (!ret)
				ret = pamu_config_spaace(liodn, win_cnt, i,
							 subwin_size, omi_index,
							 0, dma_domain->snoop_id,
							 dma_domain->stash_id,
							 0, 0);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}
 286
 287static int check_size(u64 size, dma_addr_t iova)
 288{
 289	/*
 290	 * Size must be a power of two and at least be equal
 291	 * to PAMU page size.
 292	 */
 293	if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
 294		pr_debug("Size too small or not a power of two\n");
 295		return -EINVAL;
 296	}
 297
 298	/* iova must be page size aligned */
 299	if (iova & (size - 1)) {
 300		pr_debug("Address is not aligned with window size\n");
 301		return -EINVAL;
 302	}
 303
 304	return 0;
 305}
 306
 307static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
 308{
 309	struct fsl_dma_domain *domain;
 310
 311	domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
 312	if (!domain)
 313		return NULL;
 314
 315	domain->stash_id = ~(u32)0;
 316	domain->snoop_id = ~(u32)0;
 317	domain->win_cnt = pamu_get_max_subwin_cnt();
 318	domain->geom_size = 0;
 319
 320	INIT_LIST_HEAD(&domain->devices);
 321
 322	spin_lock_init(&domain->domain_lock);
 323
 324	return domain;
 325}
 326
/*
 * Unlink @info from its domain, free subwindows (if any), disable
 * the LIODN and release the tracking structure.
 *
 * Called with the owning domain's domain_lock held (see detach_device()).
 */
static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	info->dev->archdata.iommu_domain = NULL;
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
 342
/*
 * Detach @dev from @dma_domain; a NULL @dev detaches every device
 * on the domain's list.
 */
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
 356
 357static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
 358{
 359	struct device_domain_info *info, *old_domain_info;
 360	unsigned long flags;
 361
 362	spin_lock_irqsave(&device_domain_lock, flags);
 363	/*
 364	 * Check here if the device is already attached to domain or not.
 365	 * If the device is already attached to a domain detach it.
 366	 */
 367	old_domain_info = dev->archdata.iommu_domain;
 368	if (old_domain_info && old_domain_info->domain != dma_domain) {
 369		spin_unlock_irqrestore(&device_domain_lock, flags);
 370		detach_device(dev, old_domain_info->domain);
 371		spin_lock_irqsave(&device_domain_lock, flags);
 372	}
 373
 374	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
 375
 376	info->dev = dev;
 377	info->liodn = liodn;
 378	info->domain = dma_domain;
 379
 380	list_add(&info->link, &dma_domain->devices);
 381	/*
 382	 * In case of devices with multiple LIODNs just store
 383	 * the info for the first LIODN as all
 384	 * LIODNs share the same domain
 385	 */
 386	if (!dev->archdata.iommu_domain)
 387		dev->archdata.iommu_domain = info;
 388	spin_unlock_irqrestore(&device_domain_lock, flags);
 389}
 390
/* Translate @iova through the domain's DMA windows; 0 when outside the aperture. */
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	return get_phys_addr(dma_domain, iova);
}
 402
 403static bool fsl_pamu_capable(enum iommu_cap cap)
 404{
 405	return cap == IOMMU_CAP_CACHE_COHERENCY;
 406}
 407
/* Tear down a domain: detach all devices, clear state flags, free it. */
static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);

	dma_domain->enabled = 0;
	dma_domain->mapped = 0;

	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
 420
 421static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
 422{
 423	struct fsl_dma_domain *dma_domain;
 424
 
 
 
 
 
 
 
 425	if (type != IOMMU_DOMAIN_UNMANAGED)
 426		return NULL;
 427
 428	dma_domain = iommu_alloc_dma_domain();
 429	if (!dma_domain) {
 430		pr_debug("dma_domain allocation failed\n");
 431		return NULL;
 432	}
 433	/* defaul geometry 64 GB i.e. maximum system address */
 
 
 
 
 434	dma_domain->iommu_domain. geometry.aperture_start = 0;
 435	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
 436	dma_domain->iommu_domain.geometry.force_aperture = true;
 437
 438	return &dma_domain->iommu_domain;
 439}
 440
 441/* Configure geometry settings for all LIODNs associated with domain */
 442static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
 443				    struct iommu_domain_geometry *geom_attr,
 444				    u32 win_cnt)
 445{
 446	struct device_domain_info *info;
 447	int ret = 0;
 448
 449	list_for_each_entry(info, &dma_domain->devices, link) {
 450		ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
 451				     geom_attr, win_cnt);
 452		if (ret)
 453			break;
 454	}
 455
 456	return ret;
 457}
 458
 459/* Update stash destination for all LIODNs associated with the domain */
 460static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
 461{
 462	struct device_domain_info *info;
 463	int ret = 0;
 464
 465	list_for_each_entry(info, &dma_domain->devices, link) {
 466		ret = update_liodn_stash(info->liodn, dma_domain, val);
 467		if (ret)
 468			break;
 469	}
 470
 471	return ret;
 472}
 473
 474/* Update domain mappings for all LIODNs associated with the domain */
 475static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
 476{
 477	struct device_domain_info *info;
 478	int ret = 0;
 479
 480	list_for_each_entry(info, &dma_domain->devices, link) {
 481		ret = update_liodn(info->liodn, dma_domain, wnd_nr);
 482		if (ret)
 483			break;
 484	}
 485	return ret;
 486}
 487
/*
 * Disable window @wnd_nr for every LIODN on the domain. With a
 * single enabled window the whole LIODN is disabled; otherwise only
 * the matching SPAACE entry is.
 */
static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
			ret = pamu_disable_liodn(info->liodn);
			if (!ret)
				dma_domain->enabled = 0;
		} else {
			ret = pamu_disable_spaace(info->liodn, wnd_nr);
		}
	}

	return ret;
}
 505
/*
 * Invalidate DMA window @wnd_nr of the domain after disabling it in
 * the hardware for every attached LIODN. Invalid arguments are only
 * logged (the callback returns void).
 */
static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (dma_domain->win_arr[wnd_nr].valid) {
		ret = disable_domain_win(dma_domain, wnd_nr);
		if (!ret) {
			dma_domain->win_arr[wnd_nr].valid = 0;
			dma_domain->mapped--;
		}
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
 535
/*
 * Populate DMA window @wnd_nr of the domain with @paddr/@size and
 * push the mapping to every attached LIODN. Fails with -EBUSY when
 * the window is already valid (it must be disabled first), -EINVAL
 * for bad index/size, -ENODEV when windows are unconfigured.
 */
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				  phys_addr_t paddr, u64 size, int prot)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	struct dma_window *wnd;
	int pamu_prot = 0;
	int ret;
	unsigned long flags;
	u64 win_size;

	/* Translate generic IOMMU prot bits into PAMU access permissions. */
	if (prot & IOMMU_READ)
		pamu_prot |= PAACE_AP_PERMS_QUERY;
	if (prot & IOMMU_WRITE)
		pamu_prot |= PAACE_AP_PERMS_UPDATE;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/* Each (sub)window covers an equal share of the geometry. */
	win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
	if (size > win_size) {
		pr_debug("Invalid window size\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	if (dma_domain->win_cnt == 1) {
		if (dma_domain->enabled) {
			pr_debug("Disable the window before updating the mapping\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EBUSY;
		}

		ret = check_size(size, domain->geometry.aperture_start);
		if (ret) {
			pr_debug("Aperture start not aligned to the size\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EINVAL;
		}
	}

	wnd = &dma_domain->win_arr[wnd_nr];
	if (!wnd->valid) {
		wnd->paddr = paddr;
		wnd->size = size;
		wnd->prot = pamu_prot;

		ret = update_domain_mapping(dma_domain, wnd_nr);
		if (!ret) {
			wnd->valid = 1;
			dma_domain->mapped++;
		}
	} else {
		pr_debug("Disable the window before updating the mapping\n");
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
 606
 607/*
 608 * Attach the LIODN to the DMA domain and configure the geometry
 609 * and window mappings.
 610 */
 611static int handle_attach_device(struct fsl_dma_domain *dma_domain,
 612				struct device *dev, const u32 *liodn,
 613				int num)
 614{
 615	unsigned long flags;
 616	struct iommu_domain *domain = &dma_domain->iommu_domain;
 617	int ret = 0;
 618	int i;
 619
 620	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 621	for (i = 0; i < num; i++) {
 622		/* Ensure that LIODN value is valid */
 623		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
 624			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
 625				 liodn[i], dev->of_node);
 626			ret = -EINVAL;
 627			break;
 628		}
 629
 630		attach_device(dma_domain, liodn[i], dev);
 631		/*
 632		 * Check if geometry has already been configured
 633		 * for the domain. If yes, set the geometry for
 634		 * the LIODN.
 635		 */
 636		if (dma_domain->win_arr) {
 637			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
 638
 639			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
 640					     &domain->geometry, win_cnt);
 641			if (ret)
 642				break;
 643			if (dma_domain->mapped) {
 644				/*
 645				 * Create window/subwindow mapping for
 646				 * the LIODN.
 647				 */
 648				ret = map_liodn(liodn[i], dma_domain);
 649				if (ret)
 650					break;
 651			}
 652		}
 653	}
 654	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 655
 656	return ret;
 657}
 658
 659static int fsl_pamu_attach_device(struct iommu_domain *domain,
 660				  struct device *dev)
 661{
 662	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 
 
 663	const u32 *liodn;
 664	u32 liodn_cnt;
 665	int len, ret = 0;
 666	struct pci_dev *pdev = NULL;
 667	struct pci_controller *pci_ctl;
 668
 669	/*
 670	 * Use LIODN of the PCI controller while attaching a
 671	 * PCI device.
 672	 */
 673	if (dev_is_pci(dev)) {
 674		pdev = to_pci_dev(dev);
 675		pci_ctl = pci_bus_to_host(pdev->bus);
 676		/*
 677		 * make dev point to pci controller device
 678		 * so we can get the LIODN programmed by
 679		 * u-boot.
 680		 */
 681		dev = pci_ctl->parent;
 682	}
 683
 684	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
 685	if (liodn) {
 686		liodn_cnt = len / sizeof(u32);
 687		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
 688	} else {
 689		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
 690		ret = -EINVAL;
 691	}
 692
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 693	return ret;
 694}
 695
 696static void fsl_pamu_detach_device(struct iommu_domain *domain,
 697				   struct device *dev)
 
 
 
 
 
 
 
 698{
 699	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 
 700	const u32 *prop;
 701	int len;
 702	struct pci_dev *pdev = NULL;
 703	struct pci_controller *pci_ctl;
 704
 705	/*
 
 
 
 
 
 
 
 
 
 
 706	 * Use LIODN of the PCI controller while detaching a
 707	 * PCI device.
 708	 */
 709	if (dev_is_pci(dev)) {
 710		pdev = to_pci_dev(dev);
 711		pci_ctl = pci_bus_to_host(pdev->bus);
 712		/*
 713		 * make dev point to pci controller device
 714		 * so we can get the LIODN programmed by
 715		 * u-boot.
 716		 */
 717		dev = pci_ctl->parent;
 718	}
 719
 720	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
 721	if (prop)
 722		detach_device(dev, dma_domain);
 723	else
 724		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
 
 725}
 726
 727static  int configure_domain_geometry(struct iommu_domain *domain, void *data)
 728{
 729	struct iommu_domain_geometry *geom_attr = data;
 730	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 731	dma_addr_t geom_size;
 732	unsigned long flags;
 733
 734	geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
 735	/*
 736	 * Sanity check the geometry size. Also, we do not support
 737	 * DMA outside of the geometry.
 738	 */
 739	if (check_size(geom_size, geom_attr->aperture_start) ||
 740	    !geom_attr->force_aperture) {
 741		pr_debug("Invalid PAMU geometry attributes\n");
 742		return -EINVAL;
 743	}
 744
 745	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 746	if (dma_domain->enabled) {
 747		pr_debug("Can't set geometry attributes as domain is active\n");
 748		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 749		return  -EBUSY;
 750	}
 751
 752	/* Copy the domain geometry information */
 753	memcpy(&domain->geometry, geom_attr,
 754	       sizeof(struct iommu_domain_geometry));
 755	dma_domain->geom_size = geom_size;
 756
 757	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 758
 759	return 0;
 760}
 761
 762/* Set the domain stash attribute */
 763static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
 764{
 765	struct pamu_stash_attribute *stash_attr = data;
 766	unsigned long flags;
 767	int ret;
 768
 769	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 770
 771	memcpy(&dma_domain->dma_stash, stash_attr,
 772	       sizeof(struct pamu_stash_attribute));
 773
 774	dma_domain->stash_id = get_stash_id(stash_attr->cache,
 775					    stash_attr->cpu);
 776	if (dma_domain->stash_id == ~(u32)0) {
 777		pr_debug("Invalid stash attributes\n");
 778		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 779		return -EINVAL;
 780	}
 781
 782	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
 783
 784	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 785
 786	return ret;
 787}
 788
/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	/* Enabling DMA requires at least one valid window mapping */
	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	/*
	 * Best effort: a per-LIODN enable/disable failure is only
	 * logged and the function still returns 0 in that case.
	 */
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		if (ret)
			pr_debug("Unable to set dma state for liodn %d",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}
 816
 817static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
 818				    enum iommu_attr attr_type, void *data)
 819{
 820	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 821	int ret = 0;
 822
 823	switch (attr_type) {
 824	case DOMAIN_ATTR_GEOMETRY:
 825		ret = configure_domain_geometry(domain, data);
 826		break;
 827	case DOMAIN_ATTR_FSL_PAMU_STASH:
 828		ret = configure_domain_stash(dma_domain, data);
 829		break;
 830	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
 831		ret = configure_domain_dma_state(dma_domain, *(int *)data);
 832		break;
 833	default:
 834		pr_debug("Unsupported attribute type\n");
 835		ret = -EINVAL;
 836		break;
 837	}
 838
 839	return ret;
 840}
 841
 842static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
 843				    enum iommu_attr attr_type, void *data)
 844{
 845	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 846	int ret = 0;
 847
 848	switch (attr_type) {
 849	case DOMAIN_ATTR_FSL_PAMU_STASH:
 850		memcpy(data, &dma_domain->dma_stash,
 851		       sizeof(struct pamu_stash_attribute));
 852		break;
 853	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
 854		*(int *)data = dma_domain->enabled;
 855		break;
 856	case DOMAIN_ATTR_FSL_PAMUV1:
 857		*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
 858		break;
 859	default:
 860		pr_debug("Unsupported attribute type\n");
 861		ret = -EINVAL;
 862		break;
 863	}
 864
 865	return ret;
 866}
 867
 868static struct iommu_group *get_device_iommu_group(struct device *dev)
 869{
 870	struct iommu_group *group;
 871
 872	group = iommu_group_get(dev);
 873	if (!group)
 874		group = iommu_group_alloc();
 875
 876	return group;
 877}
 878
 879static  bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
 880{
 881	u32 version;
 882
 883	/* Check the PCI controller version number by readding BRR1 register */
 884	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
 885	version &= PCI_FSL_BRR1_VER;
 886	/* If PCI controller version is >= 0x204 we can partition endpoints */
 887	return version >= 0x204;
 888}
 889
 890/* Get iommu group information from peer devices or devices on the parent bus */
 891static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
 892{
 893	struct pci_dev *tmp;
 894	struct iommu_group *group;
 895	struct pci_bus *bus = pdev->bus;
 896
 897	/*
 898	 * Traverese the pci bus device list to get
 899	 * the shared iommu group.
 900	 */
 901	while (bus) {
 902		list_for_each_entry(tmp, &bus->devices, bus_list) {
 903			if (tmp == pdev)
 904				continue;
 905			group = iommu_group_get(&tmp->dev);
 906			if (group)
 907				return group;
 908		}
 909
 910		bus = bus->parent;
 911	}
 912
 913	return NULL;
 914}
 915
 916static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
 917{
 918	struct pci_controller *pci_ctl;
 919	bool pci_endpt_partioning;
 920	struct iommu_group *group = NULL;
 921
 922	pci_ctl = pci_bus_to_host(pdev->bus);
 923	pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
 924	/* We can partition PCIe devices so assign device group to the device */
 925	if (pci_endpt_partioning) {
 926		group = pci_device_group(&pdev->dev);
 927
 928		/*
 929		 * PCIe controller is not a paritionable entity
 930		 * free the controller device iommu_group.
 931		 */
 932		if (pci_ctl->parent->iommu_group)
 933			iommu_group_remove_device(pci_ctl->parent);
 934	} else {
 935		/*
 936		 * All devices connected to the controller will share the
 937		 * PCI controllers device group. If this is the first
 938		 * device to be probed for the pci controller, copy the
 939		 * device group information from the PCI controller device
 940		 * node and remove the PCI controller iommu group.
 941		 * For subsequent devices, the iommu group information can
 942		 * be obtained from sibling devices (i.e. from the bus_devices
 943		 * link list).
 944		 */
 945		if (pci_ctl->parent->iommu_group) {
 946			group = get_device_iommu_group(pci_ctl->parent);
 947			iommu_group_remove_device(pci_ctl->parent);
 948		} else {
 949			group = get_shared_pci_device_group(pdev);
 950		}
 951	}
 952
 953	if (!group)
 954		group = ERR_PTR(-ENODEV);
 955
 956	return group;
 957}
 958
 959static struct iommu_group *fsl_pamu_device_group(struct device *dev)
 960{
 961	struct iommu_group *group = ERR_PTR(-ENODEV);
 962	int len;
 963
 964	/*
 965	 * For platform devices we allocate a separate group for
 966	 * each of the devices.
 
 
 
 
 
 967	 */
 968	if (dev_is_pci(dev))
 969		group = get_pci_device_group(to_pci_dev(dev));
 970	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
 971		group = get_device_iommu_group(dev);
 972
 973	return group;
 974}
 975
 976static int fsl_pamu_add_device(struct device *dev)
 977{
 978	struct iommu_group *group;
 979
 980	group = iommu_group_get_for_dev(dev);
 981	if (IS_ERR(group))
 982		return PTR_ERR(group);
 983
 984	iommu_group_put(group);
 985
 986	iommu_device_link(&pamu_iommu, dev);
 987
 988	return 0;
 989}
 990
/*
 * iommu_ops->remove_device: undo fsl_pamu_add_device() by unlinking the
 * device from the PAMU iommu instance and removing it from its group.
 */
static void fsl_pamu_remove_device(struct device *dev)
{
	iommu_device_unlink(&pamu_iommu, dev);
	iommu_group_remove_device(dev);
}
 996
/*
 * iommu_ops->domain_set_windows: split the domain geometry into
 * @w_count DMA windows.  @w_count must be a power of two no larger
 * than the PAMU's maximum subwindow count; the geometry must already
 * be configured and the domain must be disabled.
 */
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Ensure domain is inactive i.e. DMA should be disabled for the domain */
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return  -EBUSY;
	}

	/* Ensure that the geometry has been set for the domain */
	if (!dma_domain->geom_size) {
		pr_debug("Please configure geometry before setting the number of windows\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/*
	 * Ensure we have valid window count i.e. it should be less than
	 * maximum permissible limit and should be a power of two.
	 */
	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
		pr_debug("Invalid window count\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/* Subwindow count of 0 means a single-window configuration */
	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
				       w_count > 1 ? w_count : 0);
	if (!ret) {
		/* Replace any previous window table; GFP_ATOMIC as lock is held */
		kfree(dma_domain->win_arr);
		dma_domain->win_arr = kcalloc(w_count,
					      sizeof(*dma_domain->win_arr),
					      GFP_ATOMIC);
		if (!dma_domain->win_arr) {
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -ENOMEM;
		}
		dma_domain->win_cnt = w_count;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
1045
1046static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
1047{
1048	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
1049
1050	return dma_domain->win_cnt;
1051}
1052
/* PAMU IOMMU operations registered with the IOMMU core */
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.domain_free    = fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.domain_window_disable = fsl_pamu_window_disable,
	.domain_get_windows = fsl_pamu_get_windows,
	.domain_set_windows = fsl_pamu_set_windows,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.domain_get_attr = fsl_pamu_get_domain_attr,
	.add_device	= fsl_pamu_add_device,
	.remove_device	= fsl_pamu_remove_device,
	.device_group   = fsl_pamu_device_group,
};
1070
1071int __init pamu_domain_init(void)
1072{
1073	int ret = 0;
1074
1075	ret = iommu_init_mempool();
1076	if (ret)
1077		return ret;
1078
1079	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
1080	if (ret)
1081		return ret;
1082
1083	iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
1084
1085	ret = iommu_device_register(&pamu_iommu);
1086	if (ret) {
1087		iommu_device_sysfs_remove(&pamu_iommu);
1088		pr_err("Can't register iommu device\n");
1089		return ret;
1090	}
1091
1092	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
1093	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
1094
1095	return ret;
1096}