v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <linux/platform_device.h>
#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */

static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}

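/*
 * Illustrative sketch (not part of the driver): to_fsl_dma_domain() above
 * relies on the classic container_of() pattern -- given a pointer to an
 * embedded member, recover the enclosing structure. A minimal user-space
 * analogue, with a local container_of and made-up types, behaves the same:
 */
#include <stddef.h>
#include <stdio.h>

#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_outer {
	int id;
	struct { int unused; } inner;	/* stands in for iommu_domain */
};

int main(void)
{
	struct example_outer o = { .id = 42 };
	void *member = &o.inner;

	/* Recovers &o, so this prints 42. */
	printf("%d\n", example_container_of(member, struct example_outer, inner)->id);
	return 0;
}
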
static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_update_paace_stash(liodn, val);
	if (ret) {
		pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return ret;
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	u32 omi_index = ~(u32)0;
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d\n",
			 liodn);
	}
	return ret;
}

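/*
 * Illustrative note (not part of the driver): ~(u32)0, i.e. 0xffffffff, is
 * the "unset/invalid" sentinel used throughout this file -- for omi_index
 * above, and for stash_id in fsl_pamu_domain_alloc() below. A stand-alone
 * check of the idiom:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sentinel = ~(uint32_t)0;

	/* All bits set: compares equal to UINT32_MAX, prints "0xffffffff 1". */
	printf("0x%x %d\n", sentinel, sentinel == UINT32_MAX);
	return 0;
}
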
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check here if the device is already attached to domain or not.
	 * If the device is already attached to a domain detach it.
	 */
	old_domain_info = dev_iommu_priv_get(dev);
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * In case of devices with multiple LIODNs just store
	 * the info for the first LIODN as all
	 * LIODNs share the same domain
	 */
	if (!dev_iommu_priv_get(dev))
		dev_iommu_priv_set(dev, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;
	return iova;
}

static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!dma_domain)
		return NULL;

	dma_domain->stash_id = ~(u32)0;
	INIT_LIST_HEAD(&dma_domain->devices);
	spin_lock_init(&dma_domain->domain_lock);

	/* default geometry 64 GB i.e. maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}

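/*
 * Illustrative sketch (not part of the driver): the default aperture set in
 * fsl_pamu_domain_alloc() ends at (1ULL << 36) - 1, the last byte of a
 * 64 GiB address window. A quick stand-alone check of the arithmetic:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long aperture_end = (1ULL << 36) - 1;

	/* 2^36 bytes == 64 * 2^30 bytes, i.e. a 64 GiB span. */
	printf("aperture_end = 0x%llx, span = %llu GiB\n",
	       aperture_end, (aperture_end + 1) >> 30);
	return 0;
}
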
/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -ENODEV;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -ENODEV;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}

/* Set the domain stash attribute */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}
	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

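/*
 * Illustrative sketch (hypothetical caller, not from this file):
 * fsl_pamu_configure_l1_stash() is the exported hook a platform driver could
 * use, after attaching its device to a domain, to direct PAMU DMA stashing
 * at a given CPU's L1 cache. The wrapper and its error message are assumed:
 */
static int example_enable_l1_stashing(struct iommu_domain *domain,
				      unsigned int cpu)
{
	int ret;

	ret = fsl_pamu_configure_l1_stash(domain, cpu);
	if (ret)
		pr_debug("L1 stash setup failed for cpu %u\n", cpu);

	return ret;
}
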
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

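/*
 * Illustrative sketch (not part of the driver): the version test above masks
 * the big-endian BRR1 block-revision register down to its version field and
 * compares it against 0x204. The mask below is a stand-in for the real
 * PCI_FSL_BRR1_VER from sysdev/fsl_pci.h; only the logic is illustrated:
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_BRR1_VER 0xffff	/* hypothetical version-field mask */

static bool example_can_partition(unsigned int brr1)
{
	return (brr1 & EXAMPLE_BRR1_VER) >= 0x204;
}

int main(void)
{
	/* 0x0203 is too old, 0x0204 qualifies: prints "0 1". */
	printf("%d %d\n", example_can_partition(0x00010203),
	       example_can_partition(0x00010204));
	return 0;
}
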
/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * The PCIe controller is not a partitionable entity;
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}

static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.probe_device	= fsl_pamu_probe_device,
	.device_group   = fsl_pamu_device_group,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= fsl_pamu_attach_device,
		.detach_dev	= fsl_pamu_detach_device,
		.iova_to_phys	= fsl_pamu_iova_to_phys,
		.free		= fsl_pamu_domain_free,
	}
};

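/*
 * Illustrative sketch (assumed core-side flow, simplified): the IOMMU core
 * drives the callbacks in fsl_pamu_ops roughly as below -- allocate an
 * unmanaged domain, attach a device (programming its LIODNs), then tear it
 * down again. The wrapper itself is hypothetical:
 */
static int example_domain_lifecycle(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);	/* -> fsl_pamu_domain_alloc() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* -> fsl_pamu_attach_device() */
	if (ret) {
		iommu_domain_free(domain);	/* -> fsl_pamu_domain_free() */
		return ret;
	}

	iommu_detach_device(domain, dev);	/* -> fsl_pamu_detach_device() */
	iommu_domain_free(domain);
	return 0;
}
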
int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
	}

	return ret;
}

v3.15
 
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 *
 */

#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__

#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/of_platform.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/bitops.h>

#include <asm/pci-bridge.h>
#include <sysdev/fsl_pci.h>

#include "fsl_pamu_domain.h"
#include "pci.h"

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
					 sizeof(struct fsl_dma_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
	u32 win_cnt = dma_domain->win_cnt;
	struct dma_window *win_ptr =
				&dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;

	geom = &dma_domain->iommu_domain->geometry;

	if (!win_cnt || !dma_domain->geom_size) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
		return 0;
	}

	if (win_cnt > 1) {
		u64 subwin_size;
		dma_addr_t subwin_iova;
		u32 wnd;

		subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
		subwin_iova = iova & ~(subwin_size - 1);
		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
		win_ptr = &dma_domain->win_arr[wnd];
	}

	if (win_ptr->valid)
		return (win_ptr->paddr + (iova & (win_ptr->size - 1)));

	return 0;
}

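/*
 * Illustrative sketch (not part of the driver): with multiple subwindows,
 * get_phys_addr() derives the subwindow index from the iova as above. The
 * same arithmetic stand-alone, using division instead of the ilog2 shifts
 * (equivalent for power-of-two counts); the geometry values are made up:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long geom_size = 1ULL << 30;	/* 1 GiB aperture */
	unsigned long long aperture_start = 0;
	unsigned int win_cnt = 4;			/* power of two */
	unsigned long long iova = 0x30001000;

	unsigned long long subwin_size = geom_size / win_cnt;
	unsigned long long subwin_iova = iova & ~(subwin_size - 1);
	unsigned int wnd = (subwin_iova - aperture_start) / subwin_size;

	/* Prints "subwindow 3, offset 0x1000". */
	printf("subwindow %u, offset 0x%llx\n", wnd, iova & (subwin_size - 1));
	return 0;
}
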
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
	struct dma_window *sub_win_ptr =
				&dma_domain->win_arr[0];
	int i, ret = 0;
	unsigned long rpn, flags;

	for (i = 0; i < dma_domain->win_cnt; i++) {
		if (sub_win_ptr[i].valid) {
			rpn = sub_win_ptr[i].paddr >>
				 PAMU_PAGE_SHIFT;
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
						 sub_win_ptr[i].size,
						 ~(u32)0,
						 rpn,
						 dma_domain->snoop_id,
						 dma_domain->stash_id,
						 (i > 0) ? 1 : 0,
						 sub_win_ptr[i].prot);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
	phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_config_ppaace(liodn, wnd_addr,
				 wnd->size,
				 ~(u32)0,
				 wnd->paddr >> PAMU_PAGE_SHIFT,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 0, wnd->prot);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAMU PAACE configuration failed for liodn %d\n",
			 liodn);

	return ret;
}

/* Map the DMA window corresponding to the LIODN */
static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
{
	if (dma_domain->win_cnt > 1)
		return map_subwins(liodn, dma_domain);
	else
		return map_win(liodn, dma_domain);
}

/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (dma_domain->win_cnt > 1) {
		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id,
					 dma_domain->stash_id,
					 (wnd_nr > 0) ? 1 : 0,
					 wnd->prot);
		if (ret)
			pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn);
	} else {
		phys_addr_t wnd_addr;

		wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;

		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id, dma_domain->stash_id,
					 0, wnd->prot);
		if (ret)
			pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0, i;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < dma_domain->win_cnt; i++) {
		ret = pamu_update_paace_stash(liodn, i, val);
		if (ret) {
			pr_debug("Failed to update SPAACE %d field for liodn %d\n", i, liodn);
			spin_unlock_irqrestore(&iommu_lock, flags);
			return ret;
		}
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
			  struct fsl_dma_domain *dma_domain,
			  struct iommu_domain_geometry *geom_attr,
			  u32 win_cnt)
{
	phys_addr_t window_addr, window_size;
	phys_addr_t subwin_size;
	int ret = 0, i;
	u32 omi_index = ~(u32)0;
	unsigned long flags;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	window_addr = geom_attr->aperture_start;
	window_size = dma_domain->geom_size;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
					 0, dma_domain->snoop_id,
					 dma_domain->stash_id, win_cnt, 0);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt = %d\n", liodn, win_cnt);
		return ret;
	}

	if (win_cnt > 1) {
		subwin_size = window_size >> ilog2(win_cnt);
		for (i = 0; i < win_cnt; i++) {
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_disable_spaace(liodn, i);
			if (!ret)
				ret = pamu_config_spaace(liodn, win_cnt, i,
							 subwin_size, omi_index,
							 0, dma_domain->snoop_id,
							 dma_domain->stash_id,
							 0, 0);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int check_size(u64 size, dma_addr_t iova)
{
	/*
	 * Size must be a power of two and at least be equal
	 * to PAMU page size.
	 */
	if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
		pr_debug("%s: size too small or not a power of two\n", __func__);
		return -EINVAL;
	}

	/* iova must be aligned to the window size */
	if (iova & (size - 1)) {
		pr_debug("%s: address is not aligned with window size\n", __func__);
		return -EINVAL;
	}

	return 0;
}

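/*
 * Illustrative sketch (not part of the driver): check_size() accepts only
 * power-of-two sizes of at least PAMU_PAGE_SIZE to which the iova is
 * aligned. The same test stand-alone; the 4 KiB page size is an assumption
 * made for the example:
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PAMU_PAGE_SIZE 4096ULL	/* assumed PAMU page size */

static bool example_size_ok(unsigned long long size, unsigned long long iova)
{
	bool pow2 = size && !(size & (size - 1));

	return pow2 && size >= EXAMPLE_PAMU_PAGE_SIZE && !(iova & (size - 1));
}

int main(void)
{
	/* Prints "1 0 0": ok, not a power of two, misaligned iova. */
	printf("%d %d %d\n",
	       example_size_ok(1ULL << 20, 0),
	       example_size_ok(3ULL << 20, 0),
	       example_size_ok(1ULL << 20, 0x1000));
	return 0;
}
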
static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
{
	struct fsl_dma_domain *domain;

	domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->stash_id = ~(u32)0;
	domain->snoop_id = ~(u32)0;
	domain->win_cnt = pamu_get_max_subwin_cnt();
	domain->geom_size = 0;

	INIT_LIST_HEAD(&domain->devices);

	spin_lock_init(&domain->domain_lock);

	return domain;
}

static inline struct device_domain_info *find_domain(struct device *dev)
{
	return dev->archdata.iommu_domain;
}

static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	info->dev->archdata.iommu_domain = NULL;
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check here if the device is already attached to domain or not.
	 * If the device is already attached to a domain detach it.
	 */
	old_domain_info = find_domain(dev);
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * In case of devices with multiple LIODNs just store
	 * the info for the first LIODN as all
	 * LIODNs share the same domain
	 */
	if (!old_domain_info)
		dev->archdata.iommu_domain = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	if ((iova < domain->geometry.aperture_start) ||
	    iova > (domain->geometry.aperture_end))
		return 0;

	return get_phys_addr(dma_domain, iova);
}

static int fsl_pamu_domain_has_cap(struct iommu_domain *domain,
				   unsigned long cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	domain->priv = NULL;

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);

	dma_domain->enabled = 0;
	dma_domain->mapped = 0;

	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static int fsl_pamu_domain_init(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain;

	dma_domain = iommu_alloc_dma_domain();
	if (!dma_domain) {
		pr_debug("dma_domain allocation failed\n");
		return -ENOMEM;
	}
	domain->priv = dma_domain;
	dma_domain->iommu_domain = domain;
	/* default geometry 64 GB i.e. maximum system address */
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = (1ULL << 36) - 1;
	domain->geometry.force_aperture = true;

	return 0;
}

/* Configure geometry settings for all LIODNs associated with domain */
static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
				    struct iommu_domain_geometry *geom_attr,
				    u32 win_cnt)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
				     geom_attr, win_cnt);
		if (ret)
			break;
	}

	return ret;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

/* Update domain mappings for all LIODNs associated with the domain */
static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn(info->liodn, dma_domain, wnd_nr);
		if (ret)
			break;
	}
	return ret;
}

static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
			ret = pamu_disable_liodn(info->liodn);
			if (!ret)
				dma_domain->enabled = 0;
		} else {
			ret = pamu_disable_spaace(info->liodn, wnd_nr);
		}
	}

	return ret;
}

static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (dma_domain->win_arr[wnd_nr].valid) {
		ret = disable_domain_win(dma_domain, wnd_nr);
		if (!ret) {
			dma_domain->win_arr[wnd_nr].valid = 0;
			dma_domain->mapped--;
		}
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				  phys_addr_t paddr, u64 size, int prot)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	struct dma_window *wnd;
	int pamu_prot = 0;
	int ret;
	unsigned long flags;
	u64 win_size;

	if (prot & IOMMU_READ)
		pamu_prot |= PAACE_AP_PERMS_QUERY;
	if (prot & IOMMU_WRITE)
		pamu_prot |= PAACE_AP_PERMS_UPDATE;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
	if (size > win_size) {
		pr_debug("Invalid window size\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	if (dma_domain->win_cnt == 1) {
		if (dma_domain->enabled) {
			pr_debug("Disable the window before updating the mapping\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EBUSY;
		}

		ret = check_size(size, domain->geometry.aperture_start);
		if (ret) {
			pr_debug("Aperture start not aligned to the size\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EINVAL;
		}
	}

	wnd = &dma_domain->win_arr[wnd_nr];
	if (!wnd->valid) {
		wnd->paddr = paddr;
		wnd->size = size;
		wnd->prot = pamu_prot;

		ret = update_domain_mapping(dma_domain, wnd_nr);
		if (!ret) {
			wnd->valid = 1;
			dma_domain->mapped++;
		}
	} else {
		pr_debug("Disable the window before updating the mapping\n");
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

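/*
 * Illustrative sketch (not part of the driver): fsl_pamu_window_enable()
 * translates the generic IOMMU_READ/IOMMU_WRITE prot bits into PAMU access
 * permissions as above. The same mapping stand-alone; all flag values here
 * are stand-ins for the real IOMMU_* and PAACE_AP_PERMS_* constants:
 */
#include <stdio.h>

#define EX_IOMMU_READ	(1 << 0)	/* stand-in for IOMMU_READ */
#define EX_IOMMU_WRITE	(1 << 1)	/* stand-in for IOMMU_WRITE */
#define EX_PERMS_QUERY	0x1		/* stand-in for PAACE_AP_PERMS_QUERY */
#define EX_PERMS_UPDATE	0x2		/* stand-in for PAACE_AP_PERMS_UPDATE */

static int example_to_pamu_prot(int prot)
{
	int pamu_prot = 0;

	if (prot & EX_IOMMU_READ)
		pamu_prot |= EX_PERMS_QUERY;
	if (prot & EX_IOMMU_WRITE)
		pamu_prot |= EX_PERMS_UPDATE;
	return pamu_prot;
}

int main(void)
{
	/* Read-write maps to QUERY|UPDATE: prints 3. */
	printf("%d\n", example_to_pamu_prot(EX_IOMMU_READ | EX_IOMMU_WRITE));
	return 0;
}
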
/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = dma_domain->iommu_domain;
	int ret = 0;
	int i;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %s\n",
				 liodn[i], dev->of_node->full_name);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry,
					     win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (liodn) {
		liodn_cnt = len / sizeof(u32);
		ret = handle_attach_device(dma_domain, dev,
					   liodn, liodn_cnt);
	} else {
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
		ret = -EINVAL;
	}

	return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
}

static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
	struct iommu_domain_geometry *geom_attr = data;
	struct fsl_dma_domain *dma_domain = domain->priv;
	dma_addr_t geom_size;
	unsigned long flags;

	geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
	/*
	 * Sanity check the geometry size. Also, we do not support
	 * DMA outside of the geometry.
	 */
	if (check_size(geom_size, geom_attr->aperture_start) ||
	    !geom_attr->force_aperture) {
		pr_debug("Invalid PAMU geometry attributes\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Copy the domain geometry information */
	memcpy(&domain->geometry, geom_attr,
	       sizeof(struct iommu_domain_geometry));
	dma_domain->geom_size = geom_size;

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
	struct pamu_stash_attribute *stash_attr = data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	memcpy(&dma_domain->dma_stash, stash_attr,
	       sizeof(struct pamu_stash_attribute));

	dma_domain->stash_id = get_stash_id(stash_attr->cache,
					    stash_attr->cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = update_domain_stash(dma_domain, dma_domain->stash_id);

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		if (ret)
			pr_debug("Unable to set dma state for liodn %d",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_GEOMETRY:
		ret = configure_domain_geometry(domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		ret = configure_domain_stash(dma_domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		ret = configure_domain_dma_state(dma_domain, *(int *)data);
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

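/*
 * Illustrative sketch (hypothetical caller, v3.15-era API): a user such as a
 * VFIO backend would drive these attributes through iommu_domain_set_attr(),
 * typically geometry first, then stashing, then enabling DMA. The wrapper
 * and the ordering shown are assumptions:
 */
static int example_configure_pamu_domain(struct iommu_domain *domain,
					 struct iommu_domain_geometry *geom,
					 struct pamu_stash_attribute *stash)
{
	int enable = 1;
	int ret;

	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY, geom);
	if (ret)
		return ret;

	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_STASH, stash);
	if (ret)
		return ret;

	/* Windows must be configured and mapped before DMA is enabled. */
	return iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);
}
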
static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		memcpy((struct pamu_stash_attribute *)data, &dma_domain->dma_stash,
		       sizeof(struct pamu_stash_attribute));
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		*(int *)data = dma_domain->enabled;
		break;
	case DOMAIN_ATTR_FSL_PAMUV1:
		*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	if (version >= 0x204)
		return 1;

	return 0;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;
	struct pci_dev *bridge, *dma_pdev = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		bridge = pci_find_upstream_pcie_bridge(pdev);
		if (bridge) {
			if (pci_is_pcie(bridge))
				dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
			if (!dma_pdev)
				dma_pdev = pci_dev_get(bridge);
		} else
			dma_pdev = pci_dev_get(pdev);

		/* Account for quirked devices */
		swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

		/*
		 * If it's a multifunction device that does not support our
		 * required ACS flags, add to the same group as lowest numbered
		 * function that also does not support the required ACS flags.
		 */
		if (dma_pdev->multifunction &&
		    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
			u8 i, slot = PCI_SLOT(dma_pdev->devfn);

			for (i = 0; i < 8; i++) {
				struct pci_dev *tmp;

				tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
				if (!tmp)
					continue;

				if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
					swap_pci_ref(&dma_pdev, tmp);
					break;
				}
				pci_dev_put(tmp);
			}
		}

		/*
		 * Devices on the root bus go through the iommu.  If that's not us,
		 * find the next upstream device and test ACS up to the root bus.
		 * Finding the next device may require skipping virtual buses.
		 */
		while (!pci_is_root_bus(dma_pdev->bus)) {
			struct pci_bus *bus = dma_pdev->bus;

			while (!bus->self) {
				if (!pci_is_root_bus(bus))
					bus = bus->parent;
				else
					goto root_bus;
			}

			if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
				break;

			swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
		}

root_bus:
		group = get_device_iommu_group(&dma_pdev->dev);
		pci_dev_put(dma_pdev);
		/*
		 * The PCIe controller is not a partitionable entity;
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else
			group = get_shared_pci_device_group(pdev);
	}

	return group;
}

static int fsl_pamu_add_device(struct device *dev)
{
	struct iommu_group *group = NULL;
	struct pci_dev *pdev;
	const u32 *prop;
	int ret, len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		/* Don't create device groups for virtual PCI bridges */
		if (pdev->subordinate)
			return 0;

		group = get_pci_device_group(pdev);
	} else {
		prop = of_get_property(dev->of_node, "fsl,liodn", &len);
		if (prop)
			group = get_device_iommu_group(dev);
	}

	if (!group || IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void fsl_pamu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Ensure domain is inactive i.e. DMA should be disabled for the domain */
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Ensure that the geometry has been set for the domain */
	if (!dma_domain->geom_size) {
		pr_debug("Please configure geometry before setting the number of windows\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/*
	 * Ensure we have valid window count i.e. it should be less than
	 * maximum permissible limit and should be a power of two.
	 */
	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
		pr_debug("Invalid window count\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
				       ((w_count > 1) ? w_count : 0));
	if (!ret) {
		kfree(dma_domain->win_arr);
		dma_domain->win_arr = kzalloc(sizeof(struct dma_window) *
					      w_count, GFP_ATOMIC);
		if (!dma_domain->win_arr) {
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -ENOMEM;
		}
		dma_domain->win_cnt = w_count;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	return dma_domain->win_cnt;
}

static struct iommu_ops fsl_pamu_ops = {
	.domain_init	= fsl_pamu_domain_init,
	.domain_destroy = fsl_pamu_domain_destroy,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.domain_window_disable = fsl_pamu_window_disable,
	.domain_get_windows = fsl_pamu_get_windows,
	.domain_set_windows = fsl_pamu_set_windows,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.domain_has_cap = fsl_pamu_domain_has_cap,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.domain_get_attr = fsl_pamu_get_domain_attr,
	.add_device	= fsl_pamu_add_device,
	.remove_device	= fsl_pamu_remove_device,
};

int pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}