/*
 * Scrape artifact removed: web navigation text ("Linux Audio", training-course
 * banner, "Loading...") preceding the source listing.  The listing below is
 * drivers/iommu/fsl_pamu_domain.c as of kernel v6.2.
 */
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *
  4 * Copyright (C) 2013 Freescale Semiconductor, Inc.
  5 * Author: Varun Sethi <varun.sethi@freescale.com>
  6 */
  7
  8#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
  9
 10#include "fsl_pamu_domain.h"
 11
 12#include <linux/platform_device.h>
 13#include <sysdev/fsl_pci.h>
 14
 15/*
 16 * Global spinlock that needs to be held while
 17 * configuring PAMU.
 18 */
 19static DEFINE_SPINLOCK(iommu_lock);
 20
 21static struct kmem_cache *fsl_pamu_domain_cache;
 22static struct kmem_cache *iommu_devinfo_cache;
 23static DEFINE_SPINLOCK(device_domain_lock);
 24
 25struct iommu_device pamu_iommu;	/* IOMMU core code handle */
 26
/* Convert a generic iommu_domain handle to the PAMU-private domain wrapper. */
static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}
 31
 32static int __init iommu_init_mempool(void)
 33{
 34	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
 35						  sizeof(struct fsl_dma_domain),
 36						  0,
 37						  SLAB_HWCACHE_ALIGN,
 38						  NULL);
 39	if (!fsl_pamu_domain_cache) {
 40		pr_debug("Couldn't create fsl iommu_domain cache\n");
 41		return -ENOMEM;
 42	}
 43
 44	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
 45						sizeof(struct device_domain_info),
 46						0,
 47						SLAB_HWCACHE_ALIGN,
 48						NULL);
 49	if (!iommu_devinfo_cache) {
 50		pr_debug("Couldn't create devinfo cache\n");
 51		kmem_cache_destroy(fsl_pamu_domain_cache);
 52		return -ENOMEM;
 53	}
 54
 55	return 0;
 56}
 57
 58static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
 59			      u32 val)
 60{
 61	int ret = 0;
 62	unsigned long flags;
 63
 64	spin_lock_irqsave(&iommu_lock, flags);
 65	ret = pamu_update_paace_stash(liodn, val);
 66	if (ret) {
 67		pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
 68		spin_unlock_irqrestore(&iommu_lock, flags);
 69		return ret;
 70	}
 71
 72	spin_unlock_irqrestore(&iommu_lock, flags);
 73
 74	return ret;
 75}
 76
/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	u32 omi_index = ~(u32)0;	/* ~0 = no operation-mapping entry selected */
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	spin_lock_irqsave(&iommu_lock, flags);
	/* The LIODN is disabled while its PAACE entry is rewritten. */
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
	/* First pass: program the PAACE with no access permissions. */
	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	/*
	 * Second pass: grant query/update permissions.  NOTE(review): the
	 * ~(u32)0 omi argument here presumably means "no OMI" — confirm
	 * against pamu_config_ppaace()'s contract.
	 */
	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d\n",
			 liodn);
	}
	return ret;
}
109
/*
 * Drop one device/LIODN binding: unlink it from the domain's device
 * list, disable the LIODN in hardware and free the bookkeeping struct.
 * The only caller (detach_device) holds the domain's domain_lock.
 */
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	/* Hardware access is serialized by the global PAMU lock. */
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	/* Clear the device's back-pointer before freeing the info it points at. */
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
123
/*
 * Remove @dev from @dma_domain's device list and disable its LIODNs.
 * A NULL @dev removes every device attached to the domain.
 */
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
137
138static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
139{
140	struct device_domain_info *info, *old_domain_info;
141	unsigned long flags;
142
143	spin_lock_irqsave(&device_domain_lock, flags);
144	/*
145	 * Check here if the device is already attached to domain or not.
146	 * If the device is already attached to a domain detach it.
147	 */
148	old_domain_info = dev_iommu_priv_get(dev);
149	if (old_domain_info && old_domain_info->domain != dma_domain) {
150		spin_unlock_irqrestore(&device_domain_lock, flags);
151		detach_device(dev, old_domain_info->domain);
152		spin_lock_irqsave(&device_domain_lock, flags);
153	}
154
155	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
156
157	info->dev = dev;
158	info->liodn = liodn;
159	info->domain = dma_domain;
160
161	list_add(&info->link, &dma_domain->devices);
162	/*
163	 * In case of devices with multiple LIODNs just store
164	 * the info for the first LIODN as all
165	 * LIODNs share the same domain
166	 */
167	if (!dev_iommu_priv_get(dev))
168		dev_iommu_priv_set(dev, info);
169	spin_unlock_irqrestore(&device_domain_lock, flags);
170}
171
172static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
173					 dma_addr_t iova)
174{
175	if (iova < domain->geometry.aperture_start ||
176	    iova > domain->geometry.aperture_end)
177		return 0;
178	return iova;
179}
180
181static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
182{
183	return cap == IOMMU_CAP_CACHE_COHERENCY;
184}
185
/* Tear down a domain: detach every device, then free the wrapper struct. */
static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
194
195static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
196{
197	struct fsl_dma_domain *dma_domain;
198
199	if (type != IOMMU_DOMAIN_UNMANAGED)
200		return NULL;
201
202	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
203	if (!dma_domain)
204		return NULL;
205
206	dma_domain->stash_id = ~(u32)0;
207	INIT_LIST_HEAD(&dma_domain->devices);
208	spin_lock_init(&dma_domain->domain_lock);
209
210	/* default geometry 64 GB i.e. maximum system address */
211	dma_domain->iommu_domain. geometry.aperture_start = 0;
212	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
213	dma_domain->iommu_domain.geometry.force_aperture = true;
214
215	return &dma_domain->iommu_domain;
216}
217
218/* Update stash destination for all LIODNs associated with the domain */
219static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
220{
221	struct device_domain_info *info;
222	int ret = 0;
223
224	list_for_each_entry(info, &dma_domain->devices, link) {
225		ret = update_liodn_stash(info->liodn, dma_domain, val);
226		if (ret)
227			break;
228	}
229
230	return ret;
231}
232
/*
 * Attach @dev to a PAMU domain: program and enable every LIODN listed
 * in the device's "fsl,liodn" device-tree property.  Returns 0 on
 * success, -ENODEV if the property is missing or an LIODN is out of
 * range, or the error from PAACE programming/enable.
 */
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -ENODEV;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* The property may carry several LIODNs; program each in turn. */
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -ENODEV;
			break;
		}

		/* Record the binding first, then program and enable the LIODN. */
		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}
285
/*
 * Detach @dev from the domain.  Mirrors fsl_pamu_attach_device: for a
 * PCI device the LIODN belongs to the host controller, so the lookup
 * is redirected to the controller's parent node.
 */
static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	/* Only devices that actually carry LIODNs have anything to detach. */
	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}
316
/* Set the domain stash attribute */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Resolve the L1-cache stash target for @cpu; ~0 signals failure. */
	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}
	/* Push the new stash id to every LIODN already in the domain. */
	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
336
/* Return the device's existing iommu_group, allocating one if absent. */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *grp = iommu_group_get(dev);

	return grp ? grp : iommu_group_alloc();
}
347
/* Report whether this PCI controller can partition individual endpoints. */
static  bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}
358
/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			/* First sibling that already has a group wins. */
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		/* No sibling on this bus has one yet; walk up the hierarchy. */
		bus = bus->parent;
	}

	return NULL;
}
384
/*
 * Choose the iommu_group for a PCI device.  Controllers capable of
 * endpoint partitioning give each device its own (pci_device_group)
 * grouping; older controllers share one group across the whole bus.
 */
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * PCIe controller is not a partitionable entity
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controllers device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}
427
/*
 * IOMMU-core callback: pick the group for @dev.  PCI devices go through
 * controller-aware grouping; platform devices with an "fsl,liodn"
 * property each get their own group; anything else gets -ENODEV.
 */
static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}
444
/* Every device is served by the single global PAMU instance. */
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}
449
 
 
 
 
/* PAMU callbacks registered with the IOMMU core. */
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.probe_device	= fsl_pamu_probe_device,
	.device_group   = fsl_pamu_device_group,
	/* Per-domain operations (attach/detach/translate/free). */
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= fsl_pamu_attach_device,
		.detach_dev	= fsl_pamu_detach_device,
		.iova_to_phys	= fsl_pamu_iova_to_phys,
		.free		= fsl_pamu_domain_free,
	}
};
462
463int __init pamu_domain_init(void)
464{
465	int ret = 0;
466
467	ret = iommu_init_mempool();
468	if (ret)
469		return ret;
470
471	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
472	if (ret)
473		return ret;
474
475	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
476	if (ret) {
477		iommu_device_sysfs_remove(&pamu_iommu);
478		pr_err("Can't register iommu device\n");
 
479	}
 
 
 
480
481	return ret;
482}
/*
 * ---- Second snapshot: the same file as of kernel v5.14.15 (older,
 * pre-default_domain_ops layout).  Kept for side-by-side comparison. ----
 */
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *
  4 * Copyright (C) 2013 Freescale Semiconductor, Inc.
  5 * Author: Varun Sethi <varun.sethi@freescale.com>
  6 */
  7
  8#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
  9
 10#include "fsl_pamu_domain.h"
 11
 
 12#include <sysdev/fsl_pci.h>
 13
 14/*
 15 * Global spinlock that needs to be held while
 16 * configuring PAMU.
 17 */
 18static DEFINE_SPINLOCK(iommu_lock);
 19
 20static struct kmem_cache *fsl_pamu_domain_cache;
 21static struct kmem_cache *iommu_devinfo_cache;
 22static DEFINE_SPINLOCK(device_domain_lock);
 23
 24struct iommu_device pamu_iommu;	/* IOMMU core code handle */
 25
 26static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
 27{
 28	return container_of(dom, struct fsl_dma_domain, iommu_domain);
 29}
 30
 31static int __init iommu_init_mempool(void)
 32{
 33	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
 34						  sizeof(struct fsl_dma_domain),
 35						  0,
 36						  SLAB_HWCACHE_ALIGN,
 37						  NULL);
 38	if (!fsl_pamu_domain_cache) {
 39		pr_debug("Couldn't create fsl iommu_domain cache\n");
 40		return -ENOMEM;
 41	}
 42
 43	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
 44						sizeof(struct device_domain_info),
 45						0,
 46						SLAB_HWCACHE_ALIGN,
 47						NULL);
 48	if (!iommu_devinfo_cache) {
 49		pr_debug("Couldn't create devinfo cache\n");
 50		kmem_cache_destroy(fsl_pamu_domain_cache);
 51		return -ENOMEM;
 52	}
 53
 54	return 0;
 55}
 56
 57static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
 58			      u32 val)
 59{
 60	int ret = 0;
 61	unsigned long flags;
 62
 63	spin_lock_irqsave(&iommu_lock, flags);
 64	ret = pamu_update_paace_stash(liodn, val);
 65	if (ret) {
 66		pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
 67		spin_unlock_irqrestore(&iommu_lock, flags);
 68		return ret;
 69	}
 70
 71	spin_unlock_irqrestore(&iommu_lock, flags);
 72
 73	return ret;
 74}
 75
 76/* Set the geometry parameters for a LIODN */
 77static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
 78			  int liodn)
 79{
 80	u32 omi_index = ~(u32)0;
 81	unsigned long flags;
 82	int ret;
 83
 84	/*
 85	 * Configure the omi_index at the geometry setup time.
 86	 * This is a static value which depends on the type of
 87	 * device and would not change thereafter.
 88	 */
 89	get_ome_index(&omi_index, dev);
 90
 91	spin_lock_irqsave(&iommu_lock, flags);
 92	ret = pamu_disable_liodn(liodn);
 93	if (ret)
 94		goto out_unlock;
 95	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
 96	if (ret)
 97		goto out_unlock;
 98	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
 99				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
100out_unlock:
101	spin_unlock_irqrestore(&iommu_lock, flags);
102	if (ret) {
103		pr_debug("PAACE configuration failed for liodn %d\n",
104			 liodn);
105	}
106	return ret;
107}
108
109static void remove_device_ref(struct device_domain_info *info)
110{
111	unsigned long flags;
112
113	list_del(&info->link);
114	spin_lock_irqsave(&iommu_lock, flags);
115	pamu_disable_liodn(info->liodn);
116	spin_unlock_irqrestore(&iommu_lock, flags);
117	spin_lock_irqsave(&device_domain_lock, flags);
118	dev_iommu_priv_set(info->dev, NULL);
119	kmem_cache_free(iommu_devinfo_cache, info);
120	spin_unlock_irqrestore(&device_domain_lock, flags);
121}
122
123static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
124{
125	struct device_domain_info *info, *tmp;
126	unsigned long flags;
127
128	spin_lock_irqsave(&dma_domain->domain_lock, flags);
129	/* Remove the device from the domain device list */
130	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
131		if (!dev || (info->dev == dev))
132			remove_device_ref(info);
133	}
134	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
135}
136
137static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
138{
139	struct device_domain_info *info, *old_domain_info;
140	unsigned long flags;
141
142	spin_lock_irqsave(&device_domain_lock, flags);
143	/*
144	 * Check here if the device is already attached to domain or not.
145	 * If the device is already attached to a domain detach it.
146	 */
147	old_domain_info = dev_iommu_priv_get(dev);
148	if (old_domain_info && old_domain_info->domain != dma_domain) {
149		spin_unlock_irqrestore(&device_domain_lock, flags);
150		detach_device(dev, old_domain_info->domain);
151		spin_lock_irqsave(&device_domain_lock, flags);
152	}
153
154	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
155
156	info->dev = dev;
157	info->liodn = liodn;
158	info->domain = dma_domain;
159
160	list_add(&info->link, &dma_domain->devices);
161	/*
162	 * In case of devices with multiple LIODNs just store
163	 * the info for the first LIODN as all
164	 * LIODNs share the same domain
165	 */
166	if (!dev_iommu_priv_get(dev))
167		dev_iommu_priv_set(dev, info);
168	spin_unlock_irqrestore(&device_domain_lock, flags);
169}
170
171static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
172					 dma_addr_t iova)
173{
174	if (iova < domain->geometry.aperture_start ||
175	    iova > domain->geometry.aperture_end)
176		return 0;
177	return iova;
178}
179
/* The only capability PAMU advertises is cache-coherent DMA (pre-6.x signature). */
static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}
184
185static void fsl_pamu_domain_free(struct iommu_domain *domain)
186{
187	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
188
189	/* remove all the devices from the device list */
190	detach_device(NULL, dma_domain);
191	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
192}
193
194static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
195{
196	struct fsl_dma_domain *dma_domain;
197
198	if (type != IOMMU_DOMAIN_UNMANAGED)
199		return NULL;
200
201	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
202	if (!dma_domain)
203		return NULL;
204
205	dma_domain->stash_id = ~(u32)0;
206	INIT_LIST_HEAD(&dma_domain->devices);
207	spin_lock_init(&dma_domain->domain_lock);
208
209	/* default geometry 64 GB i.e. maximum system address */
210	dma_domain->iommu_domain. geometry.aperture_start = 0;
211	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
212	dma_domain->iommu_domain.geometry.force_aperture = true;
213
214	return &dma_domain->iommu_domain;
215}
216
217/* Update stash destination for all LIODNs associated with the domain */
218static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
219{
220	struct device_domain_info *info;
221	int ret = 0;
222
223	list_for_each_entry(info, &dma_domain->devices, link) {
224		ret = update_liodn_stash(info->liodn, dma_domain, val);
225		if (ret)
226			break;
227	}
228
229	return ret;
230}
231
232static int fsl_pamu_attach_device(struct iommu_domain *domain,
233				  struct device *dev)
234{
235	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
236	unsigned long flags;
237	int len, ret = 0, i;
238	const u32 *liodn;
239	struct pci_dev *pdev = NULL;
240	struct pci_controller *pci_ctl;
241
242	/*
243	 * Use LIODN of the PCI controller while attaching a
244	 * PCI device.
245	 */
246	if (dev_is_pci(dev)) {
247		pdev = to_pci_dev(dev);
248		pci_ctl = pci_bus_to_host(pdev->bus);
249		/*
250		 * make dev point to pci controller device
251		 * so we can get the LIODN programmed by
252		 * u-boot.
253		 */
254		dev = pci_ctl->parent;
255	}
256
257	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
258	if (!liodn) {
259		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
260		return -EINVAL;
261	}
262
263	spin_lock_irqsave(&dma_domain->domain_lock, flags);
264	for (i = 0; i < len / sizeof(u32); i++) {
265		/* Ensure that LIODN value is valid */
266		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
267			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
268				 liodn[i], dev->of_node);
269			ret = -EINVAL;
270			break;
271		}
272
273		attach_device(dma_domain, liodn[i], dev);
274		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
275		if (ret)
276			break;
277		ret = pamu_enable_liodn(liodn[i]);
278		if (ret)
279			break;
280	}
281	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
282	return ret;
283}
284
285static void fsl_pamu_detach_device(struct iommu_domain *domain,
286				   struct device *dev)
287{
288	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
289	const u32 *prop;
290	int len;
291	struct pci_dev *pdev = NULL;
292	struct pci_controller *pci_ctl;
293
294	/*
295	 * Use LIODN of the PCI controller while detaching a
296	 * PCI device.
297	 */
298	if (dev_is_pci(dev)) {
299		pdev = to_pci_dev(dev);
300		pci_ctl = pci_bus_to_host(pdev->bus);
301		/*
302		 * make dev point to pci controller device
303		 * so we can get the LIODN programmed by
304		 * u-boot.
305		 */
306		dev = pci_ctl->parent;
307	}
308
309	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
310	if (prop)
311		detach_device(dev, dma_domain);
312	else
313		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
314}
315
316/* Set the domain stash attribute */
317int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
318{
319	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
320	unsigned long flags;
321	int ret;
322
323	spin_lock_irqsave(&dma_domain->domain_lock, flags);
324	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
325	if (dma_domain->stash_id == ~(u32)0) {
326		pr_debug("Invalid stash attributes\n");
327		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
328		return -EINVAL;
329	}
330	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
331	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
332
333	return ret;
334}
335
336static struct iommu_group *get_device_iommu_group(struct device *dev)
337{
338	struct iommu_group *group;
339
340	group = iommu_group_get(dev);
341	if (!group)
342		group = iommu_group_alloc();
343
344	return group;
345}
346
347static  bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
348{
349	u32 version;
350
351	/* Check the PCI controller version number by readding BRR1 register */
352	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
353	version &= PCI_FSL_BRR1_VER;
354	/* If PCI controller version is >= 0x204 we can partition endpoints */
355	return version >= 0x204;
356}
357
358/* Get iommu group information from peer devices or devices on the parent bus */
359static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
360{
361	struct pci_dev *tmp;
362	struct iommu_group *group;
363	struct pci_bus *bus = pdev->bus;
364
365	/*
366	 * Traverese the pci bus device list to get
367	 * the shared iommu group.
368	 */
369	while (bus) {
370		list_for_each_entry(tmp, &bus->devices, bus_list) {
371			if (tmp == pdev)
372				continue;
373			group = iommu_group_get(&tmp->dev);
374			if (group)
375				return group;
376		}
377
378		bus = bus->parent;
379	}
380
381	return NULL;
382}
383
384static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
385{
386	struct pci_controller *pci_ctl;
387	bool pci_endpt_partitioning;
388	struct iommu_group *group = NULL;
389
390	pci_ctl = pci_bus_to_host(pdev->bus);
391	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
392	/* We can partition PCIe devices so assign device group to the device */
393	if (pci_endpt_partitioning) {
394		group = pci_device_group(&pdev->dev);
395
396		/*
397		 * PCIe controller is not a paritionable entity
398		 * free the controller device iommu_group.
399		 */
400		if (pci_ctl->parent->iommu_group)
401			iommu_group_remove_device(pci_ctl->parent);
402	} else {
403		/*
404		 * All devices connected to the controller will share the
405		 * PCI controllers device group. If this is the first
406		 * device to be probed for the pci controller, copy the
407		 * device group information from the PCI controller device
408		 * node and remove the PCI controller iommu group.
409		 * For subsequent devices, the iommu group information can
410		 * be obtained from sibling devices (i.e. from the bus_devices
411		 * link list).
412		 */
413		if (pci_ctl->parent->iommu_group) {
414			group = get_device_iommu_group(pci_ctl->parent);
415			iommu_group_remove_device(pci_ctl->parent);
416		} else {
417			group = get_shared_pci_device_group(pdev);
418		}
419	}
420
421	if (!group)
422		group = ERR_PTR(-ENODEV);
423
424	return group;
425}
426
427static struct iommu_group *fsl_pamu_device_group(struct device *dev)
428{
429	struct iommu_group *group = ERR_PTR(-ENODEV);
430	int len;
431
432	/*
433	 * For platform devices we allocate a separate group for
434	 * each of the devices.
435	 */
436	if (dev_is_pci(dev))
437		group = get_pci_device_group(to_pci_dev(dev));
438	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
439		group = get_device_iommu_group(dev);
440
441	return group;
442}
443
444static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
445{
446	return &pamu_iommu;
447}
448
/* No per-device state to release; required hook for this kernel's iommu_ops. */
static void fsl_pamu_release_device(struct device *dev)
{
}
452
/* PAMU callbacks registered with the IOMMU core (flat callback layout). */
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.domain_free    = fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.probe_device	= fsl_pamu_probe_device,
	.release_device	= fsl_pamu_release_device,
	.device_group   = fsl_pamu_device_group,
};
464
/*
 * Module init: create the slab caches, register the PAMU with the IOMMU
 * core (sysfs name "iommu0") and hook the ops up to the platform and
 * PCI buses via bus_set_iommu (removed in later kernels).
 */
int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
	if (ret) {
		/* Registration failed: undo the sysfs node before reporting. */
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}