v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *
   4 * Copyright (C) 2013 Freescale Semiconductor, Inc.
   5 * Author: Varun Sethi <varun.sethi@freescale.com>
   6 */
   7
   8#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
   9
  10#include "fsl_pamu_domain.h"
  11
  12#include <sysdev/fsl_pci.h>
  13
  14/*
  15 * Global spinlock that needs to be held while
  16 * configuring PAMU.
  17 */
  18static DEFINE_SPINLOCK(iommu_lock);
  19
  20static struct kmem_cache *fsl_pamu_domain_cache;
  21static struct kmem_cache *iommu_devinfo_cache;
  22static DEFINE_SPINLOCK(device_domain_lock);
  23
  24struct iommu_device pamu_iommu;	/* IOMMU core code handle */
  25
  26static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
  27{
  28	return container_of(dom, struct fsl_dma_domain, iommu_domain);
  29}
  30
  31static int __init iommu_init_mempool(void)
  32{
  33	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
  34						  sizeof(struct fsl_dma_domain),
  35						  0,
  36						  SLAB_HWCACHE_ALIGN,
  37						  NULL);
  38	if (!fsl_pamu_domain_cache) {
  39		pr_debug("Couldn't create fsl iommu_domain cache\n");
  40		return -ENOMEM;
  41	}
  42
  43	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
  44						sizeof(struct device_domain_info),
  45						0,
  46						SLAB_HWCACHE_ALIGN,
  47						NULL);
  48	if (!iommu_devinfo_cache) {
  49		pr_debug("Couldn't create devinfo cache\n");
  50		kmem_cache_destroy(fsl_pamu_domain_cache);
  51		return -ENOMEM;
  52	}
  53
  54	return 0;
  55}
  56
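/*
 * get_phys_addr() below resolves an IOVA against the domain's DMA window(s):
 * with more than one subwindow the subwindow index is derived from the IOVA's
 * offset into the aperture, and the result is that window's physical base plus
 * the IOVA's offset within the window. It returns 0 when the geometry is not
 * configured or the covering window is not valid.
 */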
  57static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
  58{
  59	u32 win_cnt = dma_domain->win_cnt;
  60	struct dma_window *win_ptr = &dma_domain->win_arr[0];
  61	struct iommu_domain_geometry *geom;
  62
  63	geom = &dma_domain->iommu_domain.geometry;
  64
  65	if (!win_cnt || !dma_domain->geom_size) {
  66		pr_debug("Number of windows/geometry not configured for the domain\n");
  67		return 0;
  68	}
  69
  70	if (win_cnt > 1) {
  71		u64 subwin_size;
  72		dma_addr_t subwin_iova;
  73		u32 wnd;
  74
  75		subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
  76		subwin_iova = iova & ~(subwin_size - 1);
  77		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
  78		win_ptr = &dma_domain->win_arr[wnd];
  79	}
  80
  81	if (win_ptr->valid)
  82		return win_ptr->paddr + (iova & (win_ptr->size - 1));
  83
  84	return 0;
  85}
  86
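/*
 * map_subwins() programs one SPAACE entry, under iommu_lock, for every valid
 * subwindow of the domain, using the subwindow's physical base, size and
 * protection bits.
 */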
  87static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
  88{
  89	struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
   90	int i, ret = 0;
  91	unsigned long rpn, flags;
  92
  93	for (i = 0; i < dma_domain->win_cnt; i++) {
  94		if (sub_win_ptr[i].valid) {
  95			rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
  96			spin_lock_irqsave(&iommu_lock, flags);
  97			ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
  98						 sub_win_ptr[i].size,
  99						 ~(u32)0,
 100						 rpn,
 101						 dma_domain->snoop_id,
 102						 dma_domain->stash_id,
 103						 (i > 0) ? 1 : 0,
 104						 sub_win_ptr[i].prot);
 105			spin_unlock_irqrestore(&iommu_lock, flags);
 106			if (ret) {
 107				pr_debug("SPAACE configuration failed for liodn %d\n",
 108					 liodn);
 109				return ret;
 110			}
 111		}
 112	}
 113
 114	return ret;
 115}
 116
 117static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
 118{
 119	int ret;
 120	struct dma_window *wnd = &dma_domain->win_arr[0];
 121	phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
 122	unsigned long flags;
 123
 124	spin_lock_irqsave(&iommu_lock, flags);
 125	ret = pamu_config_ppaace(liodn, wnd_addr,
 126				 wnd->size,
 127				 ~(u32)0,
 128				 wnd->paddr >> PAMU_PAGE_SHIFT,
 129				 dma_domain->snoop_id, dma_domain->stash_id,
 130				 0, wnd->prot);
 131	spin_unlock_irqrestore(&iommu_lock, flags);
 132	if (ret)
 133		pr_debug("PAACE configuration failed for liodn %d\n", liodn);
 134
 135	return ret;
 136}
 137
 138/* Map the DMA window corresponding to the LIODN */
 139static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
 140{
 141	if (dma_domain->win_cnt > 1)
 142		return map_subwins(liodn, dma_domain);
 143	else
 144		return map_win(liodn, dma_domain);
 145}
 146
 147/* Update window/subwindow mapping for the LIODN */
 148static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
 149{
 150	int ret;
 151	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
 152	unsigned long flags;
 153
 154	spin_lock_irqsave(&iommu_lock, flags);
 155	if (dma_domain->win_cnt > 1) {
 156		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
 157					 wnd->size,
 158					 ~(u32)0,
 159					 wnd->paddr >> PAMU_PAGE_SHIFT,
 160					 dma_domain->snoop_id,
 161					 dma_domain->stash_id,
 162					 (wnd_nr > 0) ? 1 : 0,
 163					 wnd->prot);
 164		if (ret)
 165			pr_debug("Subwindow reconfiguration failed for liodn %d\n",
 166				 liodn);
 167	} else {
 168		phys_addr_t wnd_addr;
 169
 170		wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
 171
 172		ret = pamu_config_ppaace(liodn, wnd_addr,
 173					 wnd->size,
 174					 ~(u32)0,
 175					 wnd->paddr >> PAMU_PAGE_SHIFT,
 176					 dma_domain->snoop_id, dma_domain->stash_id,
 177					 0, wnd->prot);
 178		if (ret)
 179			pr_debug("Window reconfiguration failed for liodn %d\n",
 180				 liodn);
 181	}
 182
 183	spin_unlock_irqrestore(&iommu_lock, flags);
 184
 185	return ret;
 186}
 187
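/*
 * update_liodn_stash() pushes a new stash destination into every (sub)window
 * PAACE of the LIODN; it fails with -EINVAL if the domain's windows have not
 * been configured yet.
 */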
 188static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
 189			      u32 val)
 190{
 191	int ret = 0, i;
 192	unsigned long flags;
 193
 194	spin_lock_irqsave(&iommu_lock, flags);
 195	if (!dma_domain->win_arr) {
 196		pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
 197			 liodn);
 198		spin_unlock_irqrestore(&iommu_lock, flags);
 199		return -EINVAL;
 200	}
 201
 202	for (i = 0; i < dma_domain->win_cnt; i++) {
 203		ret = pamu_update_paace_stash(liodn, i, val);
 204		if (ret) {
 205			pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
 206				 i, liodn);
 207			spin_unlock_irqrestore(&iommu_lock, flags);
 208			return ret;
 209		}
 210	}
 211
 212	spin_unlock_irqrestore(&iommu_lock, flags);
 213
 214	return ret;
 215}
 216
 217/* Set the geometry parameters for a LIODN */
 218static int pamu_set_liodn(int liodn, struct device *dev,
 219			  struct fsl_dma_domain *dma_domain,
 220			  struct iommu_domain_geometry *geom_attr,
 221			  u32 win_cnt)
 222{
 223	phys_addr_t window_addr, window_size;
 224	phys_addr_t subwin_size;
 225	int ret = 0, i;
 226	u32 omi_index = ~(u32)0;
 227	unsigned long flags;
 228
 229	/*
 230	 * Configure the omi_index at the geometry setup time.
 231	 * This is a static value which depends on the type of
 232	 * device and would not change thereafter.
 233	 */
 234	get_ome_index(&omi_index, dev);
 235
 236	window_addr = geom_attr->aperture_start;
 237	window_size = dma_domain->geom_size;
 238
 239	spin_lock_irqsave(&iommu_lock, flags);
 240	ret = pamu_disable_liodn(liodn);
 241	if (!ret)
 242		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
 243					 0, dma_domain->snoop_id,
 244					 dma_domain->stash_id, win_cnt, 0);
 245	spin_unlock_irqrestore(&iommu_lock, flags);
 246	if (ret) {
 247		pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
 248			 liodn, win_cnt);
 249		return ret;
 250	}
 251
 252	if (win_cnt > 1) {
 253		subwin_size = window_size >> ilog2(win_cnt);
 254		for (i = 0; i < win_cnt; i++) {
 255			spin_lock_irqsave(&iommu_lock, flags);
 256			ret = pamu_disable_spaace(liodn, i);
 257			if (!ret)
 258				ret = pamu_config_spaace(liodn, win_cnt, i,
 259							 subwin_size, omi_index,
 260							 0, dma_domain->snoop_id,
 261							 dma_domain->stash_id,
 262							 0, 0);
 263			spin_unlock_irqrestore(&iommu_lock, flags);
 264			if (ret) {
 265				pr_debug("SPAACE configuration failed for liodn %d\n",
 266					 liodn);
 267				return ret;
 268			}
 269		}
 270	}
 271
 272	return ret;
 273}
 274
 275static int check_size(u64 size, dma_addr_t iova)
 276{
 277	/*
 278	 * Size must be a power of two and at least be equal
 279	 * to PAMU page size.
 280	 */
 281	if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
 282		pr_debug("Size too small or not a power of two\n");
 283		return -EINVAL;
 284	}
 285
 286	/* iova must be page size aligned */
 287	if (iova & (size - 1)) {
 288		pr_debug("Address is not aligned with window size\n");
 289		return -EINVAL;
 290	}
 291
 292	return 0;
 293}
 294
 295static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
 296{
 297	struct fsl_dma_domain *domain;
 298
 299	domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
 300	if (!domain)
 301		return NULL;
 302
 303	domain->stash_id = ~(u32)0;
 304	domain->snoop_id = ~(u32)0;
 305	domain->win_cnt = pamu_get_max_subwin_cnt();
 306	domain->geom_size = 0;
 307
 308	INIT_LIST_HEAD(&domain->devices);
 309
 310	spin_lock_init(&domain->domain_lock);
 311
 312	return domain;
 313}
 314
 315static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
 316{
 317	unsigned long flags;
 318
 319	list_del(&info->link);
 320	spin_lock_irqsave(&iommu_lock, flags);
 321	if (win_cnt > 1)
 322		pamu_free_subwins(info->liodn);
 323	pamu_disable_liodn(info->liodn);
 324	spin_unlock_irqrestore(&iommu_lock, flags);
 325	spin_lock_irqsave(&device_domain_lock, flags);
 326	dev_iommu_priv_set(info->dev, NULL);
 327	kmem_cache_free(iommu_devinfo_cache, info);
 328	spin_unlock_irqrestore(&device_domain_lock, flags);
 329}
 330
 331static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
 332{
 333	struct device_domain_info *info, *tmp;
 334	unsigned long flags;
 335
 336	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 337	/* Remove the device from the domain device list */
 338	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
 339		if (!dev || (info->dev == dev))
 340			remove_device_ref(info, dma_domain->win_cnt);
 341	}
 342	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 343}
 344
 345static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
 346{
 347	struct device_domain_info *info, *old_domain_info;
 348	unsigned long flags;
 349
 350	spin_lock_irqsave(&device_domain_lock, flags);
 351	/*
 352	 * Check here if the device is already attached to domain or not.
 353	 * If the device is already attached to a domain detach it.
 354	 */
 355	old_domain_info = dev_iommu_priv_get(dev);
 356	if (old_domain_info && old_domain_info->domain != dma_domain) {
 357		spin_unlock_irqrestore(&device_domain_lock, flags);
 358		detach_device(dev, old_domain_info->domain);
 359		spin_lock_irqsave(&device_domain_lock, flags);
 360	}
 361
 362	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
 363
 364	info->dev = dev;
 365	info->liodn = liodn;
 366	info->domain = dma_domain;
 367
 368	list_add(&info->link, &dma_domain->devices);
 369	/*
 370	 * In case of devices with multiple LIODNs just store
 371	 * the info for the first LIODN as all
 372	 * LIODNs share the same domain
 373	 */
 374	if (!dev_iommu_priv_get(dev))
 375		dev_iommu_priv_set(dev, info);
 376	spin_unlock_irqrestore(&device_domain_lock, flags);
 377}
 378
 379static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
 380					 dma_addr_t iova)
 381{
 382	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 383
 384	if (iova < domain->geometry.aperture_start ||
 385	    iova > domain->geometry.aperture_end)
 386		return 0;
 387
 388	return get_phys_addr(dma_domain, iova);
 389}
 390
 391static bool fsl_pamu_capable(enum iommu_cap cap)
 392{
 393	return cap == IOMMU_CAP_CACHE_COHERENCY;
 394}
 395
 396static void fsl_pamu_domain_free(struct iommu_domain *domain)
 397{
 398	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 399
 400	/* remove all the devices from the device list */
 401	detach_device(NULL, dma_domain);
 402
 403	dma_domain->enabled = 0;
 404	dma_domain->mapped = 0;
 405
 406	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
 407}
 408
 409static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
 410{
 411	struct fsl_dma_domain *dma_domain;
 412
 413	if (type != IOMMU_DOMAIN_UNMANAGED)
 414		return NULL;
 415
 416	dma_domain = iommu_alloc_dma_domain();
 417	if (!dma_domain) {
 418		pr_debug("dma_domain allocation failed\n");
 419		return NULL;
 420	}
 421	/* default geometry 64 GB i.e. maximum system address */
 422	dma_domain->iommu_domain.geometry.aperture_start = 0;
 423	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
 424	dma_domain->iommu_domain.geometry.force_aperture = true;
 425
 426	return &dma_domain->iommu_domain;
 427}
 428
 429/* Configure geometry settings for all LIODNs associated with domain */
 430static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
 431				    struct iommu_domain_geometry *geom_attr,
 432				    u32 win_cnt)
 433{
 434	struct device_domain_info *info;
 435	int ret = 0;
 436
 437	list_for_each_entry(info, &dma_domain->devices, link) {
 438		ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
 439				     geom_attr, win_cnt);
 440		if (ret)
 441			break;
 442	}
 443
 444	return ret;
 445}
 446
 447/* Update stash destination for all LIODNs associated with the domain */
 448static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
 449{
 450	struct device_domain_info *info;
 451	int ret = 0;
 452
 453	list_for_each_entry(info, &dma_domain->devices, link) {
 454		ret = update_liodn_stash(info->liodn, dma_domain, val);
 455		if (ret)
 456			break;
 457	}
 458
 459	return ret;
 460}
 461
 462/* Update domain mappings for all LIODNs associated with the domain */
 463static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
 464{
 465	struct device_domain_info *info;
 466	int ret = 0;
 467
 468	list_for_each_entry(info, &dma_domain->devices, link) {
 469		ret = update_liodn(info->liodn, dma_domain, wnd_nr);
 470		if (ret)
 471			break;
 472	}
 473	return ret;
 474}
 475
 476static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
 477{
 478	struct device_domain_info *info;
 479	int ret = 0;
 480
 481	list_for_each_entry(info, &dma_domain->devices, link) {
 482		if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
 483			ret = pamu_disable_liodn(info->liodn);
 484			if (!ret)
 485				dma_domain->enabled = 0;
 486		} else {
 487			ret = pamu_disable_spaace(info->liodn, wnd_nr);
 488		}
 489	}
 490
 491	return ret;
 492}
 493
 494static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
 495{
 496	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 497	unsigned long flags;
 498	int ret;
 499
 500	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 501	if (!dma_domain->win_arr) {
 502		pr_debug("Number of windows not configured\n");
 503		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 504		return;
 505	}
 506
 507	if (wnd_nr >= dma_domain->win_cnt) {
 508		pr_debug("Invalid window index\n");
 509		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 510		return;
 511	}
 512
 513	if (dma_domain->win_arr[wnd_nr].valid) {
 514		ret = disable_domain_win(dma_domain, wnd_nr);
 515		if (!ret) {
 516			dma_domain->win_arr[wnd_nr].valid = 0;
 517			dma_domain->mapped--;
 518		}
 519	}
 520
 521	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 522}
 523
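/*
 * iommu_ops->domain_window_enable: validate the window index and size against
 * the configured geometry, record paddr/size/prot in win_arr[wnd_nr], and push
 * the mapping to every LIODN attached to the domain. A window that is already
 * valid must be disabled before it can be remapped (-EBUSY).
 */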
 524static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
 525				  phys_addr_t paddr, u64 size, int prot)
 526{
 527	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 528	struct dma_window *wnd;
 529	int pamu_prot = 0;
 530	int ret;
 531	unsigned long flags;
 532	u64 win_size;
 533
 534	if (prot & IOMMU_READ)
 535		pamu_prot |= PAACE_AP_PERMS_QUERY;
 536	if (prot & IOMMU_WRITE)
 537		pamu_prot |= PAACE_AP_PERMS_UPDATE;
 538
 539	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 540	if (!dma_domain->win_arr) {
 541		pr_debug("Number of windows not configured\n");
 542		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 543		return -ENODEV;
 544	}
 545
 546	if (wnd_nr >= dma_domain->win_cnt) {
 547		pr_debug("Invalid window index\n");
 548		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 549		return -EINVAL;
 550	}
 551
 552	win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
 553	if (size > win_size) {
 554		pr_debug("Invalid window size\n");
 555		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 556		return -EINVAL;
 557	}
 558
 559	if (dma_domain->win_cnt == 1) {
 560		if (dma_domain->enabled) {
 561			pr_debug("Disable the window before updating the mapping\n");
 562			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 563			return -EBUSY;
 564		}
 565
 566		ret = check_size(size, domain->geometry.aperture_start);
 567		if (ret) {
 568			pr_debug("Aperture start not aligned to the size\n");
 569			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 570			return -EINVAL;
 571		}
 572	}
 573
 574	wnd = &dma_domain->win_arr[wnd_nr];
 575	if (!wnd->valid) {
 576		wnd->paddr = paddr;
 577		wnd->size = size;
 578		wnd->prot = pamu_prot;
 579
 580		ret = update_domain_mapping(dma_domain, wnd_nr);
 581		if (!ret) {
 582			wnd->valid = 1;
 583			dma_domain->mapped++;
 584		}
 585	} else {
 586		pr_debug("Disable the window before updating the mapping\n");
 587		ret = -EBUSY;
 588	}
 589
 590	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 591
 592	return ret;
 593}
 594
 595/*
 596 * Attach the LIODN to the DMA domain and configure the geometry
 597 * and window mappings.
 598 */
 599static int handle_attach_device(struct fsl_dma_domain *dma_domain,
 600				struct device *dev, const u32 *liodn,
 601				int num)
 602{
 603	unsigned long flags;
 604	struct iommu_domain *domain = &dma_domain->iommu_domain;
 605	int ret = 0;
 606	int i;
 607
 608	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 609	for (i = 0; i < num; i++) {
 610		/* Ensure that LIODN value is valid */
 611		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
 612			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
 613				 liodn[i], dev->of_node);
 614			ret = -EINVAL;
 615			break;
 616		}
 617
 618		attach_device(dma_domain, liodn[i], dev);
 619		/*
 620		 * Check if geometry has already been configured
 621		 * for the domain. If yes, set the geometry for
 622		 * the LIODN.
 623		 */
 624		if (dma_domain->win_arr) {
 625			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
 626
 627			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
 628					     &domain->geometry, win_cnt);
 629			if (ret)
 630				break;
 631			if (dma_domain->mapped) {
 632				/*
 633				 * Create window/subwindow mapping for
 634				 * the LIODN.
 635				 */
 636				ret = map_liodn(liodn[i], dma_domain);
 637				if (ret)
 638					break;
 639			}
 640		}
 641	}
 642	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 643
 644	return ret;
 645}
 646
 647static int fsl_pamu_attach_device(struct iommu_domain *domain,
 648				  struct device *dev)
 649{
 650	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 651	const u32 *liodn;
 652	u32 liodn_cnt;
 653	int len, ret = 0;
 654	struct pci_dev *pdev = NULL;
 655	struct pci_controller *pci_ctl;
 656
 657	/*
 658	 * Use LIODN of the PCI controller while attaching a
 659	 * PCI device.
 660	 */
 661	if (dev_is_pci(dev)) {
 662		pdev = to_pci_dev(dev);
 663		pci_ctl = pci_bus_to_host(pdev->bus);
 664		/*
 665		 * make dev point to pci controller device
 666		 * so we can get the LIODN programmed by
 667		 * u-boot.
 668		 */
 669		dev = pci_ctl->parent;
 670	}
 671
 672	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
 673	if (liodn) {
 674		liodn_cnt = len / sizeof(u32);
 675		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
 676	} else {
 677		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
 678		ret = -EINVAL;
 679	}
 680
 681	return ret;
 682}
 683
 684static void fsl_pamu_detach_device(struct iommu_domain *domain,
 685				   struct device *dev)
 686{
 687	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 688	const u32 *prop;
 689	int len;
 690	struct pci_dev *pdev = NULL;
 691	struct pci_controller *pci_ctl;
 692
 693	/*
 694	 * Use LIODN of the PCI controller while detaching a
 695	 * PCI device.
 696	 */
 697	if (dev_is_pci(dev)) {
 698		pdev = to_pci_dev(dev);
 699		pci_ctl = pci_bus_to_host(pdev->bus);
 700		/*
 701		 * make dev point to pci controller device
 702		 * so we can get the LIODN programmed by
 703		 * u-boot.
 704		 */
 705		dev = pci_ctl->parent;
 706	}
 707
 708	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
 709	if (prop)
 710		detach_device(dev, dma_domain);
 711	else
 712		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
 713}
 714
 715static  int configure_domain_geometry(struct iommu_domain *domain, void *data)
 716{
 717	struct iommu_domain_geometry *geom_attr = data;
 718	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 719	dma_addr_t geom_size;
 720	unsigned long flags;
 721
 722	geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
 723	/*
 724	 * Sanity check the geometry size. Also, we do not support
 725	 * DMA outside of the geometry.
 726	 */
 727	if (check_size(geom_size, geom_attr->aperture_start) ||
 728	    !geom_attr->force_aperture) {
 729		pr_debug("Invalid PAMU geometry attributes\n");
 730		return -EINVAL;
 731	}
 732
 733	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 734	if (dma_domain->enabled) {
 735		pr_debug("Can't set geometry attributes as domain is active\n");
 736		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 737		return  -EBUSY;
 738	}
 739
 740	/* Copy the domain geometry information */
 741	memcpy(&domain->geometry, geom_attr,
 742	       sizeof(struct iommu_domain_geometry));
 743	dma_domain->geom_size = geom_size;
 744
 745	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 746
 747	return 0;
 748}
 749
 750/* Set the domain stash attribute */
 751static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
 752{
 753	struct pamu_stash_attribute *stash_attr = data;
 754	unsigned long flags;
 755	int ret;
 756
 757	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 758
 759	memcpy(&dma_domain->dma_stash, stash_attr,
 760	       sizeof(struct pamu_stash_attribute));
 761
 762	dma_domain->stash_id = get_stash_id(stash_attr->cache,
 763					    stash_attr->cpu);
 764	if (dma_domain->stash_id == ~(u32)0) {
 765		pr_debug("Invalid stash attributes\n");
 766		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 767		return -EINVAL;
 768	}
 769
 770	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
 771
 772	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 773
 774	return ret;
 775}
 776
 777/* Configure domain dma state i.e. enable/disable DMA */
 778static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
 779{
 780	struct device_domain_info *info;
 781	unsigned long flags;
 782	int ret;
 783
 784	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 785
 786	if (enable && !dma_domain->mapped) {
 787		pr_debug("Can't enable DMA domain without valid mapping\n");
 788		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 789		return -ENODEV;
 790	}
 791
 792	dma_domain->enabled = enable;
 793	list_for_each_entry(info, &dma_domain->devices, link) {
 794		ret = (enable) ? pamu_enable_liodn(info->liodn) :
 795			pamu_disable_liodn(info->liodn);
 796		if (ret)
 797			pr_debug("Unable to set dma state for liodn %d",
 798				 info->liodn);
 799	}
 800	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 801
 802	return 0;
 803}
 804
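/*
 * DOMAIN_ATTR_WINDOWS handler: with DMA disabled and the geometry already set,
 * validate the requested window count (a power of two within the PAMU
 * subwindow limit), reprogram the geometry for all attached LIODNs and
 * (re)allocate the win_arr bookkeeping array.
 */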
 805static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
 806{
 807	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 808	unsigned long flags;
 809	int ret;
 810
 811	spin_lock_irqsave(&dma_domain->domain_lock, flags);
 812	/* Ensure domain is inactive i.e. DMA should be disabled for the domain */
 813	if (dma_domain->enabled) {
 814		pr_debug("Can't set geometry attributes as domain is active\n");
 815		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 816		return  -EBUSY;
 817	}
 818
 819	/* Ensure that the geometry has been set for the domain */
 820	if (!dma_domain->geom_size) {
 821		pr_debug("Please configure geometry before setting the number of windows\n");
 822		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 823		return -EINVAL;
 824	}
 825
 826	/*
 827	 * Ensure we have valid window count i.e. it should be less than
 828	 * maximum permissible limit and should be a power of two.
 829	 */
 830	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
 831		pr_debug("Invalid window count\n");
 832		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 833		return -EINVAL;
 834	}
 835
 836	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
 837				       w_count > 1 ? w_count : 0);
 838	if (!ret) {
 839		kfree(dma_domain->win_arr);
 840		dma_domain->win_arr = kcalloc(w_count,
 841					      sizeof(*dma_domain->win_arr),
 842					      GFP_ATOMIC);
 843		if (!dma_domain->win_arr) {
 844			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 845			return -ENOMEM;
 846		}
 847		dma_domain->win_cnt = w_count;
 848	}
 849	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
 850
 851	return ret;
 852}
 853
 854static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
 855				    enum iommu_attr attr_type, void *data)
 856{
 857	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 858	int ret = 0;
 859
 860	switch (attr_type) {
 861	case DOMAIN_ATTR_GEOMETRY:
 862		ret = configure_domain_geometry(domain, data);
 863		break;
 864	case DOMAIN_ATTR_FSL_PAMU_STASH:
 865		ret = configure_domain_stash(dma_domain, data);
 866		break;
 867	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
 868		ret = configure_domain_dma_state(dma_domain, *(int *)data);
 869		break;
 870	case DOMAIN_ATTR_WINDOWS:
 871		ret = fsl_pamu_set_windows(domain, *(u32 *)data);
 872		break;
 873	default:
 874		pr_debug("Unsupported attribute type\n");
 875		ret = -EINVAL;
 876		break;
 877	}
 878
 879	return ret;
 880}
 881
 882static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
 883				    enum iommu_attr attr_type, void *data)
 884{
 885	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 886	int ret = 0;
 887
 888	switch (attr_type) {
 889	case DOMAIN_ATTR_FSL_PAMU_STASH:
 890		memcpy(data, &dma_domain->dma_stash,
 891		       sizeof(struct pamu_stash_attribute));
 892		break;
 893	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
 894		*(int *)data = dma_domain->enabled;
 895		break;
 896	case DOMAIN_ATTR_FSL_PAMUV1:
 897		*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
 898		break;
 899	case DOMAIN_ATTR_WINDOWS:
 900		*(u32 *)data = dma_domain->win_cnt;
 901		break;
 902	default:
 903		pr_debug("Unsupported attribute type\n");
 904		ret = -EINVAL;
 905		break;
 906	}
 907
 908	return ret;
 909}
 910
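/*
 * Illustrative sketch, not part of this file: one way a v5.9-era client could
 * drive the window attribute API above through the generic IOMMU layer
 * (<linux/iommu.h>, <linux/platform_device.h>). The function name, the device
 * argument and the single-window layout are assumptions for the example;
 * "size" must be a power of two of at least PAMU_PAGE_SIZE, and error
 * unwinding is abbreviated.
 */
static int example_pamu_setup(struct device *dev, phys_addr_t paddr, u64 size)
{
	struct iommu_domain_geometry geom = {
		.aperture_start	= 0,
		.aperture_end	= size - 1,
		.force_aperture	= true,
	};
	struct iommu_domain *dom;
	u32 windows = 1;
	int enable = 1;
	int ret;

	dom = iommu_domain_alloc(&platform_bus_type);	/* fsl_pamu_domain_alloc() */
	if (!dom)
		return -ENOMEM;

	/* Geometry first (configure_domain_geometry()), then the window count. */
	ret = iommu_domain_set_attr(dom, DOMAIN_ATTR_GEOMETRY, &geom);
	if (!ret)
		ret = iommu_domain_set_attr(dom, DOMAIN_ATTR_WINDOWS, &windows);
	/* Attach programs the PAACE for each "fsl,liodn" of the device. */
	if (!ret)
		ret = iommu_attach_device(dom, dev);
	/* Map window 0 and only then allow DMA for the domain. */
	if (!ret)
		ret = iommu_domain_window_enable(dom, 0, paddr, size,
						 IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		ret = iommu_domain_set_attr(dom, DOMAIN_ATTR_FSL_PAMU_ENABLE,
					    &enable);
	if (ret)
		iommu_domain_free(dom);
	return ret;
}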
 911static struct iommu_group *get_device_iommu_group(struct device *dev)
 912{
 913	struct iommu_group *group;
 914
 915	group = iommu_group_get(dev);
 916	if (!group)
 917		group = iommu_group_alloc();
 918
 919	return group;
 920}
 921
 922static  bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
 923{
 924	u32 version;
 925
  926	/* Check the PCI controller version number by reading the BRR1 register */
 927	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
 928	version &= PCI_FSL_BRR1_VER;
 929	/* If PCI controller version is >= 0x204 we can partition endpoints */
 930	return version >= 0x204;
 931}
 932
 933/* Get iommu group information from peer devices or devices on the parent bus */
 934static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
 935{
 936	struct pci_dev *tmp;
 937	struct iommu_group *group;
 938	struct pci_bus *bus = pdev->bus;
 939
 940	/*
  941	 * Traverse the pci bus device list to get
 942	 * the shared iommu group.
 943	 */
 944	while (bus) {
 945		list_for_each_entry(tmp, &bus->devices, bus_list) {
 946			if (tmp == pdev)
 947				continue;
 948			group = iommu_group_get(&tmp->dev);
 949			if (group)
 950				return group;
 951		}
 952
 953		bus = bus->parent;
 954	}
 955
 956	return NULL;
 957}
 958
 959static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
 960{
 961	struct pci_controller *pci_ctl;
 962	bool pci_endpt_partitioning;
 963	struct iommu_group *group = NULL;
 964
 965	pci_ctl = pci_bus_to_host(pdev->bus);
 966	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
 967	/* We can partition PCIe devices so assign device group to the device */
 968	if (pci_endpt_partitioning) {
 969		group = pci_device_group(&pdev->dev);
 970
 971		/*
 972		 * PCIe controller is not a partitionable entity
 973		 * free the controller device iommu_group.
 974		 */
 975		if (pci_ctl->parent->iommu_group)
 976			iommu_group_remove_device(pci_ctl->parent);
 977	} else {
 978		/*
 979		 * All devices connected to the controller will share the
 980		 * PCI controllers device group. If this is the first
 981		 * device to be probed for the pci controller, copy the
 982		 * device group information from the PCI controller device
 983		 * node and remove the PCI controller iommu group.
 984		 * For subsequent devices, the iommu group information can
 985		 * be obtained from sibling devices (i.e. from the bus_devices
 986		 * link list).
 987		 */
 988		if (pci_ctl->parent->iommu_group) {
 989			group = get_device_iommu_group(pci_ctl->parent);
 990			iommu_group_remove_device(pci_ctl->parent);
 991		} else {
 992			group = get_shared_pci_device_group(pdev);
 993		}
 994	}
 995
 996	if (!group)
 997		group = ERR_PTR(-ENODEV);
 998
 999	return group;
1000}
1001
1002static struct iommu_group *fsl_pamu_device_group(struct device *dev)
1003{
1004	struct iommu_group *group = ERR_PTR(-ENODEV);
1005	int len;
1006
1007	/*
1008	 * For platform devices we allocate a separate group for
1009	 * each of the devices.
1010	 */
1011	if (dev_is_pci(dev))
1012		group = get_pci_device_group(to_pci_dev(dev));
1013	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
1014		group = get_device_iommu_group(dev);
1015
1016	return group;
1017}
1018
1019static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
1020{
1021	return &pamu_iommu;
1022}
1023
1024static void fsl_pamu_release_device(struct device *dev)
1025{
1026}
1027
1028static const struct iommu_ops fsl_pamu_ops = {
1029	.capable	= fsl_pamu_capable,
1030	.domain_alloc	= fsl_pamu_domain_alloc,
1031	.domain_free    = fsl_pamu_domain_free,
1032	.attach_dev	= fsl_pamu_attach_device,
1033	.detach_dev	= fsl_pamu_detach_device,
1034	.domain_window_enable = fsl_pamu_window_enable,
1035	.domain_window_disable = fsl_pamu_window_disable,
1036	.iova_to_phys	= fsl_pamu_iova_to_phys,
1037	.domain_set_attr = fsl_pamu_set_domain_attr,
1038	.domain_get_attr = fsl_pamu_get_domain_attr,
1039	.probe_device	= fsl_pamu_probe_device,
1040	.release_device	= fsl_pamu_release_device,
1041	.device_group   = fsl_pamu_device_group,
1042};
1043
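/*
 * Driver registration: create the kmem caches, expose the IOMMU in sysfs as
 * "iommu0", register it with the IOMMU core and hook fsl_pamu_ops up to the
 * platform and PCI buses.
 */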
1044int __init pamu_domain_init(void)
1045{
1046	int ret = 0;
1047
1048	ret = iommu_init_mempool();
1049	if (ret)
1050		return ret;
1051
1052	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
1053	if (ret)
1054		return ret;
1055
1056	iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
1057
1058	ret = iommu_device_register(&pamu_iommu);
1059	if (ret) {
1060		iommu_device_sysfs_remove(&pamu_iommu);
1061		pr_err("Can't register iommu device\n");
1062		return ret;
1063	}
1064
1065	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
1066	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
1067
1068	return ret;
1069}
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *
  4 * Copyright (C) 2013 Freescale Semiconductor, Inc.
  5 * Author: Varun Sethi <varun.sethi@freescale.com>
  6 */
  7
  8#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
  9
 10#include "fsl_pamu_domain.h"
 11
 12#include <linux/platform_device.h>
 13#include <sysdev/fsl_pci.h>
 14
 15/*
 16 * Global spinlock that needs to be held while
 17 * configuring PAMU.
 18 */
 19static DEFINE_SPINLOCK(iommu_lock);
 20
 21static struct kmem_cache *fsl_pamu_domain_cache;
 22static struct kmem_cache *iommu_devinfo_cache;
 23static DEFINE_SPINLOCK(device_domain_lock);
 24
 25struct iommu_device pamu_iommu;	/* IOMMU core code handle */
 26
 27static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
 28{
 29	return container_of(dom, struct fsl_dma_domain, iommu_domain);
 30}
 31
 32static int __init iommu_init_mempool(void)
 33{
 34	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
 35						  sizeof(struct fsl_dma_domain),
 36						  0,
 37						  SLAB_HWCACHE_ALIGN,
 38						  NULL);
 39	if (!fsl_pamu_domain_cache) {
 40		pr_debug("Couldn't create fsl iommu_domain cache\n");
 41		return -ENOMEM;
 42	}
 43
 44	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
 45						sizeof(struct device_domain_info),
 46						0,
 47						SLAB_HWCACHE_ALIGN,
 48						NULL);
 49	if (!iommu_devinfo_cache) {
 50		pr_debug("Couldn't create devinfo cache\n");
 51		kmem_cache_destroy(fsl_pamu_domain_cache);
 52		return -ENOMEM;
 53	}
 54
 55	return 0;
 56}
 57
 58static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
 59			      u32 val)
 60{
 61	int ret = 0;
 62	unsigned long flags;
 63
 64	spin_lock_irqsave(&iommu_lock, flags);
 65	ret = pamu_update_paace_stash(liodn, val);
 66	if (ret) {
 67		pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
 68		spin_unlock_irqrestore(&iommu_lock, flags);
 69		return ret;
 70	}
 71
 72	spin_unlock_irqrestore(&iommu_lock, flags);
 73
 74	return ret;
 75}
 76
 77/* Set the geometry parameters for a LIODN */
 78static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
 79			  int liodn)
 80{
 81	u32 omi_index = ~(u32)0;
 82	unsigned long flags;
 83	int ret;
 84
 85	/*
 86	 * Configure the omi_index at the geometry setup time.
 87	 * This is a static value which depends on the type of
 88	 * device and would not change thereafter.
 89	 */
 90	get_ome_index(&omi_index, dev);
 91
 92	spin_lock_irqsave(&iommu_lock, flags);
 93	ret = pamu_disable_liodn(liodn);
 94	if (ret)
 95		goto out_unlock;
 96	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
 97	if (ret)
 98		goto out_unlock;
 99	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
100				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
101out_unlock:
102	spin_unlock_irqrestore(&iommu_lock, flags);
103	if (ret) {
104		pr_debug("PAACE configuration failed for liodn %d\n",
105			 liodn);
106	}
107	return ret;
108}
109
110static void remove_device_ref(struct device_domain_info *info)
111{
112	unsigned long flags;
113
114	list_del(&info->link);
115	spin_lock_irqsave(&iommu_lock, flags);
116	pamu_disable_liodn(info->liodn);
117	spin_unlock_irqrestore(&iommu_lock, flags);
118	spin_lock_irqsave(&device_domain_lock, flags);
119	dev_iommu_priv_set(info->dev, NULL);
120	kmem_cache_free(iommu_devinfo_cache, info);
121	spin_unlock_irqrestore(&device_domain_lock, flags);
122}
123
124static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
125{
126	struct device_domain_info *info, *tmp;
127	unsigned long flags;
128
129	spin_lock_irqsave(&dma_domain->domain_lock, flags);
130	/* Remove the device from the domain device list */
131	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
132		if (!dev || (info->dev == dev))
133			remove_device_ref(info);
134	}
135	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
136}
137
138static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
139{
140	struct device_domain_info *info, *old_domain_info;
141	unsigned long flags;
142
143	spin_lock_irqsave(&device_domain_lock, flags);
144	/*
145	 * Check here if the device is already attached to domain or not.
146	 * If the device is already attached to a domain detach it.
147	 */
148	old_domain_info = dev_iommu_priv_get(dev);
149	if (old_domain_info && old_domain_info->domain != dma_domain) {
150		spin_unlock_irqrestore(&device_domain_lock, flags);
151		detach_device(dev, old_domain_info->domain);
152		spin_lock_irqsave(&device_domain_lock, flags);
153	}
154
155	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
156
157	info->dev = dev;
158	info->liodn = liodn;
159	info->domain = dma_domain;
160
161	list_add(&info->link, &dma_domain->devices);
162	/*
163	 * In case of devices with multiple LIODNs just store
164	 * the info for the first LIODN as all
165	 * LIODNs share the same domain
166	 */
167	if (!dev_iommu_priv_get(dev))
168		dev_iommu_priv_set(dev, info);
169	spin_unlock_irqrestore(&device_domain_lock, flags);
170}
171
172static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
173					 dma_addr_t iova)
174{
175	if (iova < domain->geometry.aperture_start ||
176	    iova > domain->geometry.aperture_end)
177		return 0;
178	return iova;
179}
180
181static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
182{
183	return cap == IOMMU_CAP_CACHE_COHERENCY;
184}
185
186static void fsl_pamu_domain_free(struct iommu_domain *domain)
187{
188	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
189
190	/* remove all the devices from the device list */
191	detach_device(NULL, dma_domain);
192	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
193}
194
195static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
196{
197	struct fsl_dma_domain *dma_domain;
198
199	if (type != IOMMU_DOMAIN_UNMANAGED)
200		return NULL;
201
202	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
203	if (!dma_domain)
204		return NULL;
205
206	dma_domain->stash_id = ~(u32)0;
207	INIT_LIST_HEAD(&dma_domain->devices);
208	spin_lock_init(&dma_domain->domain_lock);
209
210	/* default geometry 64 GB i.e. maximum system address */
 211	dma_domain->iommu_domain.geometry.aperture_start = 0;
212	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
213	dma_domain->iommu_domain.geometry.force_aperture = true;
214
215	return &dma_domain->iommu_domain;
216}
217
218/* Update stash destination for all LIODNs associated with the domain */
219static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
220{
221	struct device_domain_info *info;
222	int ret = 0;
223
224	list_for_each_entry(info, &dma_domain->devices, link) {
225		ret = update_liodn_stash(info->liodn, dma_domain, val);
226		if (ret)
227			break;
228	}
229
230	return ret;
231}
232
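/*
 * v6.2 attach: read the "fsl,liodn" property (taken from the PCI controller
 * node for PCI devices), then for each LIODN record it on the domain's device
 * list, program its PAACE and enable it. The v5.9 window/subwindow handling is
 * gone; every LIODN gets a single PAACE covering the aperture, and
 * fsl_pamu_iova_to_phys() simply returns the IOVA.
 */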
233static int fsl_pamu_attach_device(struct iommu_domain *domain,
234				  struct device *dev)
235{
236	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
237	unsigned long flags;
238	int len, ret = 0, i;
239	const u32 *liodn;
240	struct pci_dev *pdev = NULL;
241	struct pci_controller *pci_ctl;
242
243	/*
244	 * Use LIODN of the PCI controller while attaching a
245	 * PCI device.
246	 */
247	if (dev_is_pci(dev)) {
248		pdev = to_pci_dev(dev);
249		pci_ctl = pci_bus_to_host(pdev->bus);
250		/*
251		 * make dev point to pci controller device
252		 * so we can get the LIODN programmed by
253		 * u-boot.
254		 */
255		dev = pci_ctl->parent;
256	}
257
258	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
259	if (!liodn) {
260		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
261		return -ENODEV;
262	}
263
264	spin_lock_irqsave(&dma_domain->domain_lock, flags);
265	for (i = 0; i < len / sizeof(u32); i++) {
266		/* Ensure that LIODN value is valid */
267		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
268			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
269				 liodn[i], dev->of_node);
270			ret = -ENODEV;
271			break;
272		}
273
274		attach_device(dma_domain, liodn[i], dev);
275		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
276		if (ret)
277			break;
278		ret = pamu_enable_liodn(liodn[i]);
279		if (ret)
280			break;
281	}
282	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
283	return ret;
284}
285
286static void fsl_pamu_detach_device(struct iommu_domain *domain,
287				   struct device *dev)
288{
289	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
290	const u32 *prop;
291	int len;
292	struct pci_dev *pdev = NULL;
293	struct pci_controller *pci_ctl;
294
295	/*
296	 * Use LIODN of the PCI controller while detaching a
297	 * PCI device.
298	 */
299	if (dev_is_pci(dev)) {
300		pdev = to_pci_dev(dev);
301		pci_ctl = pci_bus_to_host(pdev->bus);
302		/*
303		 * make dev point to pci controller device
304		 * so we can get the LIODN programmed by
305		 * u-boot.
306		 */
307		dev = pci_ctl->parent;
308	}
309
310	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
311	if (prop)
312		detach_device(dev, dma_domain);
313	else
314		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
315}
316
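/*
 * fsl_pamu_configure_l1_stash() below replaces the DOMAIN_ATTR_FSL_PAMU_STASH
 * attribute path of the v5.9 code: the caller passes the target CPU directly,
 * and the resulting stash id is propagated to all attached LIODNs.
 */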
317/* Set the domain stash attribute */
318int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
319{
320	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
321	unsigned long flags;
322	int ret;
323
324	spin_lock_irqsave(&dma_domain->domain_lock, flags);
325	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
326	if (dma_domain->stash_id == ~(u32)0) {
327		pr_debug("Invalid stash attributes\n");
328		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
329		return -EINVAL;
330	}
331	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
332	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
333
334	return ret;
335}
336
337static struct iommu_group *get_device_iommu_group(struct device *dev)
338{
339	struct iommu_group *group;
340
341	group = iommu_group_get(dev);
342	if (!group)
343		group = iommu_group_alloc();
344
345	return group;
346}
347
348static  bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
349{
350	u32 version;
351
 352	/* Check the PCI controller version number by reading the BRR1 register */
353	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
354	version &= PCI_FSL_BRR1_VER;
355	/* If PCI controller version is >= 0x204 we can partition endpoints */
356	return version >= 0x204;
357}
358
359/* Get iommu group information from peer devices or devices on the parent bus */
360static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
361{
362	struct pci_dev *tmp;
363	struct iommu_group *group;
364	struct pci_bus *bus = pdev->bus;
365
366	/*
 367	 * Traverse the pci bus device list to get
368	 * the shared iommu group.
369	 */
370	while (bus) {
371		list_for_each_entry(tmp, &bus->devices, bus_list) {
372			if (tmp == pdev)
373				continue;
374			group = iommu_group_get(&tmp->dev);
375			if (group)
376				return group;
377		}
378
379		bus = bus->parent;
380	}
381
382	return NULL;
383}
384
385static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
386{
387	struct pci_controller *pci_ctl;
388	bool pci_endpt_partitioning;
389	struct iommu_group *group = NULL;
390
391	pci_ctl = pci_bus_to_host(pdev->bus);
392	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
393	/* We can partition PCIe devices so assign device group to the device */
394	if (pci_endpt_partitioning) {
395		group = pci_device_group(&pdev->dev);
396
397		/*
 398		 * PCIe controller is not a partitionable entity
399		 * free the controller device iommu_group.
400		 */
401		if (pci_ctl->parent->iommu_group)
402			iommu_group_remove_device(pci_ctl->parent);
403	} else {
404		/*
405		 * All devices connected to the controller will share the
406		 * PCI controllers device group. If this is the first
407		 * device to be probed for the pci controller, copy the
408		 * device group information from the PCI controller device
409		 * node and remove the PCI controller iommu group.
410		 * For subsequent devices, the iommu group information can
411		 * be obtained from sibling devices (i.e. from the bus_devices
412		 * link list).
413		 */
414		if (pci_ctl->parent->iommu_group) {
415			group = get_device_iommu_group(pci_ctl->parent);
416			iommu_group_remove_device(pci_ctl->parent);
417		} else {
418			group = get_shared_pci_device_group(pdev);
419		}
420	}
421
422	if (!group)
423		group = ERR_PTR(-ENODEV);
424
425	return group;
426}
427
428static struct iommu_group *fsl_pamu_device_group(struct device *dev)
429{
430	struct iommu_group *group = ERR_PTR(-ENODEV);
431	int len;
432
433	/*
434	 * For platform devices we allocate a separate group for
435	 * each of the devices.
436	 */
437	if (dev_is_pci(dev))
438		group = get_pci_device_group(to_pci_dev(dev));
439	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
440		group = get_device_iommu_group(dev);
441
442	return group;
443}
444
445static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
446{
447	return &pamu_iommu;
448}
449
450static const struct iommu_ops fsl_pamu_ops = {
451	.capable	= fsl_pamu_capable,
452	.domain_alloc	= fsl_pamu_domain_alloc,
453	.probe_device	= fsl_pamu_probe_device,
454	.device_group   = fsl_pamu_device_group,
455	.default_domain_ops = &(const struct iommu_domain_ops) {
456		.attach_dev	= fsl_pamu_attach_device,
457		.detach_dev	= fsl_pamu_detach_device,
458		.iova_to_phys	= fsl_pamu_iova_to_phys,
459		.free		= fsl_pamu_domain_free,
460	}
461};
462
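/*
 * Registration in v6.2 is simpler: the ops are passed straight to
 * iommu_device_register(), and the per-bus bus_set_iommu() calls of the v5.9
 * code are no longer needed.
 */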
463int __init pamu_domain_init(void)
464{
465	int ret = 0;
466
467	ret = iommu_init_mempool();
468	if (ret)
469		return ret;
470
471	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
472	if (ret)
473		return ret;
474
475	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
476	if (ret) {
477		iommu_device_sysfs_remove(&pamu_iommu);
478		pr_err("Can't register iommu device\n");
479	}
480
481	return ret;
482}