v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * IOMMU API for Renesas VMSA-compatible IPMMU
   4 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
   5 *
   6 * Copyright (C) 2014-2020 Renesas Electronics Corporation
   7 */
   8
   9#include <linux/bitmap.h>
  10#include <linux/delay.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/err.h>
  13#include <linux/export.h>
  14#include <linux/init.h>
  15#include <linux/interrupt.h>
  16#include <linux/io.h>
  17#include <linux/io-pgtable.h>
  18#include <linux/iommu.h>
  19#include <linux/of.h>
  20#include <linux/of_device.h>
  21#include <linux/of_platform.h>
  22#include <linux/platform_device.h>
  23#include <linux/sizes.h>
  24#include <linux/slab.h>
  25#include <linux/sys_soc.h>
  26
  27#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
  28#include <asm/dma-iommu.h>
  29#else
  30#define arm_iommu_create_mapping(...)	NULL
  31#define arm_iommu_attach_device(...)	-ENODEV
  32#define arm_iommu_release_mapping(...)	do {} while (0)
  33#define arm_iommu_detach_device(...)	do {} while (0)
  34#endif
  35
  36#define IPMMU_CTX_MAX		16U
  37#define IPMMU_CTX_INVALID	-1
  38
  39#define IPMMU_UTLB_MAX		64U
  40
  41struct ipmmu_features {
  42	bool use_ns_alias_offset;
  43	bool has_cache_leaf_nodes;
  44	unsigned int number_of_contexts;
  45	unsigned int num_utlbs;
  46	bool setup_imbuscr;
  47	bool twobit_imttbcr_sl0;
  48	bool reserved_context;
  49	bool cache_snoop;
  50	unsigned int ctx_offset_base;
  51	unsigned int ctx_offset_stride;
  52	unsigned int utlb_offset_base;
  53};
  54
  55struct ipmmu_vmsa_device {
  56	struct device *dev;
  57	void __iomem *base;
  58	struct iommu_device iommu;
  59	struct ipmmu_vmsa_device *root;
  60	const struct ipmmu_features *features;
  61	unsigned int num_ctx;
  62	spinlock_t lock;			/* Protects ctx and domains[] */
  63	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
  64	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
  65	s8 utlb_ctx[IPMMU_UTLB_MAX];
  66
  67	struct iommu_group *group;
  68	struct dma_iommu_mapping *mapping;
  69};
  70
  71struct ipmmu_vmsa_domain {
  72	struct ipmmu_vmsa_device *mmu;
  73	struct iommu_domain io_domain;
  74
  75	struct io_pgtable_cfg cfg;
  76	struct io_pgtable_ops *iop;
  77
  78	unsigned int context_id;
  79	struct mutex mutex;			/* Protects mappings */
  80};
  81
  82static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
  83{
  84	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
  85}
  86
  87static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
  88{
  89	return dev_iommu_priv_get(dev);
  90}
  91
  92#define TLB_LOOP_TIMEOUT		100	/* 100us */
  93
  94/* -----------------------------------------------------------------------------
  95 * Registers Definition
  96 */
  97
  98#define IM_NS_ALIAS_OFFSET		0x800
  99
 100/* MMU "context" registers */
 101#define IMCTR				0x0000		/* R-Car Gen2/3 */
 102#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
 103#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
 104#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */
 105
 106#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
 107#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
 108#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
 109#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
 110#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
 111#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
 112#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */
 113
 114#define IMBUSCR				0x000c		/* R-Car Gen2 only */
 115#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
 116#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */
 117
 118#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
 119#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */
 120
 121#define IMSTR				0x0020		/* R-Car Gen2/3 */
 122#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
 123#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
 124#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
 125#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */
 126
 127#define IMMAIR0				0x0028		/* R-Car Gen2/3 */
 128
 129#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
 130#define IMEUAR				0x0034		/* R-Car Gen3 only */
 131
 132/* uTLB registers */
 133#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
 134#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
 135#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
 136#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
 137#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
 138#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */
 139
 140#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
 141#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
 142#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */
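/*
 * Illustrative expansion of the uTLB macros above: uTLB 33 falls in the
 * upper register bank, so IMUCTR(33) = 0x0600 + (33 - 32) * 16 = 0x0610 and
 * IMUASID(33) = 0x0608 + (33 - 32) * 16 = 0x0618, both relative to the
 * per-instance utlb_offset_base added by ipmmu_utlb_reg() below.
 */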
 143
 144/* -----------------------------------------------------------------------------
 145 * Root device handling
 146 */
 147
 148static struct platform_driver ipmmu_driver;
 149
 150static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
 151{
 152	return mmu->root == mmu;
 153}
 154
 155static int __ipmmu_check_device(struct device *dev, void *data)
 156{
 157	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
 158	struct ipmmu_vmsa_device **rootp = data;
 159
 160	if (ipmmu_is_root(mmu))
 161		*rootp = mmu;
 162
 163	return 0;
 164}
 165
 166static struct ipmmu_vmsa_device *ipmmu_find_root(void)
 167{
 168	struct ipmmu_vmsa_device *root = NULL;
 169
 170	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
 171				      __ipmmu_check_device) == 0 ? root : NULL;
 172}
 173
 174/* -----------------------------------------------------------------------------
 175 * Read/Write Access
 176 */
 177
 178static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
 179{
 180	return ioread32(mmu->base + offset);
 181}
 182
 183static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
 184			u32 data)
 185{
 186	iowrite32(data, mmu->base + offset);
 187}
 188
 189static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
 190				  unsigned int context_id, unsigned int reg)
 191{
 192	unsigned int base = mmu->features->ctx_offset_base;
 193
 194	if (context_id > 7)
 195		base += 0x800 - 8 * 0x40;
 196
 197	return base + context_id * mmu->features->ctx_offset_stride + reg;
 198}
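/*
 * Worked example of the offset computation above, using the R-Car Gen4
 * values from the feature table later in this file (ctx_offset_base =
 * 0x10000, ctx_offset_stride = 0x1040): the IMCTR of context 3 lands at
 * 0x10000 + 3 * 0x1040 = 0x130c0, while context 8 additionally gets the
 * 0x800 - 8 * 0x40 = 0x600 adjustment and lands at
 * 0x10000 + 0x600 + 8 * 0x1040 = 0x18800.
 */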
 199
 200static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
 201			  unsigned int context_id, unsigned int reg)
 202{
 203	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
 204}
 205
 206static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
 207			    unsigned int context_id, unsigned int reg, u32 data)
 208{
 209	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
 210}
 211
 212static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
 213			       unsigned int reg)
 214{
 215	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
 216}
 217
 218static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
 219				 unsigned int reg, u32 data)
 220{
 221	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
 222}
 223
 224static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
 225				unsigned int reg, u32 data)
 226{
 227	if (domain->mmu != domain->mmu->root)
 228		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
 229
 230	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
 231}
 232
 233static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
 234{
 235	return mmu->features->utlb_offset_base + reg;
 236}
 237
 238static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
 239				unsigned int utlb, u32 data)
 240{
 241	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
 242}
 243
 244static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
 245			       unsigned int utlb, u32 data)
 246{
 247	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
 248}
 249
 250/* -----------------------------------------------------------------------------
 251 * TLB and microTLB Management
 252 */
 253
 254/* Wait for any pending TLB invalidations to complete */
 255static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
 256{
 257	unsigned int count = 0;
 258
 259	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
 260		cpu_relax();
 261		if (++count == TLB_LOOP_TIMEOUT) {
 262			dev_err_ratelimited(domain->mmu->dev,
 263			"TLB sync timed out -- MMU may be deadlocked\n");
 264			return;
 265		}
 266		udelay(1);
 267	}
 268}
 269
 270static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
 271{
 272	u32 reg;
 273
 274	reg = ipmmu_ctx_read_root(domain, IMCTR);
 275	reg |= IMCTR_FLUSH;
 276	ipmmu_ctx_write_all(domain, IMCTR, reg);
 277
 278	ipmmu_tlb_sync(domain);
 279}
 280
 281/*
 282 * Enable MMU translation for the microTLB.
 283 */
 284static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
 285			      unsigned int utlb)
 286{
 287	struct ipmmu_vmsa_device *mmu = domain->mmu;
 288
 289	/*
 290	 * TODO: Reference-count the microTLB as several bus masters can be
 291	 * connected to the same microTLB.
 292	 */
 293
 294	/* TODO: What should we set the ASID to ? */
 295	ipmmu_imuasid_write(mmu, utlb, 0);
 296	/* TODO: Do we need to flush the microTLB ? */
 297	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
 298				      IMUCTR_FLUSH | IMUCTR_MMUEN);
 299	mmu->utlb_ctx[utlb] = domain->context_id;
 300}
 301
 302/*
 303 * Disable MMU translation for the microTLB.
 304 */
 305static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
 306			       unsigned int utlb)
 307{
 308	struct ipmmu_vmsa_device *mmu = domain->mmu;
 309
 310	ipmmu_imuctr_write(mmu, utlb, 0);
 311	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
 312}
 313
 314static void ipmmu_tlb_flush_all(void *cookie)
 315{
 316	struct ipmmu_vmsa_domain *domain = cookie;
 317
 318	ipmmu_tlb_invalidate(domain);
 319}
 320
 321static void ipmmu_tlb_flush(unsigned long iova, size_t size,
 322				size_t granule, void *cookie)
 323{
 324	ipmmu_tlb_flush_all(cookie);
 325}
 326
 327static const struct iommu_flush_ops ipmmu_flush_ops = {
 328	.tlb_flush_all = ipmmu_tlb_flush_all,
 329	.tlb_flush_walk = ipmmu_tlb_flush,
 330};
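/*
 * Note that the range-based tlb_flush_walk callback above ignores its
 * iova/size/granule arguments: every flush requested by the io-pgtable code
 * degenerates into a full-context invalidation followed by a sync.
 */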
 331
 332/* -----------------------------------------------------------------------------
 333 * Domain/Context Management
 334 */
 335
 336static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
 337					 struct ipmmu_vmsa_domain *domain)
 338{
 339	unsigned long flags;
 340	int ret;
 341
 342	spin_lock_irqsave(&mmu->lock, flags);
 343
 344	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
 345	if (ret != mmu->num_ctx) {
 346		mmu->domains[ret] = domain;
 347		set_bit(ret, mmu->ctx);
 348	} else
 349		ret = -EBUSY;
 350
 351	spin_unlock_irqrestore(&mmu->lock, flags);
 352
 353	return ret;
 354}
 355
 356static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
 357				      unsigned int context_id)
 358{
 359	unsigned long flags;
 360
 361	spin_lock_irqsave(&mmu->lock, flags);
 362
 363	clear_bit(context_id, mmu->ctx);
 364	mmu->domains[context_id] = NULL;
 365
 366	spin_unlock_irqrestore(&mmu->lock, flags);
 367}
 368
 369static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
 370{
 371	u64 ttbr;
 372	u32 tmp;
 373
 374	/* TTBR0 */
 375	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
 376	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
 377	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
 378
 379	/*
 380	 * TTBCR
 381	 * We use long descriptors and allocate the whole 32-bit VA space to
 382	 * TTBR0.
 383	 */
 384	if (domain->mmu->features->twobit_imttbcr_sl0)
 385		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
 386	else
 387		tmp = IMTTBCR_SL0_LVL_1;
 388
 389	if (domain->mmu->features->cache_snoop)
 390		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
 391		       IMTTBCR_IRGN0_WB_WA;
 392
 393	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
 394
 395	/* MAIR0 */
 396	ipmmu_ctx_write_root(domain, IMMAIR0,
 397			     domain->cfg.arm_lpae_s1_cfg.mair);
 398
 399	/* IMBUSCR */
 400	if (domain->mmu->features->setup_imbuscr)
 401		ipmmu_ctx_write_root(domain, IMBUSCR,
 402				     ipmmu_ctx_read_root(domain, IMBUSCR) &
 403				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
 404
 405	/*
 406	 * IMSTR
 407	 * Clear all interrupt flags.
 408	 */
 409	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
 410
 411	/*
 412	 * IMCTR
 413	 * Enable the MMU and interrupt generation. The long-descriptor
 414	 * translation table format doesn't use TEX remapping. Don't enable AF
 415	 * software management as we have no use for it. Flush the TLB as
 416	 * required when modifying the context registers.
 417	 */
 418	ipmmu_ctx_write_all(domain, IMCTR,
 419			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
 420}
 421
 422static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 423{
 424	int ret;
 425
 426	/*
 427	 * Allocate the page table operations.
 428	 *
 429	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
 430	 * access, Long-descriptor format" that the NStable bit being set in a
 431	 * table descriptor will result in the NStable and NS bits of all child
 432	 * entries being ignored and considered as being set. The IPMMU seems
 433	 * not to comply with this, as it generates a secure access page fault
 434	 * if any of the NStable and NS bits isn't set when running in
 435	 * non-secure mode.
 436	 */
 437	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
 438	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
 439	domain->cfg.ias = 32;
 440	domain->cfg.oas = 40;
 441	domain->cfg.tlb = &ipmmu_flush_ops;
 442	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
 443	domain->io_domain.geometry.force_aperture = true;
 444	/*
 445	 * TODO: Add support for coherent walk through CCI with DVM and remove
 446	 * cache handling. For now, delegate it to the io-pgtable code.
 447	 */
 448	domain->cfg.coherent_walk = false;
 449	domain->cfg.iommu_dev = domain->mmu->root->dev;
 450
 451	/*
 452	 * Find an unused context.
 453	 */
 454	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
 455	if (ret < 0)
 456		return ret;
 457
 458	domain->context_id = ret;
 459
 460	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
 461					   domain);
 462	if (!domain->iop) {
 463		ipmmu_domain_free_context(domain->mmu->root,
 464					  domain->context_id);
 465		return -EINVAL;
 466	}
 467
 468	ipmmu_domain_setup_context(domain);
 469	return 0;
 470}
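/*
 * A note on the configuration above: the SZ_1G | SZ_2M | SZ_4K page-size
 * bitmap corresponds to the three mapping sizes of the ARM_32_LPAE_S1
 * format selected here (1GB level-1 blocks, 2MB level-2 blocks and 4KB
 * level-3 pages), with a 32-bit input address space (cfg.ias) and 40-bit
 * output addresses (cfg.oas).
 */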
 471
 472static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
 473{
 474	if (!domain->mmu)
 475		return;
 476
 477	/*
 478	 * Disable the context. Flush the TLB as required when modifying the
 479	 * context registers.
 480	 *
 481	 * TODO: Is TLB flush really needed ?
 482	 */
 483	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
 484	ipmmu_tlb_sync(domain);
 485	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
 486}
 487
 488/* -----------------------------------------------------------------------------
 489 * Fault Handling
 490 */
 491
 492static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 493{
 494	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
 495	struct ipmmu_vmsa_device *mmu = domain->mmu;
 496	unsigned long iova;
 497	u32 status;
 498
 499	status = ipmmu_ctx_read_root(domain, IMSTR);
 500	if (!(status & err_mask))
 501		return IRQ_NONE;
 502
 503	iova = ipmmu_ctx_read_root(domain, IMELAR);
 504	if (IS_ENABLED(CONFIG_64BIT))
 505		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
 506
 507	/*
 508	 * Clear the error status flags. Unlike traditional interrupt flag
 509	 * registers that must be cleared by writing 1, this status register
 510	 * seems to require 0. The error address register must be read before,
 511	 * otherwise its value will be 0.
 512	 */
 513	ipmmu_ctx_write_root(domain, IMSTR, 0);
 514
 515	/* Log fatal errors. */
 516	if (status & IMSTR_MHIT)
 517		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
 518				    iova);
 519	if (status & IMSTR_ABORT)
 520		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
 521				    iova);
 522
 523	if (!(status & (IMSTR_PF | IMSTR_TF)))
 524		return IRQ_NONE;
 525
 526	/*
 527	 * Try to handle page faults and translation faults.
 528	 *
 529	 * TODO: We need to look up the faulty device based on the I/O VA. Use
 530	 * the IOMMU device for now.
 531	 */
 532	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
 533		return IRQ_HANDLED;
 534
 535	dev_err_ratelimited(mmu->dev,
 536			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
 537			    status, iova);
 538
 539	return IRQ_HANDLED;
 540}
 541
 542static irqreturn_t ipmmu_irq(int irq, void *dev)
 543{
 544	struct ipmmu_vmsa_device *mmu = dev;
 545	irqreturn_t status = IRQ_NONE;
 546	unsigned int i;
 547	unsigned long flags;
 548
 549	spin_lock_irqsave(&mmu->lock, flags);
 550
 551	/*
 552	 * Check interrupts for all active contexts.
 553	 */
 554	for (i = 0; i < mmu->num_ctx; i++) {
 555		if (!mmu->domains[i])
 556			continue;
 557		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
 558			status = IRQ_HANDLED;
 559	}
 560
 561	spin_unlock_irqrestore(&mmu->lock, flags);
 562
 563	return status;
 564}
 565
 566/* -----------------------------------------------------------------------------
 567 * IOMMU Operations
 568 */
 569
 570static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
 571{
 572	struct ipmmu_vmsa_domain *domain;
 573
 574	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
 575		return NULL;
 576
 577	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 578	if (!domain)
 579		return NULL;
 580
 581	mutex_init(&domain->mutex);
 582
 583	return &domain->io_domain;
 584}
 585
 586static void ipmmu_domain_free(struct iommu_domain *io_domain)
 587{
 588	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 589
 590	/*
 591	 * Free the domain resources. We assume that all devices have already
 592	 * been detached.
 593	 */
 594	ipmmu_domain_destroy_context(domain);
 595	free_io_pgtable_ops(domain->iop);
 596	kfree(domain);
 597}
 598
 599static int ipmmu_attach_device(struct iommu_domain *io_domain,
 600			       struct device *dev)
 601{
 602	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 603	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 604	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 605	unsigned int i;
 606	int ret = 0;
 607
 608	if (!mmu) {
 609		dev_err(dev, "Cannot attach to IPMMU\n");
 610		return -ENXIO;
 611	}
 612
 613	mutex_lock(&domain->mutex);
 614
 615	if (!domain->mmu) {
 616		/* The domain hasn't been used yet, initialize it. */
 617		domain->mmu = mmu;
 618		ret = ipmmu_domain_init_context(domain);
 619		if (ret < 0) {
 620			dev_err(dev, "Unable to initialize IPMMU context\n");
 621			domain->mmu = NULL;
 622		} else {
 623			dev_info(dev, "Using IPMMU context %u\n",
 624				 domain->context_id);
 625		}
 626	} else if (domain->mmu != mmu) {
 627		/*
 628		 * Something is wrong, we can't attach two devices using
 629		 * different IOMMUs to the same domain.
 630		 */
 631		ret = -EINVAL;
 632	} else
 633		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
 634
 635	mutex_unlock(&domain->mutex);
 636
 637	if (ret < 0)
 638		return ret;
 639
 640	for (i = 0; i < fwspec->num_ids; ++i)
 641		ipmmu_utlb_enable(domain, fwspec->ids[i]);
 642
 643	return 0;
 644}
 645
 646static void ipmmu_detach_device(struct iommu_domain *io_domain,
 647				struct device *dev)
 648{
 649	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 650	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 651	unsigned int i;
 652
 653	for (i = 0; i < fwspec->num_ids; ++i)
 654		ipmmu_utlb_disable(domain, fwspec->ids[i]);
 655
 656	/*
 657	 * TODO: Optimize by disabling the context when no device is attached.
 658	 */
 659}
 660
 661static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
 662		     phys_addr_t paddr, size_t pgsize, size_t pgcount,
 663		     int prot, gfp_t gfp, size_t *mapped)
 664{
 665	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 666
 667	return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount,
 668				      prot, gfp, mapped);
 669}
 670
 671static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
 672			  size_t pgsize, size_t pgcount,
 673			  struct iommu_iotlb_gather *gather)
 674{
 675	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 676
 677	return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather);
 678}
 679
 680static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
 681{
 682	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 683
 684	if (domain->mmu)
 685		ipmmu_tlb_flush_all(domain);
 686}
 687
 688static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
 689			     struct iommu_iotlb_gather *gather)
 690{
 691	ipmmu_flush_iotlb_all(io_domain);
 692}
 693
 694static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
 695				      dma_addr_t iova)
 696{
 697	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 698
 699	/* TODO: Is locking needed ? */
 700
 701	return domain->iop->iova_to_phys(domain->iop, iova);
 702}
 703
 704static int ipmmu_init_platform_device(struct device *dev,
 705				      struct of_phandle_args *args)
 706{
 707	struct platform_device *ipmmu_pdev;
 708
 709	ipmmu_pdev = of_find_device_by_node(args->np);
 710	if (!ipmmu_pdev)
 711		return -ENODEV;
 712
 713	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
 714
 715	return 0;
 716}
 717
 718static const struct soc_device_attribute soc_needs_opt_in[] = {
 719	{ .family = "R-Car Gen3", },
 720	{ .family = "R-Car Gen4", },
 721	{ .family = "RZ/G2", },
 722	{ /* sentinel */ }
 723};
 724
 725static const struct soc_device_attribute soc_denylist[] = {
 726	{ .soc_id = "r8a774a1", },
 727	{ .soc_id = "r8a7795", .revision = "ES1.*" },
 728	{ .soc_id = "r8a7795", .revision = "ES2.*" },
 729	{ .soc_id = "r8a7796", },
 730	{ /* sentinel */ }
 731};
 732
 733static const char * const devices_allowlist[] = {
 734	"ee100000.mmc",
 735	"ee120000.mmc",
 736	"ee140000.mmc",
 737	"ee160000.mmc"
 738};
 739
 740static bool ipmmu_device_is_allowed(struct device *dev)
 741{
 742	unsigned int i;
 743
 744	/*
  745	 * R-Car Gen3/4 and RZ/G2 use the allow list to opt devices in.
  746	 * For other SoCs, this always returns true.
 747	 */
 748	if (!soc_device_match(soc_needs_opt_in))
 749		return true;
 750
 751	/* Check whether this SoC can use the IPMMU correctly or not */
 752	if (soc_device_match(soc_denylist))
 753		return false;
 754
 755	/* Check whether this device can work with the IPMMU */
 756	for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
 757		if (!strcmp(dev_name(dev), devices_allowlist[i]))
 758			return true;
 759	}
 760
 761	/* Otherwise, do not allow use of IPMMU */
 762	return false;
 763}
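/*
 * Example of the resulting policy, derived from the tables above: on an
 * R-Car Gen3/Gen4 or RZ/G2 SoC that is not in soc_denylist, only the four
 * listed MMC controllers (e.g. "ee140000.mmc") may use the IPMMU; every
 * other master gets -ENODEV from ipmmu_of_xlate(). On SoCs outside
 * soc_needs_opt_in, all masters are allowed.
 */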
 764
 765static int ipmmu_of_xlate(struct device *dev,
 766			  struct of_phandle_args *spec)
 767{
 768	if (!ipmmu_device_is_allowed(dev))
 769		return -ENODEV;
 770
 771	iommu_fwspec_add_ids(dev, spec->args, 1);
 772
 773	/* Initialize once - xlate() will call multiple times */
 774	if (to_ipmmu(dev))
 775		return 0;
 776
 777	return ipmmu_init_platform_device(dev, spec);
 778}
 779
 780static int ipmmu_init_arm_mapping(struct device *dev)
 781{
 782	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 783	int ret;
 784
 785	/*
 786	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
 787	 * VAs. This will allocate a corresponding IOMMU domain.
 788	 *
 789	 * TODO:
 790	 * - Create one mapping per context (TLB).
 791	 * - Make the mapping size configurable ? We currently use a 2GB mapping
 792	 *   at a 1GB offset to ensure that NULL VAs will fault.
 793	 */
 794	if (!mmu->mapping) {
 795		struct dma_iommu_mapping *mapping;
 796
 797		mapping = arm_iommu_create_mapping(&platform_bus_type,
 798						   SZ_1G, SZ_2G);
 799		if (IS_ERR(mapping)) {
 800			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
 801			ret = PTR_ERR(mapping);
 802			goto error;
 803		}
 804
 805		mmu->mapping = mapping;
 806	}
 807
 808	/* Attach the ARM VA mapping to the device. */
 809	ret = arm_iommu_attach_device(dev, mmu->mapping);
 810	if (ret < 0) {
 811		dev_err(dev, "Failed to attach device to VA mapping\n");
 812		goto error;
 813	}
 814
 815	return 0;
 816
 817error:
 818	if (mmu->mapping)
 819		arm_iommu_release_mapping(mmu->mapping);
 820
 821	return ret;
 822}
 823
 824static struct iommu_device *ipmmu_probe_device(struct device *dev)
 825{
 826	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 827
 828	/*
 829	 * Only let through devices that have been verified in xlate()
 830	 */
 831	if (!mmu)
 832		return ERR_PTR(-ENODEV);
 833
 834	return &mmu->iommu;
 835}
 836
 837static void ipmmu_probe_finalize(struct device *dev)
 838{
 839	int ret = 0;
 840
 841	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
 842		ret = ipmmu_init_arm_mapping(dev);
 843
 844	if (ret)
 845		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
 846}
 847
 848static void ipmmu_release_device(struct device *dev)
 849{
 850	arm_iommu_detach_device(dev);
 851}
 852
 853static struct iommu_group *ipmmu_find_group(struct device *dev)
 854{
 855	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 856	struct iommu_group *group;
 857
 858	if (mmu->group)
 859		return iommu_group_ref_get(mmu->group);
 860
 861	group = iommu_group_alloc();
 862	if (!IS_ERR(group))
 863		mmu->group = group;
 864
 865	return group;
 866}
 867
 868static const struct iommu_ops ipmmu_ops = {
 869	.domain_alloc = ipmmu_domain_alloc,
 870	.probe_device = ipmmu_probe_device,
 871	.release_device = ipmmu_release_device,
 872	.probe_finalize = ipmmu_probe_finalize,
 873	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
 874			? generic_device_group : ipmmu_find_group,
 875	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
 876	.of_xlate = ipmmu_of_xlate,
 877	.default_domain_ops = &(const struct iommu_domain_ops) {
 878		.attach_dev	= ipmmu_attach_device,
 879		.detach_dev	= ipmmu_detach_device,
 880		.map_pages	= ipmmu_map,
 881		.unmap_pages	= ipmmu_unmap,
 882		.flush_iotlb_all = ipmmu_flush_iotlb_all,
 883		.iotlb_sync	= ipmmu_iotlb_sync,
 884		.iova_to_phys	= ipmmu_iova_to_phys,
 885		.free		= ipmmu_domain_free,
 886	}
 887};
 888
 889/* -----------------------------------------------------------------------------
 890 * Probe/remove and init
 891 */
 892
 893static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
 894{
 895	unsigned int i;
 896
 897	/* Disable all contexts. */
 898	for (i = 0; i < mmu->num_ctx; ++i)
 899		ipmmu_ctx_write(mmu, i, IMCTR, 0);
 900}
 901
 902static const struct ipmmu_features ipmmu_features_default = {
 903	.use_ns_alias_offset = true,
 904	.has_cache_leaf_nodes = false,
 905	.number_of_contexts = 1, /* software only tested with one context */
 906	.num_utlbs = 32,
 907	.setup_imbuscr = true,
 908	.twobit_imttbcr_sl0 = false,
 909	.reserved_context = false,
 910	.cache_snoop = true,
 911	.ctx_offset_base = 0,
 912	.ctx_offset_stride = 0x40,
 913	.utlb_offset_base = 0,
 914};
 915
 916static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
 917	.use_ns_alias_offset = false,
 918	.has_cache_leaf_nodes = true,
 919	.number_of_contexts = 8,
 920	.num_utlbs = 48,
 921	.setup_imbuscr = false,
 922	.twobit_imttbcr_sl0 = true,
 923	.reserved_context = true,
 924	.cache_snoop = false,
 925	.ctx_offset_base = 0,
 926	.ctx_offset_stride = 0x40,
 927	.utlb_offset_base = 0,
 928};
 929
 930static const struct ipmmu_features ipmmu_features_rcar_gen4 = {
 931	.use_ns_alias_offset = false,
 932	.has_cache_leaf_nodes = true,
 933	.number_of_contexts = 16,
 934	.num_utlbs = 64,
 935	.setup_imbuscr = false,
 936	.twobit_imttbcr_sl0 = true,
 937	.reserved_context = true,
 938	.cache_snoop = false,
 939	.ctx_offset_base = 0x10000,
 940	.ctx_offset_stride = 0x1040,
 941	.utlb_offset_base = 0x3000,
 942};
 943
 944static const struct of_device_id ipmmu_of_ids[] = {
 945	{
 946		.compatible = "renesas,ipmmu-vmsa",
 947		.data = &ipmmu_features_default,
 948	}, {
 949		.compatible = "renesas,ipmmu-r8a774a1",
 950		.data = &ipmmu_features_rcar_gen3,
 951	}, {
 952		.compatible = "renesas,ipmmu-r8a774b1",
 953		.data = &ipmmu_features_rcar_gen3,
 954	}, {
 955		.compatible = "renesas,ipmmu-r8a774c0",
 956		.data = &ipmmu_features_rcar_gen3,
 957	}, {
 958		.compatible = "renesas,ipmmu-r8a774e1",
 959		.data = &ipmmu_features_rcar_gen3,
 960	}, {
 961		.compatible = "renesas,ipmmu-r8a7795",
 962		.data = &ipmmu_features_rcar_gen3,
 963	}, {
 964		.compatible = "renesas,ipmmu-r8a7796",
 965		.data = &ipmmu_features_rcar_gen3,
 966	}, {
 967		.compatible = "renesas,ipmmu-r8a77961",
 968		.data = &ipmmu_features_rcar_gen3,
 969	}, {
 970		.compatible = "renesas,ipmmu-r8a77965",
 971		.data = &ipmmu_features_rcar_gen3,
 972	}, {
 973		.compatible = "renesas,ipmmu-r8a77970",
 974		.data = &ipmmu_features_rcar_gen3,
 975	}, {
 976		.compatible = "renesas,ipmmu-r8a77980",
 977		.data = &ipmmu_features_rcar_gen3,
 978	}, {
 979		.compatible = "renesas,ipmmu-r8a77990",
 980		.data = &ipmmu_features_rcar_gen3,
 981	}, {
 982		.compatible = "renesas,ipmmu-r8a77995",
 983		.data = &ipmmu_features_rcar_gen3,
 984	}, {
 985		.compatible = "renesas,ipmmu-r8a779a0",
 986		.data = &ipmmu_features_rcar_gen4,
 987	}, {
 988		.compatible = "renesas,rcar-gen4-ipmmu-vmsa",
 989		.data = &ipmmu_features_rcar_gen4,
 990	}, {
 991		/* Terminator */
 992	},
 993};
 994
 995static int ipmmu_probe(struct platform_device *pdev)
 996{
 997	struct ipmmu_vmsa_device *mmu;
 998	struct resource *res;
 999	int irq;
1000	int ret;
1001
1002	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
1003	if (!mmu) {
1004		dev_err(&pdev->dev, "cannot allocate device data\n");
1005		return -ENOMEM;
1006	}
1007
1008	mmu->dev = &pdev->dev;
1009	spin_lock_init(&mmu->lock);
1010	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
1011	mmu->features = of_device_get_match_data(&pdev->dev);
1012	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
1013	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
1014	if (ret)
1015		return ret;
1016
1017	/* Map I/O memory and request IRQ. */
1018	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1019	mmu->base = devm_ioremap_resource(&pdev->dev, res);
1020	if (IS_ERR(mmu->base))
1021		return PTR_ERR(mmu->base);
1022
1023	/*
1024	 * The IPMMU has two register banks, for secure and non-secure modes.
1025	 * The bank mapped at the beginning of the IPMMU address space
1026	 * corresponds to the running mode of the CPU. When running in secure
1027	 * mode the non-secure register bank is also available at an offset.
1028	 *
1029	 * Secure mode operation isn't clearly documented and is thus currently
1030	 * not implemented in the driver. Furthermore, preliminary tests of
1031	 * non-secure operation with the main register bank were not successful.
1032	 * Offset the registers base unconditionally to point to the non-secure
1033	 * alias space for now.
1034	 */
1035	if (mmu->features->use_ns_alias_offset)
1036		mmu->base += IM_NS_ALIAS_OFFSET;
1037
1038	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
1039
1040	/*
1041	 * Determine if this IPMMU instance is a root device by checking for
1042	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
1043	 */
1044	if (!mmu->features->has_cache_leaf_nodes ||
1045	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
1046		mmu->root = mmu;
1047	else
1048		mmu->root = ipmmu_find_root();
1049
1050	/*
1051	 * Wait until the root device has been registered for sure.
1052	 */
1053	if (!mmu->root)
1054		return -EPROBE_DEFER;
1055
1056	/* Root devices have mandatory IRQs */
1057	if (ipmmu_is_root(mmu)) {
1058		irq = platform_get_irq(pdev, 0);
1059		if (irq < 0)
1060			return irq;
1061
1062		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
1063				       dev_name(&pdev->dev), mmu);
1064		if (ret < 0) {
1065			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
1066			return ret;
1067		}
1068
1069		ipmmu_device_reset(mmu);
1070
1071		if (mmu->features->reserved_context) {
1072			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
1073			set_bit(0, mmu->ctx);
1074		}
1075	}
1076
1077	/*
1078	 * Register the IPMMU to the IOMMU subsystem in the following cases:
1079	 * - R-Car Gen2 IPMMU (all devices registered)
1080	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
1081	 */
1082	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
1083		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
1084					     dev_name(&pdev->dev));
1085		if (ret)
1086			return ret;
1087
1088		ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
1089		if (ret)
1090			return ret;
1091	}
1092
1093	/*
1094	 * We can't create the ARM mapping here as it requires the bus to have
1095	 * an IOMMU, which only happens when bus_set_iommu() is called in
1096	 * ipmmu_init() after the probe function returns.
1097	 */
1098
1099	platform_set_drvdata(pdev, mmu);
1100
1101	return 0;
1102}
1103
1104static int ipmmu_remove(struct platform_device *pdev)
1105{
1106	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
1107
1108	iommu_device_sysfs_remove(&mmu->iommu);
1109	iommu_device_unregister(&mmu->iommu);
1110
1111	arm_iommu_release_mapping(mmu->mapping);
1112
1113	ipmmu_device_reset(mmu);
1114
1115	return 0;
1116}
1117
1118#ifdef CONFIG_PM_SLEEP
1119static int ipmmu_resume_noirq(struct device *dev)
1120{
1121	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
1122	unsigned int i;
1123
1124	/* Reset root MMU and restore contexts */
1125	if (ipmmu_is_root(mmu)) {
1126		ipmmu_device_reset(mmu);
1127
1128		for (i = 0; i < mmu->num_ctx; i++) {
1129			if (!mmu->domains[i])
1130				continue;
1131
1132			ipmmu_domain_setup_context(mmu->domains[i]);
1133		}
1134	}
1135
1136	/* Re-enable active micro-TLBs */
1137	for (i = 0; i < mmu->features->num_utlbs; i++) {
1138		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
1139			continue;
1140
1141		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
1142	}
1143
1144	return 0;
1145}
1146
1147static const struct dev_pm_ops ipmmu_pm  = {
1148	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
1149};
1150#define DEV_PM_OPS	&ipmmu_pm
1151#else
1152#define DEV_PM_OPS	NULL
1153#endif /* CONFIG_PM_SLEEP */
1154
1155static struct platform_driver ipmmu_driver = {
1156	.driver = {
1157		.name = "ipmmu-vmsa",
1158		.of_match_table = of_match_ptr(ipmmu_of_ids),
1159		.pm = DEV_PM_OPS,
1160	},
1161	.probe = ipmmu_probe,
1162	.remove	= ipmmu_remove,
1163};
1164builtin_platform_driver(ipmmu_driver);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * IOMMU API for Renesas VMSA-compatible IPMMU
   4 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
   5 *
   6 * Copyright (C) 2014-2020 Renesas Electronics Corporation
   7 */
   8
   9#include <linux/bitmap.h>
  10#include <linux/delay.h>
  11#include <linux/dma-iommu.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/err.h>
  14#include <linux/export.h>
  15#include <linux/init.h>
  16#include <linux/interrupt.h>
  17#include <linux/io.h>
  18#include <linux/io-pgtable.h>
  19#include <linux/iommu.h>
  20#include <linux/of.h>
  21#include <linux/of_device.h>
  22#include <linux/of_platform.h>
  23#include <linux/platform_device.h>
  24#include <linux/sizes.h>
  25#include <linux/slab.h>
  26#include <linux/sys_soc.h>
  27
  28#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
  29#include <asm/dma-iommu.h>
  30#else
  31#define arm_iommu_create_mapping(...)	NULL
  32#define arm_iommu_attach_device(...)	-ENODEV
  33#define arm_iommu_release_mapping(...)	do {} while (0)
  34#define arm_iommu_detach_device(...)	do {} while (0)
  35#endif
  36
  37#define IPMMU_CTX_MAX		8U
  38#define IPMMU_CTX_INVALID	-1
  39
  40#define IPMMU_UTLB_MAX		48U
  41
  42struct ipmmu_features {
  43	bool use_ns_alias_offset;
  44	bool has_cache_leaf_nodes;
  45	unsigned int number_of_contexts;
  46	unsigned int num_utlbs;
  47	bool setup_imbuscr;
  48	bool twobit_imttbcr_sl0;
  49	bool reserved_context;
  50	bool cache_snoop;
  51	unsigned int ctx_offset_base;
  52	unsigned int ctx_offset_stride;
  53	unsigned int utlb_offset_base;
  54};
  55
  56struct ipmmu_vmsa_device {
  57	struct device *dev;
  58	void __iomem *base;
  59	struct iommu_device iommu;
  60	struct ipmmu_vmsa_device *root;
  61	const struct ipmmu_features *features;
  62	unsigned int num_ctx;
  63	spinlock_t lock;			/* Protects ctx and domains[] */
  64	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
  65	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
  66	s8 utlb_ctx[IPMMU_UTLB_MAX];
  67
  68	struct iommu_group *group;
  69	struct dma_iommu_mapping *mapping;
  70};
  71
  72struct ipmmu_vmsa_domain {
  73	struct ipmmu_vmsa_device *mmu;
  74	struct iommu_domain io_domain;
  75
  76	struct io_pgtable_cfg cfg;
  77	struct io_pgtable_ops *iop;
  78
  79	unsigned int context_id;
  80	struct mutex mutex;			/* Protects mappings */
  81};
  82
  83static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
  84{
  85	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
  86}
  87
  88static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
  89{
  90	return dev_iommu_priv_get(dev);
  91}
  92
  93#define TLB_LOOP_TIMEOUT		100	/* 100us */
  94
  95/* -----------------------------------------------------------------------------
  96 * Registers Definition
  97 */
  98
  99#define IM_NS_ALIAS_OFFSET		0x800
 100
 101/* MMU "context" registers */
 102#define IMCTR				0x0000		/* R-Car Gen2/3 */
 103#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
 104#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
 105#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */
 106
 107#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
 108#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
 109#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
 110#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
 111#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
 112#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
 113#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */
 114
 115#define IMBUSCR				0x000c		/* R-Car Gen2 only */
 116#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
 117#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */
 118
 119#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
 120#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */
 121
 122#define IMSTR				0x0020		/* R-Car Gen2/3 */
 123#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
 124#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
 125#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
 126#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */
 127
 128#define IMMAIR0				0x0028		/* R-Car Gen2/3 */
 129
 130#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
 131#define IMEUAR				0x0034		/* R-Car Gen3 only */
 132
 133/* uTLB registers */
 134#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
 135#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
 136#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
 137#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
 138#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
 139#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */
 140
 141#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
 142#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
 143#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */
 144
 145/* -----------------------------------------------------------------------------
 146 * Root device handling
 147 */
 148
 149static struct platform_driver ipmmu_driver;
 150
 151static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
 152{
 153	return mmu->root == mmu;
 154}
 155
 156static int __ipmmu_check_device(struct device *dev, void *data)
 157{
 158	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
 159	struct ipmmu_vmsa_device **rootp = data;
 160
 161	if (ipmmu_is_root(mmu))
 162		*rootp = mmu;
 163
 164	return 0;
 165}
 166
 167static struct ipmmu_vmsa_device *ipmmu_find_root(void)
 168{
 169	struct ipmmu_vmsa_device *root = NULL;
 170
 171	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
 172				      __ipmmu_check_device) == 0 ? root : NULL;
 173}
 174
 175/* -----------------------------------------------------------------------------
 176 * Read/Write Access
 177 */
 178
 179static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
 180{
 181	return ioread32(mmu->base + offset);
 182}
 183
 184static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
 185			u32 data)
 186{
 187	iowrite32(data, mmu->base + offset);
 188}
 189
 190static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
 191				  unsigned int context_id, unsigned int reg)
 192{
 193	return mmu->features->ctx_offset_base +
 194	       context_id * mmu->features->ctx_offset_stride + reg;
 195}
 196
 197static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
 198			  unsigned int context_id, unsigned int reg)
 199{
 200	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
 201}
 202
 203static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
 204			    unsigned int context_id, unsigned int reg, u32 data)
 205{
 206	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
 207}
 208
 209static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
 210			       unsigned int reg)
 211{
 212	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
 213}
 214
 215static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
 216				 unsigned int reg, u32 data)
 217{
 218	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
 219}
 220
 221static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
 222				unsigned int reg, u32 data)
 223{
 224	if (domain->mmu != domain->mmu->root)
 225		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
 226
 227	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
 228}
 229
 230static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
 231{
 232	return mmu->features->utlb_offset_base + reg;
 233}
 234
 235static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
 236				unsigned int utlb, u32 data)
 237{
 238	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
 239}
 240
 241static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
 242			       unsigned int utlb, u32 data)
 243{
 244	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
 245}
 246
 247/* -----------------------------------------------------------------------------
 248 * TLB and microTLB Management
 249 */
 250
 251/* Wait for any pending TLB invalidations to complete */
 252static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
 253{
 254	unsigned int count = 0;
 255
 256	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
 257		cpu_relax();
 258		if (++count == TLB_LOOP_TIMEOUT) {
 259			dev_err_ratelimited(domain->mmu->dev,
 260			"TLB sync timed out -- MMU may be deadlocked\n");
 261			return;
 262		}
 263		udelay(1);
 264	}
 265}
 266
 267static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
 268{
 269	u32 reg;
 270
 271	reg = ipmmu_ctx_read_root(domain, IMCTR);
 272	reg |= IMCTR_FLUSH;
 273	ipmmu_ctx_write_all(domain, IMCTR, reg);
 274
 275	ipmmu_tlb_sync(domain);
 276}
 277
 278/*
 279 * Enable MMU translation for the microTLB.
 280 */
 281static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
 282			      unsigned int utlb)
 283{
 284	struct ipmmu_vmsa_device *mmu = domain->mmu;
 285
 286	/*
 287	 * TODO: Reference-count the microTLB as several bus masters can be
 288	 * connected to the same microTLB.
 289	 */
 290
 291	/* TODO: What should we set the ASID to ? */
 292	ipmmu_imuasid_write(mmu, utlb, 0);
 293	/* TODO: Do we need to flush the microTLB ? */
 294	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
 295				      IMUCTR_FLUSH | IMUCTR_MMUEN);
 296	mmu->utlb_ctx[utlb] = domain->context_id;
 297}
 298
 299/*
 300 * Disable MMU translation for the microTLB.
 301 */
 302static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
 303			       unsigned int utlb)
 304{
 305	struct ipmmu_vmsa_device *mmu = domain->mmu;
 306
 307	ipmmu_imuctr_write(mmu, utlb, 0);
 308	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
 309}
 310
 311static void ipmmu_tlb_flush_all(void *cookie)
 312{
 313	struct ipmmu_vmsa_domain *domain = cookie;
 314
 315	ipmmu_tlb_invalidate(domain);
 316}
 317
 318static void ipmmu_tlb_flush(unsigned long iova, size_t size,
 319				size_t granule, void *cookie)
 320{
 321	ipmmu_tlb_flush_all(cookie);
 322}
 323
 324static const struct iommu_flush_ops ipmmu_flush_ops = {
 325	.tlb_flush_all = ipmmu_tlb_flush_all,
 326	.tlb_flush_walk = ipmmu_tlb_flush,
 327};
 328
 329/* -----------------------------------------------------------------------------
 330 * Domain/Context Management
 331 */
 332
 333static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
 334					 struct ipmmu_vmsa_domain *domain)
 335{
 336	unsigned long flags;
 337	int ret;
 338
 339	spin_lock_irqsave(&mmu->lock, flags);
 340
 341	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
 342	if (ret != mmu->num_ctx) {
 343		mmu->domains[ret] = domain;
 344		set_bit(ret, mmu->ctx);
 345	} else
 346		ret = -EBUSY;
 347
 348	spin_unlock_irqrestore(&mmu->lock, flags);
 349
 350	return ret;
 351}
 352
 353static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
 354				      unsigned int context_id)
 355{
 356	unsigned long flags;
 357
 358	spin_lock_irqsave(&mmu->lock, flags);
 359
 360	clear_bit(context_id, mmu->ctx);
 361	mmu->domains[context_id] = NULL;
 362
 363	spin_unlock_irqrestore(&mmu->lock, flags);
 364}
 365
 366static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
 367{
 368	u64 ttbr;
 369	u32 tmp;
 370
 371	/* TTBR0 */
 372	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
 373	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
 374	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
 375
 376	/*
 377	 * TTBCR
 378	 * We use long descriptors and allocate the whole 32-bit VA space to
 379	 * TTBR0.
 380	 */
 381	if (domain->mmu->features->twobit_imttbcr_sl0)
 382		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
 383	else
 384		tmp = IMTTBCR_SL0_LVL_1;
 385
 386	if (domain->mmu->features->cache_snoop)
 387		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
 388		       IMTTBCR_IRGN0_WB_WA;
 389
 390	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
 391
 392	/* MAIR0 */
 393	ipmmu_ctx_write_root(domain, IMMAIR0,
 394			     domain->cfg.arm_lpae_s1_cfg.mair);
 395
 396	/* IMBUSCR */
 397	if (domain->mmu->features->setup_imbuscr)
 398		ipmmu_ctx_write_root(domain, IMBUSCR,
 399				     ipmmu_ctx_read_root(domain, IMBUSCR) &
 400				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
 401
 402	/*
 403	 * IMSTR
 404	 * Clear all interrupt flags.
 405	 */
 406	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
 407
 408	/*
 409	 * IMCTR
 410	 * Enable the MMU and interrupt generation. The long-descriptor
 411	 * translation table format doesn't use TEX remapping. Don't enable AF
 412	 * software management as we have no use for it. Flush the TLB as
 413	 * required when modifying the context registers.
 414	 */
 415	ipmmu_ctx_write_all(domain, IMCTR,
 416			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
 417}
 418
 419static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 420{
 421	int ret;
 422
 423	/*
 424	 * Allocate the page table operations.
 425	 *
 426	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
 427	 * access, Long-descriptor format" that the NStable bit being set in a
 428	 * table descriptor will result in the NStable and NS bits of all child
 429	 * entries being ignored and considered as being set. The IPMMU seems
 430	 * not to comply with this, as it generates a secure access page fault
 431	 * if any of the NStable and NS bits isn't set when running in
 432	 * non-secure mode.
 433	 */
 434	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
 435	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
 436	domain->cfg.ias = 32;
 437	domain->cfg.oas = 40;
 438	domain->cfg.tlb = &ipmmu_flush_ops;
 439	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
 440	domain->io_domain.geometry.force_aperture = true;
 441	/*
 442	 * TODO: Add support for coherent walk through CCI with DVM and remove
 443	 * cache handling. For now, delegate it to the io-pgtable code.
 444	 */
 445	domain->cfg.coherent_walk = false;
 446	domain->cfg.iommu_dev = domain->mmu->root->dev;
 447
 448	/*
 449	 * Find an unused context.
 450	 */
 451	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
 452	if (ret < 0)
 453		return ret;
 454
 455	domain->context_id = ret;
 456
 457	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
 458					   domain);
 459	if (!domain->iop) {
 460		ipmmu_domain_free_context(domain->mmu->root,
 461					  domain->context_id);
 462		return -EINVAL;
 463	}
 464
 465	ipmmu_domain_setup_context(domain);
 466	return 0;
 467}
 468
 469static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
 470{
 471	if (!domain->mmu)
 472		return;
 473
 474	/*
 475	 * Disable the context. Flush the TLB as required when modifying the
 476	 * context registers.
 477	 *
 478	 * TODO: Is TLB flush really needed ?
 479	 */
 480	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
 481	ipmmu_tlb_sync(domain);
 482	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
 483}
 484
 485/* -----------------------------------------------------------------------------
 486 * Fault Handling
 487 */
 488
 489static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 490{
 491	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
 492	struct ipmmu_vmsa_device *mmu = domain->mmu;
 493	unsigned long iova;
 494	u32 status;
 495
 496	status = ipmmu_ctx_read_root(domain, IMSTR);
 497	if (!(status & err_mask))
 498		return IRQ_NONE;
 499
 500	iova = ipmmu_ctx_read_root(domain, IMELAR);
 501	if (IS_ENABLED(CONFIG_64BIT))
 502		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
 503
 504	/*
 505	 * Clear the error status flags. Unlike traditional interrupt flag
 506	 * registers that must be cleared by writing 1, this status register
 507	 * seems to require 0. The error address register must be read before,
 508	 * otherwise its value will be 0.
 509	 */
 510	ipmmu_ctx_write_root(domain, IMSTR, 0);
 511
 512	/* Log fatal errors. */
 513	if (status & IMSTR_MHIT)
 514		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
 515				    iova);
 516	if (status & IMSTR_ABORT)
 517		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
 518				    iova);
 519
 520	if (!(status & (IMSTR_PF | IMSTR_TF)))
 521		return IRQ_NONE;
 522
 523	/*
 524	 * Try to handle page faults and translation faults.
 525	 *
 526	 * TODO: We need to look up the faulty device based on the I/O VA. Use
 527	 * the IOMMU device for now.
 528	 */
 529	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
 530		return IRQ_HANDLED;
 531
 532	dev_err_ratelimited(mmu->dev,
 533			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
 534			    status, iova);
 535
 536	return IRQ_HANDLED;
 537}
 538
 539static irqreturn_t ipmmu_irq(int irq, void *dev)
 540{
 541	struct ipmmu_vmsa_device *mmu = dev;
 542	irqreturn_t status = IRQ_NONE;
 543	unsigned int i;
 544	unsigned long flags;
 545
 546	spin_lock_irqsave(&mmu->lock, flags);
 547
 548	/*
 549	 * Check interrupts for all active contexts.
 550	 */
 551	for (i = 0; i < mmu->num_ctx; i++) {
 552		if (!mmu->domains[i])
 553			continue;
 554		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
 555			status = IRQ_HANDLED;
 556	}
 557
 558	spin_unlock_irqrestore(&mmu->lock, flags);
 559
 560	return status;
 561}
 562
 563/* -----------------------------------------------------------------------------
 564 * IOMMU Operations
 565 */
 566
 567static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
 568{
 569	struct ipmmu_vmsa_domain *domain;
 570
 571	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 572	if (!domain)
 573		return NULL;
 574
 575	mutex_init(&domain->mutex);
 576
 577	return &domain->io_domain;
 578}
 579
 580static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
 581{
 582	struct iommu_domain *io_domain = NULL;
 583
 584	switch (type) {
 585	case IOMMU_DOMAIN_UNMANAGED:
 586		io_domain = __ipmmu_domain_alloc(type);
 587		break;
 588
 589	case IOMMU_DOMAIN_DMA:
 590		io_domain = __ipmmu_domain_alloc(type);
 591		if (io_domain && iommu_get_dma_cookie(io_domain)) {
 592			kfree(io_domain);
 593			io_domain = NULL;
 594		}
 595		break;
 596	}
 597
 598	return io_domain;
 599}
 600
 601static void ipmmu_domain_free(struct iommu_domain *io_domain)
 602{
 603	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 604
 605	/*
 606	 * Free the domain resources. We assume that all devices have already
 607	 * been detached.
 608	 */
 609	iommu_put_dma_cookie(io_domain);
 610	ipmmu_domain_destroy_context(domain);
 611	free_io_pgtable_ops(domain->iop);
 612	kfree(domain);
 613}
 614
 615static int ipmmu_attach_device(struct iommu_domain *io_domain,
 616			       struct device *dev)
 617{
 618	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 619	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 620	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 621	unsigned int i;
 622	int ret = 0;
 623
 624	if (!mmu) {
 625		dev_err(dev, "Cannot attach to IPMMU\n");
 626		return -ENXIO;
 627	}
 628
 629	mutex_lock(&domain->mutex);
 630
 631	if (!domain->mmu) {
 632		/* The domain hasn't been used yet, initialize it. */
 633		domain->mmu = mmu;
 634		ret = ipmmu_domain_init_context(domain);
 635		if (ret < 0) {
 636			dev_err(dev, "Unable to initialize IPMMU context\n");
 637			domain->mmu = NULL;
 638		} else {
 639			dev_info(dev, "Using IPMMU context %u\n",
 640				 domain->context_id);
 641		}
 642	} else if (domain->mmu != mmu) {
 643		/*
 644		 * Something is wrong, we can't attach two devices using
 645		 * different IOMMUs to the same domain.
 646		 */
 647		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
 648			dev_name(mmu->dev), dev_name(domain->mmu->dev));
 649		ret = -EINVAL;
 650	} else
 651		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
 652
 653	mutex_unlock(&domain->mutex);
 654
 655	if (ret < 0)
 656		return ret;
 657
 658	for (i = 0; i < fwspec->num_ids; ++i)
 659		ipmmu_utlb_enable(domain, fwspec->ids[i]);
 660
 661	return 0;
 662}
 663
 664static void ipmmu_detach_device(struct iommu_domain *io_domain,
 665				struct device *dev)
 666{
 667	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 668	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 669	unsigned int i;
 670
 671	for (i = 0; i < fwspec->num_ids; ++i)
 672		ipmmu_utlb_disable(domain, fwspec->ids[i]);
 673
 674	/*
 675	 * TODO: Optimize by disabling the context when no device is attached.
 676	 */
 677}
 678
 679static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
 680		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 681{
 682	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 683
 684	if (!domain)
 685		return -ENODEV;
 686
 687	return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
 688}
 689
 690static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
 691			  size_t size, struct iommu_iotlb_gather *gather)
 692{
 693	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 694
 695	return domain->iop->unmap(domain->iop, iova, size, gather);
 696}
 697
 698static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
 699{
 700	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 701
 702	if (domain->mmu)
 703		ipmmu_tlb_flush_all(domain);
 704}
 705
 706static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
 707			     struct iommu_iotlb_gather *gather)
 708{
 709	ipmmu_flush_iotlb_all(io_domain);
 710}
 711
 712static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
 713				      dma_addr_t iova)
 714{
 715	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 716
 717	/* TODO: Is locking needed? */
 718
 719	return domain->iop->iova_to_phys(domain->iop, iova);
 720}
 721
 722static int ipmmu_init_platform_device(struct device *dev,
 723				      struct of_phandle_args *args)
 724{
 725	struct platform_device *ipmmu_pdev;
 726
 727	ipmmu_pdev = of_find_device_by_node(args->np);
 728	if (!ipmmu_pdev)
 729		return -ENODEV;
 730
 731	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
 732
 733	return 0;
 734}
 735
 736static const struct soc_device_attribute soc_needs_opt_in[] = {
 737	{ .family = "R-Car Gen3", },
 738	{ .family = "RZ/G2", },
 739	{ /* sentinel */ }
 740};
 741
 742static const struct soc_device_attribute soc_denylist[] = {
 743	{ .soc_id = "r8a774a1", },
 744	{ .soc_id = "r8a7795", .revision = "ES1.*" },
 745	{ .soc_id = "r8a7795", .revision = "ES2.*" },
 746	{ .soc_id = "r8a7796", },
 747	{ /* sentinel */ }
 748};
 749
 750static const char * const devices_allowlist[] = {
 751	"ee100000.mmc",
 752	"ee120000.mmc",
 753	"ee140000.mmc",
 754	"ee160000.mmc"
 755};
 756
 757static bool ipmmu_device_is_allowed(struct device *dev)
 758{
 759	unsigned int i;
 760
 761	/*
 762	 * R-Car Gen3 and RZ/G2 use the allow list to opt in devices.
 763	 * For other SoCs, this function always returns true.
 764	 */
 765	if (!soc_device_match(soc_needs_opt_in))
 766		return true;
 767
 768	/* Check whether this SoC can use the IPMMU correctly or not */
 769	if (soc_device_match(soc_denylist))
 770		return false;
 771
 772	/* Check whether this device can work with the IPMMU */
 773	for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
 774		if (!strcmp(dev_name(dev), devices_allowlist[i]))
 775			return true;
 776	}
 777
 778	/* Otherwise, do not allow use of IPMMU */
 779	return false;
 780}
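/*
 * The allow list above matches dev_name() of platform devices instantiated
 * from the device tree, which (assuming the default OF platform naming) is
 * "<unit-address>.<node-name>"; "ee100000.mmc" would therefore correspond to
 * an SD/MMC controller node at unit address 0xee100000.
 */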
 781
 782static int ipmmu_of_xlate(struct device *dev,
 783			  struct of_phandle_args *spec)
 784{
 785	if (!ipmmu_device_is_allowed(dev))
 786		return -ENODEV;
 787
 788	iommu_fwspec_add_ids(dev, spec->args, 1);
 789
 790	/* Initialize once - xlate() may be called multiple times */
 791	if (to_ipmmu(dev))
 792		return 0;
 793
 794	return ipmmu_init_platform_device(dev, spec);
 795}
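/*
 * The single specifier cell consumed above is the micro-TLB index for the
 * consumer device. As a minimal sketch (node names and the index value are
 * illustrative, not taken from this file), a consumer binding might look
 * like:
 *
 *	sdhi0: mmc@ee100000 {
 *		...
 *		iommus = <&ipmmu_ds1 32>;
 *	};
 *
 * with #iommu-cells = <1> on the referenced IPMMU node. The cell value ends
 * up in fwspec->ids[] and is later handed to ipmmu_utlb_enable() when the
 * device is attached to a domain.
 */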
 796
 797static int ipmmu_init_arm_mapping(struct device *dev)
 798{
 799	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 800	int ret;
 801
 802	/*
 803	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
 804	 * VAs. This will allocate a corresponding IOMMU domain.
 805	 *
 806	 * TODO:
 807	 * - Create one mapping per context (TLB).
 808	 * - Make the mapping size configurable? We currently use a 2GB mapping
 809	 *   at a 1GB offset to ensure that NULL VAs will fault.
 810	 */
 811	if (!mmu->mapping) {
 812		struct dma_iommu_mapping *mapping;
 813
 814		mapping = arm_iommu_create_mapping(&platform_bus_type,
 815						   SZ_1G, SZ_2G);
 816		if (IS_ERR(mapping)) {
 817			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
 818			ret = PTR_ERR(mapping);
 819			goto error;
 820		}
 821
 822		mmu->mapping = mapping;
 823	}
 824
 825	/* Attach the ARM VA mapping to the device. */
 826	ret = arm_iommu_attach_device(dev, mmu->mapping);
 827	if (ret < 0) {
 828		dev_err(dev, "Failed to attach device to VA mapping\n");
 829		goto error;
 830	}
 831
 832	return 0;
 833
 834error:
 835	if (mmu->mapping)
 836		arm_iommu_release_mapping(mmu->mapping);
 837
 838	return ret;
 839}
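/*
 * Quick arithmetic behind the SZ_1G/SZ_2G choice above: the mapping covers
 * IOVAs 0x40000000 through 0xbfffffff, so IOVA 0 (a NULL DMA address) falls
 * outside the mapping and faults instead of silently translating.
 */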
 840
 841static struct iommu_device *ipmmu_probe_device(struct device *dev)
 842{
 843	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 844
 845	/*
 846	 * Only let through devices that have been verified in xlate()
 847	 */
 848	if (!mmu)
 849		return ERR_PTR(-ENODEV);
 850
 851	return &mmu->iommu;
 852}
 853
 854static void ipmmu_probe_finalize(struct device *dev)
 855{
 856	int ret = 0;
 857
 858	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
 859		ret = ipmmu_init_arm_mapping(dev);
 860
 861	if (ret)
 862		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
 863}
 864
 865static void ipmmu_release_device(struct device *dev)
 866{
 867	arm_iommu_detach_device(dev);
 868}
 869
 870static struct iommu_group *ipmmu_find_group(struct device *dev)
 871{
 872	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 873	struct iommu_group *group;
 874
 875	if (mmu->group)
 876		return iommu_group_ref_get(mmu->group);
 877
 878	group = iommu_group_alloc();
 879	if (!IS_ERR(group))
 880		mmu->group = group;
 881
 882	return group;
 883}
 884
 885static const struct iommu_ops ipmmu_ops = {
 886	.domain_alloc = ipmmu_domain_alloc,
 887	.domain_free = ipmmu_domain_free,
 888	.attach_dev = ipmmu_attach_device,
 889	.detach_dev = ipmmu_detach_device,
 890	.map = ipmmu_map,
 891	.unmap = ipmmu_unmap,
 892	.flush_iotlb_all = ipmmu_flush_iotlb_all,
 893	.iotlb_sync = ipmmu_iotlb_sync,
 894	.iova_to_phys = ipmmu_iova_to_phys,
 895	.probe_device = ipmmu_probe_device,
 896	.release_device = ipmmu_release_device,
 897	.probe_finalize = ipmmu_probe_finalize,
 898	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
 899			? generic_device_group : ipmmu_find_group,
 900	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
 901	.of_xlate = ipmmu_of_xlate,
 902};
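/*
 * The page size bitmap above (SZ_1G | SZ_2M | SZ_4K) mirrors the block and
 * page sizes offered by the VMSA long-descriptor (LPAE) stage-1 format with
 * a 4KB granule used by the io-pgtable code: 1GB level-1 blocks, 2MB level-2
 * blocks and 4KB level-3 pages.
 */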
 903
 904/* -----------------------------------------------------------------------------
 905 * Probe/remove and init
 906 */
 907
 908static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
 909{
 910	unsigned int i;
 911
 912	/* Disable all contexts. */
 913	for (i = 0; i < mmu->num_ctx; ++i)
 914		ipmmu_ctx_write(mmu, i, IMCTR, 0);
 915}
 916
 917static const struct ipmmu_features ipmmu_features_default = {
 918	.use_ns_alias_offset = true,
 919	.has_cache_leaf_nodes = false,
 920	.number_of_contexts = 1, /* software only tested with one context */
 921	.num_utlbs = 32,
 922	.setup_imbuscr = true,
 923	.twobit_imttbcr_sl0 = false,
 924	.reserved_context = false,
 925	.cache_snoop = true,
 926	.ctx_offset_base = 0,
 927	.ctx_offset_stride = 0x40,
 928	.utlb_offset_base = 0,
 929};
 930
 931static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
 932	.use_ns_alias_offset = false,
 933	.has_cache_leaf_nodes = true,
 934	.number_of_contexts = 8,
 935	.num_utlbs = 48,
 936	.setup_imbuscr = false,
 937	.twobit_imttbcr_sl0 = true,
 938	.reserved_context = true,
 939	.cache_snoop = false,
 940	.ctx_offset_base = 0,
 941	.ctx_offset_stride = 0x40,
 942	.utlb_offset_base = 0,
 943};
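/*
 * With the ctx_offset_base/ctx_offset_stride values above, a per-context
 * register is expected to be addressed roughly as
 *
 *	base + ctx_offset_base + context_id * ctx_offset_stride + reg
 *
 * i.e. contexts sit 0x40 bytes apart from the start of the register block on
 * both of the feature sets described here.
 */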
 944
 945static const struct of_device_id ipmmu_of_ids[] = {
 946	{
 947		.compatible = "renesas,ipmmu-vmsa",
 948		.data = &ipmmu_features_default,
 949	}, {
 950		.compatible = "renesas,ipmmu-r8a774a1",
 951		.data = &ipmmu_features_rcar_gen3,
 952	}, {
 953		.compatible = "renesas,ipmmu-r8a774b1",
 954		.data = &ipmmu_features_rcar_gen3,
 955	}, {
 956		.compatible = "renesas,ipmmu-r8a774c0",
 957		.data = &ipmmu_features_rcar_gen3,
 958	}, {
 959		.compatible = "renesas,ipmmu-r8a774e1",
 960		.data = &ipmmu_features_rcar_gen3,
 961	}, {
 962		.compatible = "renesas,ipmmu-r8a7795",
 963		.data = &ipmmu_features_rcar_gen3,
 964	}, {
 965		.compatible = "renesas,ipmmu-r8a7796",
 966		.data = &ipmmu_features_rcar_gen3,
 967	}, {
 968		.compatible = "renesas,ipmmu-r8a77961",
 969		.data = &ipmmu_features_rcar_gen3,
 970	}, {
 971		.compatible = "renesas,ipmmu-r8a77965",
 972		.data = &ipmmu_features_rcar_gen3,
 973	}, {
 974		.compatible = "renesas,ipmmu-r8a77970",
 975		.data = &ipmmu_features_rcar_gen3,
 976	}, {
 977		.compatible = "renesas,ipmmu-r8a77990",
 978		.data = &ipmmu_features_rcar_gen3,
 979	}, {
 980		.compatible = "renesas,ipmmu-r8a77995",
 981		.data = &ipmmu_features_rcar_gen3,
 982	}, {
 983		/* Terminator */
 984	},
 985};
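/*
 * A minimal sketch of a matching R-Car Gen3 cache (leaf) IPMMU node, with
 * the unit address and main-IPMMU index purely illustrative:
 *
 *	ipmmu_ds1: iommu@e7740000 {
 *		compatible = "renesas,ipmmu-r8a7795";
 *		reg = <0 0xe7740000 0 0x1000>;
 *		renesas,ipmmu-main = <&ipmmu_mm 1>;
 *		#iommu-cells = <1>;
 *	};
 *
 * The presence of the "renesas,ipmmu-main" property is what makes
 * ipmmu_probe() below treat the instance as a leaf rather than as the root
 * IPMMU-MM device.
 */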
 986
 987static int ipmmu_probe(struct platform_device *pdev)
 988{
 989	struct ipmmu_vmsa_device *mmu;
 990	struct resource *res;
 991	int irq;
 992	int ret;
 993
 994	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
 995	if (!mmu) {
 996		dev_err(&pdev->dev, "cannot allocate device data\n");
 997		return -ENOMEM;
 998	}
 999
1000	mmu->dev = &pdev->dev;
1001	spin_lock_init(&mmu->lock);
1002	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
1003	mmu->features = of_device_get_match_data(&pdev->dev);
1004	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
1005	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
1006
1007	/* Map I/O memory and request IRQ. */
1008	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1009	mmu->base = devm_ioremap_resource(&pdev->dev, res);
1010	if (IS_ERR(mmu->base))
1011		return PTR_ERR(mmu->base);
1012
1013	/*
1014	 * The IPMMU has two register banks, for secure and non-secure modes.
1015	 * The bank mapped at the beginning of the IPMMU address space
1016	 * corresponds to the running mode of the CPU. When running in secure
1017	 * mode the non-secure register bank is also available at an offset.
1018	 *
1019	 * Secure mode operation isn't clearly documented and is thus currently
1020	 * not implemented in the driver. Furthermore, preliminary tests of
1021	 * non-secure operation with the main register bank were not successful.
1022	 * Offset the registers base unconditionally to point to the non-secure
1023	 * alias space for now.
1024	 */
1025	if (mmu->features->use_ns_alias_offset)
1026		mmu->base += IM_NS_ALIAS_OFFSET;
1027
1028	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
1029
1030	/*
1031	 * Determine if this IPMMU instance is a root device by checking for
1032	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
1033	 */
1034	if (!mmu->features->has_cache_leaf_nodes ||
1035	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
1036		mmu->root = mmu;
1037	else
1038		mmu->root = ipmmu_find_root();
1039
1040	/*
1041	 * Wait until the root device has been registered for sure.
1042	 */
1043	if (!mmu->root)
1044		return -EPROBE_DEFER;
1045
1046	/* Root devices have mandatory IRQs */
1047	if (ipmmu_is_root(mmu)) {
1048		irq = platform_get_irq(pdev, 0);
1049		if (irq < 0)
1050			return irq;
1051
1052		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
1053				       dev_name(&pdev->dev), mmu);
1054		if (ret < 0) {
1055			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
1056			return ret;
1057		}
1058
1059		ipmmu_device_reset(mmu);
1060
1061		if (mmu->features->reserved_context) {
1062			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
1063			set_bit(0, mmu->ctx);
1064		}
1065	}
1066
1067	/*
1068	 * Register the IPMMU to the IOMMU subsystem in the following cases:
1069	 * - R-Car Gen2 IPMMU (all devices registered)
1070	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
1071	 */
1072	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
1073		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
1074					     dev_name(&pdev->dev));
1075		if (ret)
1076			return ret;
1077
1078		ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
1079		if (ret)
1080			return ret;
1081
1082#if defined(CONFIG_IOMMU_DMA)
1083		if (!iommu_present(&platform_bus_type))
1084			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
1085#endif
1086	}
1087
1088	/*
1089	 * We can't create the ARM mapping here as it requires the bus to have
1090	 * an IOMMU, which only happens when bus_set_iommu() is called in
1091	 * ipmmu_init() after the probe function returns.
1092	 */
1093
1094	platform_set_drvdata(pdev, mmu);
1095
1096	return 0;
1097}
1098
1099static int ipmmu_remove(struct platform_device *pdev)
1100{
1101	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
1102
1103	iommu_device_sysfs_remove(&mmu->iommu);
1104	iommu_device_unregister(&mmu->iommu);
1105
1106	arm_iommu_release_mapping(mmu->mapping);
1107
1108	ipmmu_device_reset(mmu);
1109
1110	return 0;
1111}
1112
1113#ifdef CONFIG_PM_SLEEP
1114static int ipmmu_resume_noirq(struct device *dev)
1115{
1116	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
1117	unsigned int i;
1118
1119	/* Reset root MMU and restore contexts */
1120	if (ipmmu_is_root(mmu)) {
1121		ipmmu_device_reset(mmu);
1122
1123		for (i = 0; i < mmu->num_ctx; i++) {
1124			if (!mmu->domains[i])
1125				continue;
1126
1127			ipmmu_domain_setup_context(mmu->domains[i]);
1128		}
1129	}
1130
1131	/* Re-enable active micro-TLBs */
1132	for (i = 0; i < mmu->features->num_utlbs; i++) {
1133		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
1134			continue;
1135
1136		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
1137	}
1138
1139	return 0;
1140}
1141
1142static const struct dev_pm_ops ipmmu_pm  = {
1143	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
1144};
1145#define DEV_PM_OPS	&ipmmu_pm
1146#else
1147#define DEV_PM_OPS	NULL
1148#endif /* CONFIG_PM_SLEEP */
1149
1150static struct platform_driver ipmmu_driver = {
1151	.driver = {
1152		.name = "ipmmu-vmsa",
1153		.of_match_table = of_match_ptr(ipmmu_of_ids),
1154		.pm = DEV_PM_OPS,
1155	},
1156	.probe = ipmmu_probe,
1157	.remove	= ipmmu_remove,
1158};
1159
1160static int __init ipmmu_init(void)
1161{
1162	struct device_node *np;
1163	static bool setup_done;
1164	int ret;
1165
1166	if (setup_done)
1167		return 0;
1168
1169	np = of_find_matching_node(NULL, ipmmu_of_ids);
1170	if (!np)
1171		return 0;
1172
1173	of_node_put(np);
1174
1175	ret = platform_driver_register(&ipmmu_driver);
1176	if (ret < 0)
1177		return ret;
1178
1179#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
1180	if (!iommu_present(&platform_bus_type))
1181		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
1182#endif
1183
1184	setup_done = true;
1185	return 0;
1186}
1187subsys_initcall(ipmmu_init);