v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/* pci_sun4v.c: SUN4V specific PCI controller support.
   3 *
   4 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/types.h>
   9#include <linux/pci.h>
  10#include <linux/init.h>
  11#include <linux/slab.h>
  12#include <linux/interrupt.h>
  13#include <linux/percpu.h>
  14#include <linux/irq.h>
  15#include <linux/msi.h>
  16#include <linux/export.h>
  17#include <linux/log2.h>
  18#include <linux/of_device.h>
  19#include <linux/dma-map-ops.h>
  20#include <asm/iommu-common.h>
  21
  22#include <asm/iommu.h>
  23#include <asm/irq.h>
  24#include <asm/hypervisor.h>
  25#include <asm/prom.h>
  26
  27#include "pci_impl.h"
  28#include "iommu_common.h"
  29#include "kernel.h"
  30
  31#include "pci_sun4v.h"
  32
  33#define DRIVER_NAME	"pci_sun4v"
  34#define PFX		DRIVER_NAME ": "
  35
  36static unsigned long vpci_major;
  37static unsigned long vpci_minor;
  38
  39struct vpci_version {
  40	unsigned long major;
  41	unsigned long minor;
  42};
  43
  44/* Ordered from largest major to lowest */
  45static struct vpci_version vpci_versions[] = {
  46	{ .major = 2, .minor = 0 },
  47	{ .major = 1, .minor = 1 },
  48};
  49
  50static unsigned long vatu_major = 1;
  51static unsigned long vatu_minor = 1;
  52
  53#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
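/* Illustrative aside (not in the original file): assuming sparc64's 8 KB
 * pages (PAGE_SHIFT == 13), PGLIST_NENTS works out to 8192 / 8 = 1024,
 * i.e. each per-cpu batch below can stage up to 1024 page addresses
 * before a flush is forced.
 */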
  54
  55struct iommu_batch {
  56	struct device	*dev;		/* Device mapping is for.	*/
  57	unsigned long	prot;		/* IOMMU page protections	*/
  58	unsigned long	entry;		/* Index into IOTSB.		*/
  59	u64		*pglist;	/* List of physical pages	*/
  60	unsigned long	npages;		/* Number of pages in list.	*/
  61};
  62
  63static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
  64static int iommu_batch_initialized;
  65
  66/* Interrupts must be disabled.  */
  67static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
  68{
  69	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
  70
  71	p->dev		= dev;
  72	p->prot		= prot;
  73	p->entry	= entry;
  74	p->npages	= 0;
  75}
  76
  77static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
  78{
  79	return iommu->atu && mask > DMA_BIT_MASK(32);
  80}
  81
  82/* Interrupts must be disabled.  */
  83static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
  84{
  85	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
  86	u64 *pglist = p->pglist;
  87	u64 index_count;
  88	unsigned long devhandle = pbm->devhandle;
  89	unsigned long prot = p->prot;
  90	unsigned long entry = p->entry;
  91	unsigned long npages = p->npages;
  92	unsigned long iotsb_num;
  93	unsigned long ret;
  94	long num;
  95
  96	/* VPCI maj=1, min=[0,1] only supports read and write */
  97	if (vpci_major < 2)
  98		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
  99
 100	while (npages != 0) {
 101		if (!iommu_use_atu(pbm->iommu, mask)) {
 102			num = pci_sun4v_iommu_map(devhandle,
 103						  HV_PCI_TSBID(0, entry),
 104						  npages,
 105						  prot,
 106						  __pa(pglist));
 107			if (unlikely(num < 0)) {
 108				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
 109						   __func__,
 110						   devhandle,
 111						   HV_PCI_TSBID(0, entry),
 112						   npages, prot, __pa(pglist),
 113						   num);
 114				return -1;
 115			}
 116		} else {
  117			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
 118			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
 119			ret = pci_sun4v_iotsb_map(devhandle,
 120						  iotsb_num,
 121						  index_count,
 122						  prot,
 123						  __pa(pglist),
 124						  &num);
 125			if (unlikely(ret != HV_EOK)) {
 126				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
 127						   __func__,
 128						   devhandle, iotsb_num,
 129						   index_count, prot,
 130						   __pa(pglist), ret);
 131				return -1;
 132			}
 133		}
 134		entry += num;
 135		npages -= num;
 136		pglist += num;
 137	}
 138
 139	p->entry = entry;
 140	p->npages = 0;
 141
 142	return 0;
 143}
 144
 145static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
 146{
 147	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 148
 149	if (p->entry + p->npages == entry)
 150		return;
 151	if (p->entry != ~0UL)
 152		iommu_batch_flush(p, mask);
 153	p->entry = entry;
 154}
 155
 156/* Interrupts must be disabled.  */
 157static inline long iommu_batch_add(u64 phys_page, u64 mask)
 158{
 159	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 160
 161	BUG_ON(p->npages >= PGLIST_NENTS);
 162
 163	p->pglist[p->npages++] = phys_page;
 164	if (p->npages == PGLIST_NENTS)
 165		return iommu_batch_flush(p, mask);
 166
 167	return 0;
 168}
 169
 170/* Interrupts must be disabled.  */
 171static inline long iommu_batch_end(u64 mask)
 172{
 173	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 174
 175	BUG_ON(p->npages >= PGLIST_NENTS);
 176
 177	return iommu_batch_flush(p, mask);
 178}
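/* Illustrative sketch (not part of the original file): with interrupts
 * disabled, callers drive the batch API above as
 *
 *	iommu_batch_start(dev, prot, entry);
 *	for (n = 0; n < npages; n++)
 *		if (iommu_batch_add(paddr + (n << IO_PAGE_SHIFT), mask) < 0L)
 *			goto fail;
 *	if (iommu_batch_end(mask) < 0L)
 *		goto fail;
 *
 * which is the shape dma_4v_alloc_coherent() and dma_4v_map_page() take
 * below ('paddr', 'npages' and the 'fail' label stand in for the caller's
 * own locals).
 */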
 179
 180static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 181				   dma_addr_t *dma_addrp, gfp_t gfp,
 182				   unsigned long attrs)
 183{
 184	u64 mask;
 185	unsigned long flags, order, first_page, npages, n;
 186	unsigned long prot = 0;
 187	struct iommu *iommu;
 188	struct iommu_map_table *tbl;
 189	struct page *page;
 190	void *ret;
 191	long entry;
 192	int nid;
 193
 194	size = IO_PAGE_ALIGN(size);
 195	order = get_order(size);
 196	if (unlikely(order >= MAX_ORDER))
 197		return NULL;
 198
 199	npages = size >> IO_PAGE_SHIFT;
 200
 201	if (attrs & DMA_ATTR_WEAK_ORDERING)
 202		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;
 203
 204	nid = dev->archdata.numa_node;
 205	page = alloc_pages_node(nid, gfp, order);
 206	if (unlikely(!page))
 207		return NULL;
 208
 209	first_page = (unsigned long) page_address(page);
 210	memset((char *)first_page, 0, PAGE_SIZE << order);
 211
 212	iommu = dev->archdata.iommu;
 213	mask = dev->coherent_dma_mask;
 214	if (!iommu_use_atu(iommu, mask))
 215		tbl = &iommu->tbl;
 216	else
 217		tbl = &iommu->atu->tbl;
 218
 219	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 220				      (unsigned long)(-1), 0);
 221
 222	if (unlikely(entry == IOMMU_ERROR_CODE))
 223		goto range_alloc_fail;
 224
 225	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 226	ret = (void *) first_page;
 227	first_page = __pa(first_page);
 228
 229	local_irq_save(flags);
 230
 231	iommu_batch_start(dev,
 232			  (HV_PCI_MAP_ATTR_READ | prot |
 233			   HV_PCI_MAP_ATTR_WRITE),
 234			  entry);
 235
 236	for (n = 0; n < npages; n++) {
 237		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
 238		if (unlikely(err < 0L))
 239			goto iommu_map_fail;
 240	}
 241
 242	if (unlikely(iommu_batch_end(mask) < 0L))
 243		goto iommu_map_fail;
 244
 245	local_irq_restore(flags);
 246
 247	return ret;
 248
 249iommu_map_fail:
 250	local_irq_restore(flags);
 251	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
 252
 253range_alloc_fail:
 254	free_pages(first_page, order);
 255	return NULL;
 256}
 257
 258unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
 259				unsigned long iotsb_num,
 260				struct pci_bus *bus_dev)
 261{
 262	struct pci_dev *pdev;
 263	unsigned long err;
 264	unsigned int bus;
 265	unsigned int device;
 266	unsigned int fun;
 267
 268	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
 269		if (pdev->subordinate) {
 270			/* No need to bind pci bridge */
 271			dma_4v_iotsb_bind(devhandle, iotsb_num,
 272					  pdev->subordinate);
 273		} else {
 274			bus = bus_dev->number;
 275			device = PCI_SLOT(pdev->devfn);
 276			fun = PCI_FUNC(pdev->devfn);
 277			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
 278						   HV_PCI_DEVICE_BUILD(bus,
 279								       device,
 280								       fun));
 281
  282			/* If bind fails for one device it is going to fail
  283			 * for the rest of the devices as well, because they
  284			 * all share the IOTSB. So in case of failure simply
  285			 * return the error.
  286			 */
 287			if (err)
 288				return err;
 289		}
 290	}
 291
 292	return 0;
 293}
 294
 295static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
 296			       dma_addr_t dvma, unsigned long iotsb_num,
 297			       unsigned long entry, unsigned long npages)
 298{
 299	unsigned long num, flags;
 300	unsigned long ret;
 301
 302	local_irq_save(flags);
 303	do {
 304		if (dvma <= DMA_BIT_MASK(32)) {
 305			num = pci_sun4v_iommu_demap(devhandle,
 306						    HV_PCI_TSBID(0, entry),
 307						    npages);
 308		} else {
 309			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
 310						    entry, npages, &num);
 311			if (unlikely(ret != HV_EOK)) {
 312				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
 313						   ret);
 314			}
 315		}
 316		entry += num;
 317		npages -= num;
 318	} while (npages != 0);
 319	local_irq_restore(flags);
 320}
 321
 322static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 323				 dma_addr_t dvma, unsigned long attrs)
 324{
 325	struct pci_pbm_info *pbm;
 326	struct iommu *iommu;
 327	struct atu *atu;
 328	struct iommu_map_table *tbl;
 329	unsigned long order, npages, entry;
 330	unsigned long iotsb_num;
 331	u32 devhandle;
 332
 333	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 334	iommu = dev->archdata.iommu;
 335	pbm = dev->archdata.host_controller;
 336	atu = iommu->atu;
 337	devhandle = pbm->devhandle;
 338
 339	if (!iommu_use_atu(iommu, dvma)) {
 340		tbl = &iommu->tbl;
  341		iotsb_num = 0; /* unused with the legacy IOMMU */
 342	} else {
 343		tbl = &atu->tbl;
 344		iotsb_num = atu->iotsb->iotsb_num;
 345	}
 346	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
 347	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
 348	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
 349	order = get_order(size);
 350	if (order < 10)
 351		free_pages((unsigned long)cpu, order);
 352}
 353
 354static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 355				  unsigned long offset, size_t sz,
 356				  enum dma_data_direction direction,
 357				  unsigned long attrs)
 358{
 359	struct iommu *iommu;
 360	struct atu *atu;
 361	struct iommu_map_table *tbl;
 362	u64 mask;
 363	unsigned long flags, npages, oaddr;
 364	unsigned long i, base_paddr;
 365	unsigned long prot;
 366	dma_addr_t bus_addr, ret;
 367	long entry;
 368
 369	iommu = dev->archdata.iommu;
 370	atu = iommu->atu;
 371
 372	if (unlikely(direction == DMA_NONE))
 373		goto bad;
 374
 375	oaddr = (unsigned long)(page_address(page) + offset);
 376	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 377	npages >>= IO_PAGE_SHIFT;
 378
 379	mask = *dev->dma_mask;
 380	if (!iommu_use_atu(iommu, mask))
 381		tbl = &iommu->tbl;
 382	else
 383		tbl = &atu->tbl;
 384
 385	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 386				      (unsigned long)(-1), 0);
 387
 388	if (unlikely(entry == IOMMU_ERROR_CODE))
 389		goto bad;
 390
 391	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 392	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 393	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 394	prot = HV_PCI_MAP_ATTR_READ;
 395	if (direction != DMA_TO_DEVICE)
 396		prot |= HV_PCI_MAP_ATTR_WRITE;
 397
 398	if (attrs & DMA_ATTR_WEAK_ORDERING)
 399		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
 400
 401	local_irq_save(flags);
 402
 403	iommu_batch_start(dev, prot, entry);
 404
 405	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
 406		long err = iommu_batch_add(base_paddr, mask);
 407		if (unlikely(err < 0L))
 408			goto iommu_map_fail;
 409	}
 410	if (unlikely(iommu_batch_end(mask) < 0L))
 411		goto iommu_map_fail;
 412
 413	local_irq_restore(flags);
 414
 415	return ret;
 416
 417bad:
 418	if (printk_ratelimit())
 419		WARN_ON(1);
 420	return DMA_MAPPING_ERROR;
 421
 422iommu_map_fail:
 423	local_irq_restore(flags);
 424	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 425	return DMA_MAPPING_ERROR;
 426}
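/* Worked example (illustrative, not in the original file): with 8 KB IO
 * pages, mapping a 768-byte buffer that starts 0x1f00 bytes into a page
 * ends 0x200 bytes into the next page, so the IO_PAGE_ALIGN()/
 * IO_PAGE_MASK arithmetic above yields npages = 2: even a small buffer
 * needs two IOTTEs once it straddles an IO page boundary.
 */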
 427
 428static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 429			      size_t sz, enum dma_data_direction direction,
 430			      unsigned long attrs)
 431{
 432	struct pci_pbm_info *pbm;
 433	struct iommu *iommu;
 434	struct atu *atu;
 435	struct iommu_map_table *tbl;
 436	unsigned long npages;
 437	unsigned long iotsb_num;
 438	long entry;
 439	u32 devhandle;
 440
 441	if (unlikely(direction == DMA_NONE)) {
 442		if (printk_ratelimit())
 443			WARN_ON(1);
 444		return;
 445	}
 446
 447	iommu = dev->archdata.iommu;
 448	pbm = dev->archdata.host_controller;
 449	atu = iommu->atu;
 450	devhandle = pbm->devhandle;
 451
 452	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 453	npages >>= IO_PAGE_SHIFT;
 454	bus_addr &= IO_PAGE_MASK;
 455
 456	if (bus_addr <= DMA_BIT_MASK(32)) {
  457		iotsb_num = 0; /* unused with the legacy IOMMU */
 458		tbl = &iommu->tbl;
 459	} else {
 460		iotsb_num = atu->iotsb->iotsb_num;
 461		tbl = &atu->tbl;
 462	}
 463	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
 464	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
 465	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 466}
 467
 468static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 469			 int nelems, enum dma_data_direction direction,
 470			 unsigned long attrs)
 471{
 472	struct scatterlist *s, *outs, *segstart;
 473	unsigned long flags, handle, prot;
 474	dma_addr_t dma_next = 0, dma_addr;
 475	unsigned int max_seg_size;
 476	unsigned long seg_boundary_size;
 477	int outcount, incount, i;
 478	struct iommu *iommu;
 479	struct atu *atu;
 480	struct iommu_map_table *tbl;
 481	u64 mask;
 482	unsigned long base_shift;
 483	long err;
 484
 485	BUG_ON(direction == DMA_NONE);
 486
 487	iommu = dev->archdata.iommu;
 488	if (nelems == 0 || !iommu)
 489		return -EINVAL;
 490	atu = iommu->atu;
 491
 492	prot = HV_PCI_MAP_ATTR_READ;
 493	if (direction != DMA_TO_DEVICE)
 494		prot |= HV_PCI_MAP_ATTR_WRITE;
 495
 496	if (attrs & DMA_ATTR_WEAK_ORDERING)
 497		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
 498
 499	outs = s = segstart = &sglist[0];
 500	outcount = 1;
 501	incount = nelems;
 502	handle = 0;
 503
 504	/* Init first segment length for backout at failure */
 505	outs->dma_length = 0;
 506
 507	local_irq_save(flags);
 508
 509	iommu_batch_start(dev, prot, ~0UL);
 510
 511	max_seg_size = dma_get_max_seg_size(dev);
 512	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
 513
 514	mask = *dev->dma_mask;
 515	if (!iommu_use_atu(iommu, mask))
 516		tbl = &iommu->tbl;
 517	else
 518		tbl = &atu->tbl;
 519
 520	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
 521
 522	for_each_sg(sglist, s, nelems, i) {
 523		unsigned long paddr, npages, entry, out_entry = 0, slen;
 524
 525		slen = s->length;
 526		/* Sanity check */
 527		if (slen == 0) {
 528			dma_next = 0;
 529			continue;
 530		}
 531		/* Allocate iommu entries for that segment */
 532		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 533		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
 534		entry = iommu_tbl_range_alloc(dev, tbl, npages,
 535					      &handle, (unsigned long)(-1), 0);
 536
 537		/* Handle failure */
 538		if (unlikely(entry == IOMMU_ERROR_CODE)) {
 539			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
 540					   tbl, paddr, npages);
 541			goto iommu_map_failed;
 542		}
 543
 544		iommu_batch_new_entry(entry, mask);
 545
 546		/* Convert entry to a dma_addr_t */
 547		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
 548		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 549
 550		/* Insert into HW table */
 551		paddr &= IO_PAGE_MASK;
 552		while (npages--) {
 553			err = iommu_batch_add(paddr, mask);
 554			if (unlikely(err < 0L))
 555				goto iommu_map_failed;
 556			paddr += IO_PAGE_SIZE;
 557		}
 558
 559		/* If we are in an open segment, try merging */
 560		if (segstart != s) {
  561			/* We cannot merge if the new dma_addr isn't contiguous with
  562			 * the previous segment, the merged length would exceed
  563			 * max_seg_size, or the merge would span a segment boundary. */
 564			if ((dma_addr != dma_next) ||
 565			    (outs->dma_length + s->length > max_seg_size) ||
 566			    (is_span_boundary(out_entry, base_shift,
 567					      seg_boundary_size, outs, s))) {
 568				/* Can't merge: create a new segment */
 569				segstart = s;
 570				outcount++;
 571				outs = sg_next(outs);
 572			} else {
 573				outs->dma_length += s->length;
 574			}
 575		}
 576
 577		if (segstart == s) {
 578			/* This is a new segment, fill entries */
 579			outs->dma_address = dma_addr;
 580			outs->dma_length = slen;
 581			out_entry = entry;
 582		}
 583
 584		/* Calculate next page pointer for contiguous check */
 585		dma_next = dma_addr + slen;
 586	}
 587
 588	err = iommu_batch_end(mask);
 589
 590	if (unlikely(err < 0L))
 591		goto iommu_map_failed;
 592
 593	local_irq_restore(flags);
 594
 595	if (outcount < incount) {
 596		outs = sg_next(outs);
 597		outs->dma_length = 0;
 598	}
 599
 600	return outcount;
 601
 602iommu_map_failed:
 603	for_each_sg(sglist, s, nelems, i) {
 604		if (s->dma_length != 0) {
 605			unsigned long vaddr, npages;
 606
 607			vaddr = s->dma_address & IO_PAGE_MASK;
 608			npages = iommu_num_pages(s->dma_address, s->dma_length,
 609						 IO_PAGE_SIZE);
 610			iommu_tbl_range_free(tbl, vaddr, npages,
 611					     IOMMU_ERROR_CODE);
 612			/* XXX demap? XXX */
 613			s->dma_length = 0;
 614		}
 615		if (s == outs)
 616			break;
 617	}
 618	local_irq_restore(flags);
 619
 620	return -EINVAL;
 621}
 622
 623static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 624			    int nelems, enum dma_data_direction direction,
 625			    unsigned long attrs)
 626{
 627	struct pci_pbm_info *pbm;
 628	struct scatterlist *sg;
 629	struct iommu *iommu;
 630	struct atu *atu;
 631	unsigned long flags, entry;
 632	unsigned long iotsb_num;
 633	u32 devhandle;
 634
 635	BUG_ON(direction == DMA_NONE);
 636
 637	iommu = dev->archdata.iommu;
 638	pbm = dev->archdata.host_controller;
 639	atu = iommu->atu;
 640	devhandle = pbm->devhandle;
  641
 642	local_irq_save(flags);
 643
 644	sg = sglist;
 645	while (nelems--) {
 646		dma_addr_t dma_handle = sg->dma_address;
 647		unsigned int len = sg->dma_length;
 648		unsigned long npages;
 649		struct iommu_map_table *tbl;
 650		unsigned long shift = IO_PAGE_SHIFT;
 651
 652		if (!len)
 653			break;
 654		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
 655
 656		if (dma_handle <= DMA_BIT_MASK(32)) {
  657			iotsb_num = 0; /* unused with the legacy IOMMU */
 658			tbl = &iommu->tbl;
 659		} else {
 660			iotsb_num = atu->iotsb->iotsb_num;
 661			tbl = &atu->tbl;
 662		}
 663		entry = ((dma_handle - tbl->table_map_base) >> shift);
 664		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
 665				   entry, npages);
 666		iommu_tbl_range_free(tbl, dma_handle, npages,
 667				     IOMMU_ERROR_CODE);
 668		sg = sg_next(sg);
 669	}
 670
 671	local_irq_restore(flags);
 672}
 673
 674static int dma_4v_supported(struct device *dev, u64 device_mask)
 675{
 676	struct iommu *iommu = dev->archdata.iommu;
 677
 678	if (ali_sound_dma_hack(dev, device_mask))
 679		return 1;
 680	if (device_mask < iommu->dma_addr_mask)
 681		return 0;
 682	return 1;
 683}
 684
 685static const struct dma_map_ops sun4v_dma_ops = {
 686	.alloc				= dma_4v_alloc_coherent,
 687	.free				= dma_4v_free_coherent,
 688	.map_page			= dma_4v_map_page,
 689	.unmap_page			= dma_4v_unmap_page,
 690	.map_sg				= dma_4v_map_sg,
 691	.unmap_sg			= dma_4v_unmap_sg,
 692	.dma_supported			= dma_4v_supported,
 693};
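/* These ops are not called directly; they are reached through the generic
 * DMA API once dma_ops is pointed at sun4v_dma_ops in pci_sun4v_probe().
 * A hypothetical sketch of a PCI driver on such a machine:
 *
 *	dma_addr_t h = dma_map_page(&pdev->dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, h))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(&pdev->dev, h, len, DMA_TO_DEVICE);
 *
 * ends up in dma_4v_map_page()/dma_4v_unmap_page() above ('pdev', 'page'
 * and 'len' are assumed driver locals).
 */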
 694
 695static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 696{
 697	struct property *prop;
 698	struct device_node *dp;
 699
 700	dp = pbm->op->dev.of_node;
 701	prop = of_find_property(dp, "66mhz-capable", NULL);
 702	pbm->is_66mhz_capable = (prop != NULL);
 703	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
 704
 705	/* XXX register error interrupt handlers XXX */
 706}
 707
 708static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 709					    struct iommu_map_table *iommu)
 710{
 711	struct iommu_pool *pool;
 712	unsigned long i, pool_nr, cnt = 0;
 713	u32 devhandle;
 714
 715	devhandle = pbm->devhandle;
 716	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
 717		pool = &(iommu->pools[pool_nr]);
 718		for (i = pool->start; i <= pool->end; i++) {
 719			unsigned long ret, io_attrs, ra;
 720
 721			ret = pci_sun4v_iommu_getmap(devhandle,
 722						     HV_PCI_TSBID(0, i),
 723						     &io_attrs, &ra);
 724			if (ret == HV_EOK) {
 725				if (page_in_phys_avail(ra)) {
 726					pci_sun4v_iommu_demap(devhandle,
 727							      HV_PCI_TSBID(0,
 728							      i), 1);
 729				} else {
 730					cnt++;
 731					__set_bit(i, iommu->map);
 732				}
 733			}
 734		}
 735	}
 736	return cnt;
 737}
 738
 739static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
 740{
 741	struct atu *atu = pbm->iommu->atu;
 742	struct atu_iotsb *iotsb;
 743	void *table;
 744	u64 table_size;
 745	u64 iotsb_num;
 746	unsigned long order;
 747	unsigned long err;
 748
 749	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
 750	if (!iotsb) {
 751		err = -ENOMEM;
 752		goto out_err;
 753	}
 754	atu->iotsb = iotsb;
 755
 756	/* calculate size of IOTSB */
 757	table_size = (atu->size / IO_PAGE_SIZE) * 8;
 758	order = get_order(table_size);
 759	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 760	if (!table) {
 761		err = -ENOMEM;
 762		goto table_failed;
 763	}
 764	iotsb->table = table;
 765	iotsb->ra = __pa(table);
 766	iotsb->dvma_size = atu->size;
 767	iotsb->dvma_base = atu->base;
 768	iotsb->table_size = table_size;
 769	iotsb->page_size = IO_PAGE_SIZE;
 770
 771	/* configure and register IOTSB with HV */
 772	err = pci_sun4v_iotsb_conf(pbm->devhandle,
 773				   iotsb->ra,
 774				   iotsb->table_size,
 775				   iotsb->page_size,
 776				   iotsb->dvma_base,
 777				   &iotsb_num);
 778	if (err) {
 779		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
 780		goto iotsb_conf_failed;
 781	}
 782	iotsb->iotsb_num = iotsb_num;
 783
 784	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
 785	if (err) {
 786		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
 787		goto iotsb_conf_failed;
 788	}
 789
 790	return 0;
 791
 792iotsb_conf_failed:
 793	free_pages((unsigned long)table, order);
 794table_failed:
 795	kfree(iotsb);
 796out_err:
 797	return err;
 798}
 799
 800static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
 801{
 802	struct atu *atu = pbm->iommu->atu;
 803	unsigned long err;
 804	const u64 *ranges;
 805	u64 map_size, num_iotte;
 806	u64 dma_mask;
 807	const u32 *page_size;
 808	int len;
 809
 810	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
 811				 &len);
 812	if (!ranges) {
 813		pr_err(PFX "No iommu-address-ranges\n");
 814		return -EINVAL;
 815	}
 816
 817	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
 818				    NULL);
 819	if (!page_size) {
 820		pr_err(PFX "No iommu-pagesizes\n");
 821		return -EINVAL;
 822	}
 823
  824	/* There are 4 supported iommu-address-ranges. Each range is a pair
  825	 * of {base, size}. ranges[0] and ranges[1] are 32-bit address space,
  826	 * while ranges[2] and ranges[3] are 64-bit space.  We want to use
  827	 * the 64-bit ranges to support 64-bit addressing. Because the 'size'
  828	 * of ranges[2] and ranges[3] is the same, we can select either of
  829	 * them for mapping. However, since that 'size' is too large for the
  830	 * OS to allocate an IOTSB for, we use a fixed size of 32G
  831	 * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe
  832	 * devices to share.
  833	 */
 834	atu->ranges = (struct atu_ranges *)ranges;
 835	atu->base = atu->ranges[3].base;
 836	atu->size = ATU_64_SPACE_SIZE;
 837
 838	/* Create IOTSB */
 839	err = pci_sun4v_atu_alloc_iotsb(pbm);
 840	if (err) {
 841		pr_err(PFX "Error creating ATU IOTSB\n");
 842		return err;
 843	}
 844
 845	/* Create ATU iommu map.
 846	 * One bit represents one iotte in IOTSB table.
 847	 */
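	/* Worked numbers (illustrative, not in the original file): assuming
	 * 8 KB IO pages and the 32G ATU_64_SPACE_SIZE described above,
	 * num_iotte = 32G / 8K = 4M, so the IOTSB allocated in
	 * pci_sun4v_atu_alloc_iotsb() is 4M * 8 bytes = 32 MB and this
	 * bitmap is 4M / 8 = 512 KB.
	 */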
 848	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
 849	num_iotte = atu->size / IO_PAGE_SIZE;
 850	map_size = num_iotte / 8;
 851	atu->tbl.table_map_base = atu->base;
 852	atu->dma_addr_mask = dma_mask;
 853	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
 854	if (!atu->tbl.map)
 855		return -ENOMEM;
 856
 857	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
 858			    NULL, false /* no large_pool */,
 859			    0 /* default npools */,
 860			    false /* want span boundary checking */);
 861
 862	return 0;
 863}
 864
 865static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 866{
 867	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
 868	struct iommu *iommu = pbm->iommu;
 869	unsigned long num_tsb_entries, sz;
 870	u32 dma_mask, dma_offset;
 871	const u32 *vdma;
 872
 873	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
 874	if (!vdma)
 875		vdma = vdma_default;
 876
 877	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
 878		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
 879		       vdma[0], vdma[1]);
 880		return -EINVAL;
 881	}
 882
 883	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
 884	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
 885
 886	dma_offset = vdma[0];
 887
 888	/* Setup initial software IOMMU state. */
 889	spin_lock_init(&iommu->lock);
 890	iommu->ctx_lowest_free = 1;
 891	iommu->tbl.table_map_base = dma_offset;
 892	iommu->dma_addr_mask = dma_mask;
 893
 894	/* Allocate and initialize the free area map.  */
 895	sz = (num_tsb_entries + 7) / 8;
 896	sz = (sz + 7UL) & ~7UL;
 897	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
 898	if (!iommu->tbl.map) {
  899		printk(KERN_ERR PFX "Error, kzalloc(tbl.map) failed.\n");
 900		return -ENOMEM;
 901	}
 902	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
 903			    NULL, false /* no large_pool */,
 904			    0 /* default npools */,
 905			    false /* want span boundary checking */);
 906	sz = probe_existing_entries(pbm, &iommu->tbl);
 907	if (sz)
 908		printk("%s: Imported %lu TSB entries from OBP\n",
 909		       pbm->name, sz);
 910
 911	return 0;
 912}
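/* Worked numbers (illustrative, not in the original file): with the
 * default virtual-dma property {0x80000000, 0x80000000}, the IOMMU window
 * is 2 GB starting at 2 GB, num_tsb_entries = 2G / 8K = 262144 (assuming
 * 8 KB IO pages), dma_mask = 0x7fffffff relative to the window, and the
 * free-area bitmap above is 262144 / 8 = 32 KB.
 */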
 913
 914#ifdef CONFIG_PCI_MSI
 915struct pci_sun4v_msiq_entry {
 916	u64		version_type;
 917#define MSIQ_VERSION_MASK		0xffffffff00000000UL
 918#define MSIQ_VERSION_SHIFT		32
 919#define MSIQ_TYPE_MASK			0x00000000000000ffUL
 920#define MSIQ_TYPE_SHIFT			0
 921#define MSIQ_TYPE_NONE			0x00
 922#define MSIQ_TYPE_MSG			0x01
 923#define MSIQ_TYPE_MSI32			0x02
 924#define MSIQ_TYPE_MSI64			0x03
 925#define MSIQ_TYPE_INTX			0x08
 926#define MSIQ_TYPE_NONE2			0xff
 927
 928	u64		intx_sysino;
 929	u64		reserved1;
 930	u64		stick;
 931	u64		req_id;  /* bus/device/func */
 932#define MSIQ_REQID_BUS_MASK		0xff00UL
 933#define MSIQ_REQID_BUS_SHIFT		8
 934#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
 935#define MSIQ_REQID_DEVICE_SHIFT		3
 936#define MSIQ_REQID_FUNC_MASK		0x0007UL
 937#define MSIQ_REQID_FUNC_SHIFT		0
 938
 939	u64		msi_address;
 940
 941	/* The format of this value is message type dependent.
 942	 * For MSI bits 15:0 are the data from the MSI packet.
 943	 * For MSI-X bits 31:0 are the data from the MSI packet.
  944	 * For MSG, the value holds the message code and routing code, where:
  945	 * 	bits 39:32 are the bus/device/fn of the msg target-id
  946	 *	bits 18:16 are the message routing code
  947	 *	bits 7:0 are the message code
 948	 * For INTx the low order 2-bits are:
 949	 *	00 - INTA
 950	 *	01 - INTB
 951	 *	10 - INTC
 952	 *	11 - INTD
 953	 */
 954	u64		msi_data;
 955
 956	u64		reserved2;
 957};
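/* Hypothetical helpers (not in the original file) showing how the
 * MSIQ_REQID_* masks above would decode an entry's requester ID:
 */
static inline unsigned long msiq_req_id_bus(u64 req_id)
{
	return (req_id & MSIQ_REQID_BUS_MASK) >> MSIQ_REQID_BUS_SHIFT;
}

static inline unsigned long msiq_req_id_device(u64 req_id)
{
	return (req_id & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;
}

static inline unsigned long msiq_req_id_func(u64 req_id)
{
	return (req_id & MSIQ_REQID_FUNC_MASK) >> MSIQ_REQID_FUNC_SHIFT;
}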
 958
 959static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
 960			      unsigned long *head)
 961{
 962	unsigned long err, limit;
 963
 964	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
 965	if (unlikely(err))
 966		return -ENXIO;
 967
 968	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
 969	if (unlikely(*head >= limit))
 970		return -EFBIG;
 971
 972	return 0;
 973}
 974
 975static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
 976				 unsigned long msiqid, unsigned long *head,
 977				 unsigned long *msi)
 978{
 979	struct pci_sun4v_msiq_entry *ep;
 980	unsigned long err, type;
 981
 982	/* Note: void pointer arithmetic, 'head' is a byte offset  */
 983	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
 984				 (pbm->msiq_ent_count *
 985				  sizeof(struct pci_sun4v_msiq_entry))) +
 986	      *head);
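	/* For illustration (queue size assumed, not from the file): each
	 * entry is 8 u64s = 64 bytes, so with msiq_ent_count == 1024 every
	 * queue spans 64 KB of msi_queues and *head steps through it in
	 * 64-byte increments, wrapping to 0 at the end as done below.
	 */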
 987
 988	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
 989		return 0;
 990
 991	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
 992	if (unlikely(type != MSIQ_TYPE_MSI32 &&
 993		     type != MSIQ_TYPE_MSI64))
 994		return -EINVAL;
 995
 996	*msi = ep->msi_data;
 997
 998	err = pci_sun4v_msi_setstate(pbm->devhandle,
 999				     ep->msi_data /* msi_num */,
1000				     HV_MSISTATE_IDLE);
1001	if (unlikely(err))
1002		return -ENXIO;
1003
1004	/* Clear the entry.  */
1005	ep->version_type &= ~MSIQ_TYPE_MASK;
1006
1007	(*head) += sizeof(struct pci_sun4v_msiq_entry);
1008	if (*head >=
1009	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
1010		*head = 0;
1011
1012	return 1;
1013}
1014
1015static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
1016			      unsigned long head)
1017{
1018	unsigned long err;
1019
1020	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
1021	if (unlikely(err))
1022		return -EINVAL;
1023
1024	return 0;
1025}
1026
1027static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
1028			       unsigned long msi, int is_msi64)
1029{
1030	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
1031				  (is_msi64 ?
1032				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
1033		return -ENXIO;
1034	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
1035		return -ENXIO;
1036	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
1037		return -ENXIO;
1038	return 0;
1039}
1040
1041static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
1042{
1043	unsigned long err, msiqid;
1044
1045	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
1046	if (err)
1047		return -ENXIO;
1048
1049	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
1050
1051	return 0;
1052}
1053
1054static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
1055{
1056	unsigned long q_size, alloc_size, pages, order;
1057	int i;
1058
1059	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
1060	alloc_size = (pbm->msiq_num * q_size);
1061	order = get_order(alloc_size);
1062	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
1063	if (pages == 0UL) {
1064		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
1065		       order);
1066		return -ENOMEM;
1067	}
1068	memset((char *)pages, 0, PAGE_SIZE << order);
1069	pbm->msi_queues = (void *) pages;
1070
1071	for (i = 0; i < pbm->msiq_num; i++) {
1072		unsigned long err, base = __pa(pages + (i * q_size));
1073		unsigned long ret1, ret2;
1074
1075		err = pci_sun4v_msiq_conf(pbm->devhandle,
1076					  pbm->msiq_first + i,
1077					  base, pbm->msiq_ent_count);
1078		if (err) {
1079			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
1080			       err);
1081			goto h_error;
1082		}
1083
1084		err = pci_sun4v_msiq_info(pbm->devhandle,
1085					  pbm->msiq_first + i,
1086					  &ret1, &ret2);
1087		if (err) {
1088			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
1089			       err);
1090			goto h_error;
1091		}
1092		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
1093			printk(KERN_ERR "MSI: Bogus qconf "
1094			       "expected[%lx:%x] got[%lx:%lx]\n",
1095			       base, pbm->msiq_ent_count,
1096			       ret1, ret2);
1097			goto h_error;
1098		}
1099	}
1100
1101	return 0;
1102
1103h_error:
1104	free_pages(pages, order);
1105	return -EINVAL;
1106}
1107
1108static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
1109{
1110	unsigned long q_size, alloc_size, pages, order;
1111	int i;
1112
1113	for (i = 0; i < pbm->msiq_num; i++) {
1114		unsigned long msiqid = pbm->msiq_first + i;
1115
1116		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
1117	}
1118
1119	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
1120	alloc_size = (pbm->msiq_num * q_size);
1121	order = get_order(alloc_size);
1122
1123	pages = (unsigned long) pbm->msi_queues;
1124
1125	free_pages(pages, order);
1126
1127	pbm->msi_queues = NULL;
1128}
1129
1130static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
1131				    unsigned long msiqid,
1132				    unsigned long devino)
1133{
1134	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
1135
1136	if (!irq)
1137		return -ENOMEM;
1138
1139	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
1140		return -EINVAL;
1141	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
1142		return -EINVAL;
1143
1144	return irq;
1145}
1146
1147static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
1148	.get_head	=	pci_sun4v_get_head,
1149	.dequeue_msi	=	pci_sun4v_dequeue_msi,
1150	.set_head	=	pci_sun4v_set_head,
1151	.msi_setup	=	pci_sun4v_msi_setup,
1152	.msi_teardown	=	pci_sun4v_msi_teardown,
1153	.msiq_alloc	=	pci_sun4v_msiq_alloc,
1154	.msiq_free	=	pci_sun4v_msiq_free,
1155	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
1156};
1157
1158static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1159{
1160	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
1161}
1162#else /* CONFIG_PCI_MSI */
1163static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1164{
1165}
1166#endif /* !(CONFIG_PCI_MSI) */
1167
1168static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
1169			      struct platform_device *op, u32 devhandle)
1170{
1171	struct device_node *dp = op->dev.of_node;
1172	int err;
1173
1174	pbm->numa_node = of_node_to_nid(dp);
1175
1176	pbm->pci_ops = &sun4v_pci_ops;
1177	pbm->config_space_reg_bits = 12;
1178
1179	pbm->index = pci_num_pbms++;
1180
1181	pbm->op = op;
1182
1183	pbm->devhandle = devhandle;
1184
1185	pbm->name = dp->full_name;
1186
1187	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
1188	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);
1189
1190	pci_determine_mem_io_space(pbm);
1191
1192	pci_get_pbm_props(pbm);
1193
1194	err = pci_sun4v_iommu_init(pbm);
1195	if (err)
1196		return err;
1197
1198	pci_sun4v_msi_init(pbm);
1199
1200	pci_sun4v_scan_bus(pbm, &op->dev);
1201
 1202	/* If atu_init fails, it is not a complete failure;
 1203	 * we can still continue using the legacy IOMMU.
 1204	 */
1205	if (pbm->iommu->atu) {
1206		err = pci_sun4v_atu_init(pbm);
1207		if (err) {
1208			kfree(pbm->iommu->atu);
1209			pbm->iommu->atu = NULL;
1210			pr_err(PFX "ATU init failed, err=%d\n", err);
1211		}
1212	}
1213
1214	pbm->next = pci_pbm_root;
1215	pci_pbm_root = pbm;
1216
1217	return 0;
1218}
1219
1220static int pci_sun4v_probe(struct platform_device *op)
1221{
1222	const struct linux_prom64_registers *regs;
1223	static int hvapi_negotiated = 0;
1224	struct pci_pbm_info *pbm;
1225	struct device_node *dp;
1226	struct iommu *iommu;
1227	struct atu *atu;
1228	u32 devhandle;
1229	int i, err = -ENODEV;
1230	static bool hv_atu = true;
1231
1232	dp = op->dev.of_node;
1233
1234	if (!hvapi_negotiated++) {
1235		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
1236			vpci_major = vpci_versions[i].major;
1237			vpci_minor = vpci_versions[i].minor;
1238
1239			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
1240						   &vpci_minor);
1241			if (!err)
1242				break;
1243		}
1244
1245		if (err) {
1246			pr_err(PFX "Could not register hvapi, err=%d\n", err);
1247			return err;
1248		}
1249		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
1250			vpci_major, vpci_minor);
1251
1252		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
1253		if (err) {
 1254			/* Don't return an error if we fail to register the
 1255			 * ATU group; ATU hcalls just won't be available.
 1256			 */
1257			hv_atu = false;
1258		} else {
1259			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
1260				vatu_major, vatu_minor);
1261		}
1262
1263		dma_ops = &sun4v_dma_ops;
1264	}
1265
1266	regs = of_get_property(dp, "reg", NULL);
1267	err = -ENODEV;
1268	if (!regs) {
1269		printk(KERN_ERR PFX "Could not find config registers\n");
1270		goto out_err;
1271	}
1272	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1273
1274	err = -ENOMEM;
1275	if (!iommu_batch_initialized) {
1276		for_each_possible_cpu(i) {
1277			unsigned long page = get_zeroed_page(GFP_KERNEL);
1278
1279			if (!page)
1280				goto out_err;
1281
1282			per_cpu(iommu_batch, i).pglist = (u64 *) page;
1283		}
1284		iommu_batch_initialized = 1;
1285	}
1286
1287	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
1288	if (!pbm) {
1289		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
1290		goto out_err;
1291	}
1292
1293	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
1294	if (!iommu) {
1295		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
1296		goto out_free_controller;
1297	}
1298
1299	pbm->iommu = iommu;
1300	iommu->atu = NULL;
1301	if (hv_atu) {
1302		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
1303		if (!atu)
1304			pr_err(PFX "Could not allocate atu\n");
1305		else
1306			iommu->atu = atu;
1307	}
1308
1309	err = pci_sun4v_pbm_init(pbm, op, devhandle);
1310	if (err)
1311		goto out_free_iommu;
1312
1313	dev_set_drvdata(&op->dev, pbm);
1314
1315	return 0;
1316
1317out_free_iommu:
1318	kfree(iommu->atu);
1319	kfree(pbm->iommu);
1320
1321out_free_controller:
1322	kfree(pbm);
1323
1324out_err:
1325	return err;
1326}
1327
1328static const struct of_device_id pci_sun4v_match[] = {
1329	{
1330		.name = "pci",
1331		.compatible = "SUNW,sun4v-pci",
1332	},
1333	{},
1334};
1335
1336static struct platform_driver pci_sun4v_driver = {
1337	.driver = {
1338		.name = DRIVER_NAME,
1339		.of_match_table = pci_sun4v_match,
1340	},
1341	.probe		= pci_sun4v_probe,
1342};
1343
1344static int __init pci_sun4v_init(void)
1345{
1346	return platform_driver_register(&pci_sun4v_driver);
1347}
1348
1349subsys_initcall(pci_sun4v_init);
v3.5.6
 
   1/* pci_sun4v.c: SUN4V specific PCI controller support.
   2 *
   3 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/types.h>
   8#include <linux/pci.h>
   9#include <linux/init.h>
  10#include <linux/slab.h>
  11#include <linux/interrupt.h>
  12#include <linux/percpu.h>
  13#include <linux/irq.h>
  14#include <linux/msi.h>
  15#include <linux/export.h>
  16#include <linux/log2.h>
  17#include <linux/of_device.h>
  18
  19#include <asm/iommu.h>
  20#include <asm/irq.h>
  21#include <asm/hypervisor.h>
  22#include <asm/prom.h>
  23
  24#include "pci_impl.h"
  25#include "iommu_common.h"
  26
  27#include "pci_sun4v.h"
  28
  29#define DRIVER_NAME	"pci_sun4v"
  30#define PFX		DRIVER_NAME ": "
  31
  32static unsigned long vpci_major = 1;
  33static unsigned long vpci_minor = 1;
  34
  35#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
  36
  37struct iommu_batch {
  38	struct device	*dev;		/* Device mapping is for.	*/
  39	unsigned long	prot;		/* IOMMU page protections	*/
  40	unsigned long	entry;		/* Index into IOTSB.		*/
  41	u64		*pglist;	/* List of physical pages	*/
  42	unsigned long	npages;		/* Number of pages in list.	*/
  43};
  44
  45static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
  46static int iommu_batch_initialized;
  47
  48/* Interrupts must be disabled.  */
  49static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
  50{
  51	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
  52
  53	p->dev		= dev;
  54	p->prot		= prot;
  55	p->entry	= entry;
  56	p->npages	= 0;
  57}
  58
  59/* Interrupts must be disabled.  */
  60static long iommu_batch_flush(struct iommu_batch *p)
  61{
  62	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
  63	unsigned long devhandle = pbm->devhandle;
  64	unsigned long prot = p->prot;
  65	unsigned long entry = p->entry;
  66	u64 *pglist = p->pglist;
  67	unsigned long npages = p->npages;
  68
  69	while (npages != 0) {
  70		long num;
  71
  72		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
  73					  npages, prot, __pa(pglist));
  74		if (unlikely(num < 0)) {
  75			if (printk_ratelimit())
  76				printk("iommu_batch_flush: IOMMU map of "
  77				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
  78				       "status %ld\n",
  79				       devhandle, HV_PCI_TSBID(0, entry),
  80				       npages, prot, __pa(pglist), num);
  81			return -1;
  82		}
  83
  84		entry += num;
  85		npages -= num;
  86		pglist += num;
  87	}
  88
  89	p->entry = entry;
  90	p->npages = 0;
  91
  92	return 0;
  93}
  94
  95static inline void iommu_batch_new_entry(unsigned long entry)
  96{
  97	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
  98
  99	if (p->entry + p->npages == entry)
 100		return;
 101	if (p->entry != ~0UL)
 102		iommu_batch_flush(p);
 103	p->entry = entry;
 104}
 105
 106/* Interrupts must be disabled.  */
 107static inline long iommu_batch_add(u64 phys_page)
 108{
 109	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 110
 111	BUG_ON(p->npages >= PGLIST_NENTS);
 112
 113	p->pglist[p->npages++] = phys_page;
 114	if (p->npages == PGLIST_NENTS)
 115		return iommu_batch_flush(p);
 116
 117	return 0;
 118}
 119
 120/* Interrupts must be disabled.  */
 121static inline long iommu_batch_end(void)
 122{
 123	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 124
 125	BUG_ON(p->npages >= PGLIST_NENTS);
 126
 127	return iommu_batch_flush(p);
 128}
 129
 130static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 131				   dma_addr_t *dma_addrp, gfp_t gfp,
 132				   struct dma_attrs *attrs)
 133{
 134	unsigned long flags, order, first_page, npages, n;
 135	struct iommu *iommu;
 136	struct page *page;
 137	void *ret;
 138	long entry;
 139	int nid;
 140
 141	size = IO_PAGE_ALIGN(size);
 142	order = get_order(size);
 143	if (unlikely(order >= MAX_ORDER))
 144		return NULL;
 145
 146	npages = size >> IO_PAGE_SHIFT;
 147
 148	nid = dev->archdata.numa_node;
 149	page = alloc_pages_node(nid, gfp, order);
 150	if (unlikely(!page))
 151		return NULL;
 152
 153	first_page = (unsigned long) page_address(page);
 154	memset((char *)first_page, 0, PAGE_SIZE << order);
 155
 156	iommu = dev->archdata.iommu;
 157
 158	spin_lock_irqsave(&iommu->lock, flags);
 159	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 160	spin_unlock_irqrestore(&iommu->lock, flags);
 161
 162	if (unlikely(entry == DMA_ERROR_CODE))
 163		goto range_alloc_fail;
 164
 165	*dma_addrp = (iommu->page_table_map_base +
 166		      (entry << IO_PAGE_SHIFT));
 167	ret = (void *) first_page;
 168	first_page = __pa(first_page);
 169
 170	local_irq_save(flags);
 171
 172	iommu_batch_start(dev,
 173			  (HV_PCI_MAP_ATTR_READ |
 174			   HV_PCI_MAP_ATTR_WRITE),
 175			  entry);
 176
 177	for (n = 0; n < npages; n++) {
 178		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
 179		if (unlikely(err < 0L))
 180			goto iommu_map_fail;
 181	}
 182
 183	if (unlikely(iommu_batch_end() < 0L))
 184		goto iommu_map_fail;
 185
 186	local_irq_restore(flags);
 187
 188	return ret;
 189
 190iommu_map_fail:
 191	/* Interrupts are disabled.  */
 192	spin_lock(&iommu->lock);
 193	iommu_range_free(iommu, *dma_addrp, npages);
 194	spin_unlock_irqrestore(&iommu->lock, flags);
 195
 196range_alloc_fail:
 197	free_pages(first_page, order);
 198	return NULL;
 199}
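/* Editorial aside (not part of the file): this v3.5.6 path serializes on
 * iommu->lock around iommu_range_alloc(), while the v6.2 listing above
 * splits the table into pools and allocates via iommu_tbl_range_alloc(),
 * only disabling interrupts around the hypervisor batch.
 */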
 200
 201static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 202				 dma_addr_t dvma, struct dma_attrs *attrs)
 203{
 204	struct pci_pbm_info *pbm;
 205	struct iommu *iommu;
 206	unsigned long flags, order, npages, entry;
 207	u32 devhandle;
 208
 209	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 210	iommu = dev->archdata.iommu;
 211	pbm = dev->archdata.host_controller;
 212	devhandle = pbm->devhandle;
 213	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 214
 215	spin_lock_irqsave(&iommu->lock, flags);
 216
 217	iommu_range_free(iommu, dvma, npages);
 218
 219	do {
 220		unsigned long num;
 221
 222		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
 223					    npages);
 224		entry += num;
 225		npages -= num;
 226	} while (npages != 0);
 227
 228	spin_unlock_irqrestore(&iommu->lock, flags);
 229
 230	order = get_order(size);
 231	if (order < 10)
 232		free_pages((unsigned long)cpu, order);
 233}
 234
 235static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 236				  unsigned long offset, size_t sz,
 237				  enum dma_data_direction direction,
 238				  struct dma_attrs *attrs)
 239{
 240	struct iommu *iommu;
 241	unsigned long flags, npages, oaddr;
 242	unsigned long i, base_paddr;
 243	u32 bus_addr, ret;
 244	unsigned long prot;
 245	long entry;
 246
 247	iommu = dev->archdata.iommu;
 248
 249	if (unlikely(direction == DMA_NONE))
 250		goto bad;
 251
 252	oaddr = (unsigned long)(page_address(page) + offset);
 253	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 254	npages >>= IO_PAGE_SHIFT;
 255
 256	spin_lock_irqsave(&iommu->lock, flags);
 257	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 258	spin_unlock_irqrestore(&iommu->lock, flags);
 259
 260	if (unlikely(entry == DMA_ERROR_CODE))
 261		goto bad;
 262
 263	bus_addr = (iommu->page_table_map_base +
 264		    (entry << IO_PAGE_SHIFT));
 265	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 266	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 267	prot = HV_PCI_MAP_ATTR_READ;
 268	if (direction != DMA_TO_DEVICE)
 269		prot |= HV_PCI_MAP_ATTR_WRITE;
 270
 271	local_irq_save(flags);
 272
 273	iommu_batch_start(dev, prot, entry);
 274
 275	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
 276		long err = iommu_batch_add(base_paddr);
 277		if (unlikely(err < 0L))
 278			goto iommu_map_fail;
 279	}
 280	if (unlikely(iommu_batch_end() < 0L))
 281		goto iommu_map_fail;
 282
 283	local_irq_restore(flags);
 284
 285	return ret;
 286
 287bad:
 288	if (printk_ratelimit())
 289		WARN_ON(1);
 290	return DMA_ERROR_CODE;
 291
 292iommu_map_fail:
 293	/* Interrupts are disabled.  */
 294	spin_lock(&iommu->lock);
 295	iommu_range_free(iommu, bus_addr, npages);
 296	spin_unlock_irqrestore(&iommu->lock, flags);
 297
 298	return DMA_ERROR_CODE;
 299}
 300
 301static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 302			      size_t sz, enum dma_data_direction direction,
 303			      struct dma_attrs *attrs)
 304{
 305	struct pci_pbm_info *pbm;
 306	struct iommu *iommu;
 307	unsigned long flags, npages;
 308	long entry;
 309	u32 devhandle;
 310
 311	if (unlikely(direction == DMA_NONE)) {
 312		if (printk_ratelimit())
 313			WARN_ON(1);
 314		return;
 315	}
 316
 317	iommu = dev->archdata.iommu;
 318	pbm = dev->archdata.host_controller;
 319	devhandle = pbm->devhandle;
 320
 321	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 322	npages >>= IO_PAGE_SHIFT;
 323	bus_addr &= IO_PAGE_MASK;
 324
 325	spin_lock_irqsave(&iommu->lock, flags);
 326
 327	iommu_range_free(iommu, bus_addr, npages);
 328
 329	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
 330	do {
 331		unsigned long num;
 332
 333		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
 334					    npages);
 335		entry += num;
 336		npages -= num;
 337	} while (npages != 0);
 338
 339	spin_unlock_irqrestore(&iommu->lock, flags);
 340}
 341
 342static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 343			 int nelems, enum dma_data_direction direction,
 344			 struct dma_attrs *attrs)
 345{
 346	struct scatterlist *s, *outs, *segstart;
 347	unsigned long flags, handle, prot;
 348	dma_addr_t dma_next = 0, dma_addr;
 349	unsigned int max_seg_size;
 350	unsigned long seg_boundary_size;
 351	int outcount, incount, i;
 352	struct iommu *iommu;
 353	unsigned long base_shift;
 354	long err;
 355
 356	BUG_ON(direction == DMA_NONE);
 357
 358	iommu = dev->archdata.iommu;
 359	if (nelems == 0 || !iommu)
 360		return 0;
  361
 362	prot = HV_PCI_MAP_ATTR_READ;
 363	if (direction != DMA_TO_DEVICE)
 364		prot |= HV_PCI_MAP_ATTR_WRITE;
 365
 366	outs = s = segstart = &sglist[0];
 367	outcount = 1;
 368	incount = nelems;
 369	handle = 0;
 370
 371	/* Init first segment length for backout at failure */
 372	outs->dma_length = 0;
 373
 374	spin_lock_irqsave(&iommu->lock, flags);
 375
 376	iommu_batch_start(dev, prot, ~0UL);
 377
 378	max_seg_size = dma_get_max_seg_size(dev);
 379	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 380				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
 381	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 382	for_each_sg(sglist, s, nelems, i) {
 383		unsigned long paddr, npages, entry, out_entry = 0, slen;
 384
 385		slen = s->length;
 386		/* Sanity check */
 387		if (slen == 0) {
 388			dma_next = 0;
 389			continue;
 390		}
 391		/* Allocate iommu entries for that segment */
 392		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 393		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
 394		entry = iommu_range_alloc(dev, iommu, npages, &handle);
 395
 396		/* Handle failure */
 397		if (unlikely(entry == DMA_ERROR_CODE)) {
 398			if (printk_ratelimit())
 399				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
 400				       " npages %lx\n", iommu, paddr, npages);
 401			goto iommu_map_failed;
 402		}
 403
 404		iommu_batch_new_entry(entry);
 405
 406		/* Convert entry to a dma_addr_t */
 407		dma_addr = iommu->page_table_map_base +
 408			(entry << IO_PAGE_SHIFT);
 409		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 410
 411		/* Insert into HW table */
 412		paddr &= IO_PAGE_MASK;
 413		while (npages--) {
 414			err = iommu_batch_add(paddr);
 415			if (unlikely(err < 0L))
 416				goto iommu_map_failed;
 417			paddr += IO_PAGE_SIZE;
 418		}
 419
 420		/* If we are in an open segment, try merging */
 421		if (segstart != s) {
  422			/* We cannot merge if the new dma_addr isn't contiguous with
  423			 * the previous segment, the merged length would exceed
  424			 * max_seg_size, or the merge would span a segment boundary. */
 425			if ((dma_addr != dma_next) ||
 426			    (outs->dma_length + s->length > max_seg_size) ||
 427			    (is_span_boundary(out_entry, base_shift,
 428					      seg_boundary_size, outs, s))) {
 429				/* Can't merge: create a new segment */
 430				segstart = s;
 431				outcount++;
 432				outs = sg_next(outs);
 433			} else {
 434				outs->dma_length += s->length;
 435			}
 436		}
 437
 438		if (segstart == s) {
 439			/* This is a new segment, fill entries */
 440			outs->dma_address = dma_addr;
 441			outs->dma_length = slen;
 442			out_entry = entry;
 443		}
 444
 445		/* Calculate next page pointer for contiguous check */
 446		dma_next = dma_addr + slen;
 447	}
 448
 449	err = iommu_batch_end();
 450
 451	if (unlikely(err < 0L))
 452		goto iommu_map_failed;
 453
 454	spin_unlock_irqrestore(&iommu->lock, flags);
 455
 456	if (outcount < incount) {
 457		outs = sg_next(outs);
 458		outs->dma_address = DMA_ERROR_CODE;
 459		outs->dma_length = 0;
 460	}
 461
 462	return outcount;
 463
 464iommu_map_failed:
 465	for_each_sg(sglist, s, nelems, i) {
 466		if (s->dma_length != 0) {
 467			unsigned long vaddr, npages;
 468
 469			vaddr = s->dma_address & IO_PAGE_MASK;
 470			npages = iommu_num_pages(s->dma_address, s->dma_length,
 471						 IO_PAGE_SIZE);
 472			iommu_range_free(iommu, vaddr, npages);
 473			/* XXX demap? XXX */
 474			s->dma_address = DMA_ERROR_CODE;
 475			s->dma_length = 0;
 476		}
 477		if (s == outs)
 478			break;
 479	}
 480	spin_unlock_irqrestore(&iommu->lock, flags);
 481
 482	return 0;
 483}
 484
 485static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 486			    int nelems, enum dma_data_direction direction,
 487			    struct dma_attrs *attrs)
 488{
 489	struct pci_pbm_info *pbm;
 490	struct scatterlist *sg;
 491	struct iommu *iommu;
 492	unsigned long flags;
 493	u32 devhandle;
 494
 495	BUG_ON(direction == DMA_NONE);
 496
 497	iommu = dev->archdata.iommu;
 498	pbm = dev->archdata.host_controller;
 499	devhandle = pbm->devhandle;
  500
 501	spin_lock_irqsave(&iommu->lock, flags);
 502
 503	sg = sglist;
 504	while (nelems--) {
 505		dma_addr_t dma_handle = sg->dma_address;
 506		unsigned int len = sg->dma_length;
 507		unsigned long npages, entry;
 508
 509		if (!len)
 510			break;
 511		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
 512		iommu_range_free(iommu, dma_handle, npages);
 513
 514		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 515		while (npages) {
 516			unsigned long num;
 517
 518			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
 519						    npages);
 520			entry += num;
 521			npages -= num;
 522		}
 523
 524		sg = sg_next(sg);
 525	}
 526
 527	spin_unlock_irqrestore(&iommu->lock, flags);
 528}
 529
 530static struct dma_map_ops sun4v_dma_ops = {
 531	.alloc				= dma_4v_alloc_coherent,
 532	.free				= dma_4v_free_coherent,
 533	.map_page			= dma_4v_map_page,
 534	.unmap_page			= dma_4v_unmap_page,
 535	.map_sg				= dma_4v_map_sg,
 536	.unmap_sg			= dma_4v_unmap_sg,
 537};
 538
 539static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
 540					 struct device *parent)
 541{
 542	struct property *prop;
 543	struct device_node *dp;
 544
 545	dp = pbm->op->dev.of_node;
 546	prop = of_find_property(dp, "66mhz-capable", NULL);
 547	pbm->is_66mhz_capable = (prop != NULL);
 548	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
 549
 550	/* XXX register error interrupt handlers XXX */
 551}
 552
 553static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
 554						      struct iommu *iommu)
 555{
 556	struct iommu_arena *arena = &iommu->arena;
 557	unsigned long i, cnt = 0;
 558	u32 devhandle;
 559
 560	devhandle = pbm->devhandle;
 561	for (i = 0; i < arena->limit; i++) {
 562		unsigned long ret, io_attrs, ra;
 563
 564		ret = pci_sun4v_iommu_getmap(devhandle,
 565					     HV_PCI_TSBID(0, i),
 566					     &io_attrs, &ra);
 567		if (ret == HV_EOK) {
 568			if (page_in_phys_avail(ra)) {
 569				pci_sun4v_iommu_demap(devhandle,
 570						      HV_PCI_TSBID(0, i), 1);
 571			} else {
 572				cnt++;
 573				__set_bit(i, arena->map);
 574			}
 575		}
 576	}
 577
 578	return cnt;
 579}
 580
 581static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 582{
 583	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
 584	struct iommu *iommu = pbm->iommu;
 585	unsigned long num_tsb_entries, sz;
 586	u32 dma_mask, dma_offset;
 587	const u32 *vdma;
 588
 589	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
 590	if (!vdma)
 591		vdma = vdma_default;
 592
 593	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
 594		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
 595		       vdma[0], vdma[1]);
 596		return -EINVAL;
  597	}
 598
 599	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
 600	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
 601
 602	dma_offset = vdma[0];
 603
 604	/* Setup initial software IOMMU state. */
 605	spin_lock_init(&iommu->lock);
 606	iommu->ctx_lowest_free = 1;
 607	iommu->page_table_map_base = dma_offset;
 608	iommu->dma_addr_mask = dma_mask;
 609
 610	/* Allocate and initialize the free area map.  */
 611	sz = (num_tsb_entries + 7) / 8;
 612	sz = (sz + 7UL) & ~7UL;
 613	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
 614	if (!iommu->arena.map) {
 615		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
 616		return -ENOMEM;
 617	}
 618	iommu->arena.limit = num_tsb_entries;
 619
 620	sz = probe_existing_entries(pbm, iommu);
 621	if (sz)
 622		printk("%s: Imported %lu TSB entries from OBP\n",
 623		       pbm->name, sz);
 624
 625	return 0;
 626}
 627
 628#ifdef CONFIG_PCI_MSI
 629struct pci_sun4v_msiq_entry {
 630	u64		version_type;
 631#define MSIQ_VERSION_MASK		0xffffffff00000000UL
 632#define MSIQ_VERSION_SHIFT		32
 633#define MSIQ_TYPE_MASK			0x00000000000000ffUL
 634#define MSIQ_TYPE_SHIFT			0
 635#define MSIQ_TYPE_NONE			0x00
 636#define MSIQ_TYPE_MSG			0x01
 637#define MSIQ_TYPE_MSI32			0x02
 638#define MSIQ_TYPE_MSI64			0x03
 639#define MSIQ_TYPE_INTX			0x08
 640#define MSIQ_TYPE_NONE2			0xff
 641
 642	u64		intx_sysino;
 643	u64		reserved1;
 644	u64		stick;
 645	u64		req_id;  /* bus/device/func */
 646#define MSIQ_REQID_BUS_MASK		0xff00UL
 647#define MSIQ_REQID_BUS_SHIFT		8
 648#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
 649#define MSIQ_REQID_DEVICE_SHIFT		3
 650#define MSIQ_REQID_FUNC_MASK		0x0007UL
 651#define MSIQ_REQID_FUNC_SHIFT		0
 652
 653	u64		msi_address;
 654
 655	/* The format of this value is message type dependent.
 656	 * For MSI bits 15:0 are the data from the MSI packet.
 657	 * For MSI-X bits 31:0 are the data from the MSI packet.
 658	 * For MSG, this holds the message code and message routing code, where:
 659	 *	bits 39:32 are the bus/device/fn of the msg target-id
 660	 *	bits 18:16 are the message routing code
 661	 *	bits 7:0 are the message code
 662	 * For INTx the low-order 2 bits are:
 663	 *	00 - INTA
 664	 *	01 - INTB
 665	 *	10 - INTC
 666	 *	11 - INTD
 667	 */
 668	u64		msi_data;
 669
 670	u64		reserved2;
 671};
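    /* Eight u64 fields, so each queue entry is 64 bytes; the head
     * and tail values exchanged with the hypervisor are byte
     * offsets that advance in sizeof(struct pci_sun4v_msiq_entry)
     * steps.
     */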
 672
 673static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
 674			      unsigned long *head)
 675{
 676	unsigned long err, limit;
 677
 678	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
 679	if (unlikely(err))
 680		return -ENXIO;
 681
 682	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
 683	if (unlikely(*head >= limit))
 684		return -EFBIG;
 685
 686	return 0;
 687}
 688
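    /* Dequeue one MSI.  The queues sit back to back in a single
     * allocation, so for queue ID q the current entry lives at
     * msi_queues + (q - msiq_first) * msiq_ent_count *
     * sizeof(entry) + *head.  Returns 1 if an MSI was taken,
     * 0 if the queue was empty.
     */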
 689static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
 690				 unsigned long msiqid, unsigned long *head,
 691				 unsigned long *msi)
 692{
 693	struct pci_sun4v_msiq_entry *ep;
 694	unsigned long err, type;
 695
 696	/* Note: void pointer arithmetic, 'head' is a byte offset  */
 697	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
 698				 (pbm->msiq_ent_count *
 699				  sizeof(struct pci_sun4v_msiq_entry))) +
 700	      *head);
 701
 702	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
 703		return 0;
 704
 705	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
 706	if (unlikely(type != MSIQ_TYPE_MSI32 &&
 707		     type != MSIQ_TYPE_MSI64))
 708		return -EINVAL;
 709
 710	*msi = ep->msi_data;
 711
 712	err = pci_sun4v_msi_setstate(pbm->devhandle,
 713				     ep->msi_data /* msi_num */,
 714				     HV_MSISTATE_IDLE);
 715	if (unlikely(err))
 716		return -ENXIO;
 717
 718	/* Clear the entry.  */
 719	ep->version_type &= ~MSIQ_TYPE_MASK;
 720
 721	(*head) += sizeof(struct pci_sun4v_msiq_entry);
 722	if (*head >=
 723	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
 724		*head = 0;
 725
 726	return 1;
 727}
 728
 729static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
 730			      unsigned long head)
 731{
 732	unsigned long err;
 733
 734	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
 735	if (unlikely(err))
 736		return -EINVAL;
 737
 738	return 0;
 739}
 740
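    /* Bind an MSI/MSI-X vector to its event queue and arm it:
     * point the MSI at the queue, reset its state to idle, then
     * mark it valid so the hardware may deliver it.
     */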
 741static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
 742			       unsigned long msi, int is_msi64)
 743{
 744	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
 745				  (is_msi64 ?
 746				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
 747		return -ENXIO;
 748	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
 749		return -ENXIO;
 750	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
 751		return -ENXIO;
 752	return 0;
 753}
 754
 755static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
 756{
 757	unsigned long err, msiqid;
 758
 759	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
 760	if (err)
 761		return -ENXIO;
 762
 763	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
 764
 765	return 0;
 766}
 767
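    /* All event queues are carved out of one physically contiguous
     * __get_free_pages() allocation.  Each queue base is registered
     * with the hypervisor via msiq_conf and then read back with
     * msiq_info as a sanity check.
     */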
 768static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
 769{
 770	unsigned long q_size, alloc_size, pages, order;
 771	int i;
 772
 773	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
 774	alloc_size = (pbm->msiq_num * q_size);
 775	order = get_order(alloc_size);
 776	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
 777	if (pages == 0UL) {
 778		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
 779		       order);
 780		return -ENOMEM;
 781	}
 782	memset((char *)pages, 0, PAGE_SIZE << order);
 783	pbm->msi_queues = (void *) pages;
 784
 785	for (i = 0; i < pbm->msiq_num; i++) {
 786		unsigned long err, base = __pa(pages + (i * q_size));
 787		unsigned long ret1, ret2;
 788
 789		err = pci_sun4v_msiq_conf(pbm->devhandle,
 790					  pbm->msiq_first + i,
 791					  base, pbm->msiq_ent_count);
 792		if (err) {
 793			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
 794			       err);
 795			goto h_error;
 796		}
 797
 798		err = pci_sun4v_msiq_info(pbm->devhandle,
 799					  pbm->msiq_first + i,
 800					  &ret1, &ret2);
 801		if (err) {
 802			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
 803			       err);
 804			goto h_error;
 805		}
 806		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
 807			printk(KERN_ERR "MSI: Bogus qconf "
 808			       "expected[%lx:%x] got[%lx:%lx]\n",
 809			       base, pbm->msiq_ent_count,
 810			       ret1, ret2);
 811			goto h_error;
 812		}
 813	}
 814
 815	return 0;
 816
 817h_error:
 818	free_pages(pages, order);
 819	return -EINVAL;
 820}
 821
 822static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
 823{
 824	unsigned long q_size, alloc_size, pages, order;
 825	int i;
 826
 827	for (i = 0; i < pbm->msiq_num; i++) {
 828		unsigned long msiqid = pbm->msiq_first + i;
 829
 830		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
 831	}
 832
 833	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
 834	alloc_size = (pbm->msiq_num * q_size);
 835	order = get_order(alloc_size);
 836
 837	pages = (unsigned long) pbm->msi_queues;
 838
 839	free_pages(pages, order);
 840
 841	pbm->msi_queues = NULL;
 842}
 843
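    /* Translate this queue's device INO into a virtual IRQ cookie,
     * then mark the queue valid and idle so it can raise
     * interrupts.
     */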
 844static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
 845				    unsigned long msiqid,
 846				    unsigned long devino)
 847{
 848	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
 849
 850	if (!irq)
 851		return -ENOMEM;
 852
 853	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
 854		return -EINVAL;
 855	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
 856		return -EINVAL;
 857
 858	return irq;
 859}
 860
 861static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
 862	.get_head	=	pci_sun4v_get_head,
 863	.dequeue_msi	=	pci_sun4v_dequeue_msi,
 864	.set_head	=	pci_sun4v_set_head,
 865	.msi_setup	=	pci_sun4v_msi_setup,
 866	.msi_teardown	=	pci_sun4v_msi_teardown,
 867	.msiq_alloc	=	pci_sun4v_msiq_alloc,
 868	.msiq_free	=	pci_sun4v_msiq_free,
 869	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
 870};
 871
 872static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 873{
 874	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
 875}
 876#else /* CONFIG_PCI_MSI */
 877static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 878{
 879}
 880#endif /* !(CONFIG_PCI_MSI) */
 881
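    /* Per-PBM bring-up: resolve bus properties, initialize the
     * IOMMU and MSI support, scan the bus, then link the PBM onto
     * the global pci_pbm_root list.
     */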
 882static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
 883			      struct platform_device *op, u32 devhandle)
 884{
 885	struct device_node *dp = op->dev.of_node;
 886	int err;
 887
 888	pbm->numa_node = of_node_to_nid(dp);
 889
 890	pbm->pci_ops = &sun4v_pci_ops;
 891	pbm->config_space_reg_bits = 12;
 892
 893	pbm->index = pci_num_pbms++;
 894
 895	pbm->op = op;
 896
 897	pbm->devhandle = devhandle;
 898
 899	pbm->name = dp->full_name;
 900
 901	printk(KERN_INFO "%s: SUN4V PCI Bus Module\n", pbm->name);
 902	printk(KERN_INFO "%s: On NUMA node %d\n", pbm->name, pbm->numa_node);
 903
 904	pci_determine_mem_io_space(pbm);
 905
 906	pci_get_pbm_props(pbm);
 907
 908	err = pci_sun4v_iommu_init(pbm);
 909	if (err)
 910		return err;
 911
 912	pci_sun4v_msi_init(pbm);
 913
 914	pci_sun4v_scan_bus(pbm, &op->dev);
 915
 916	pbm->next = pci_pbm_root;
 917	pci_pbm_root = pbm;
 918
 919	return 0;
 920}
 921
 922static int pci_sun4v_probe(struct platform_device *op)
 923{
 924	const struct linux_prom64_registers *regs;
 925	static int hvapi_negotiated = 0;
 926	struct pci_pbm_info *pbm;
 927	struct device_node *dp;
 928	struct iommu *iommu;
 929	u32 devhandle;
 930	int i, err;
 931
 932	dp = op->dev.of_node;
 933
 934	if (!hvapi_negotiated++) {
    		/* Negotiate the newest VPCI API the hypervisor offers;
    		 * vpci_versions[] is ordered from largest major to
    		 * lowest, so the first successful registration wins.
    		 */
    		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
    			vpci_major = vpci_versions[i].major;
    			vpci_minor = vpci_versions[i].minor;

 935			err = sun4v_hvapi_register(HV_GRP_PCI,
 936						   vpci_major,
 937						   &vpci_minor);
    			if (!err)
    				break;
    		}
 938
 939		if (err) {
 940			printk(KERN_ERR PFX "Could not register hvapi, "
 941			       "err=%d\n", err);
 942			return err;
 943		}
 944		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
 945		       vpci_major, vpci_minor);
 946
 947		dma_ops = &sun4v_dma_ops;
 948	}
 949
 950	regs = of_get_property(dp, "reg", NULL);
 951	err = -ENODEV;
 952	if (!regs) {
 953		printk(KERN_ERR PFX "Could not find config registers\n");
 954		goto out_err;
 955	}
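    	/* The devhandle that names this root complex in every PCI
    	 * hypercall lives in bits 59:32 of the first "reg" entry's
    	 * physical address.
    	 */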
 956	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
 957
 958	err = -ENOMEM;
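    	/* First probe only: give each possible CPU a zeroed page to
    	 * stage physical addresses for batched IOMMU map hypercalls.
    	 */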
 959	if (!iommu_batch_initialized) {
 960		for_each_possible_cpu(i) {
 961			unsigned long page = get_zeroed_page(GFP_KERNEL);
 962
 963			if (!page)
 964				goto out_err;
 965
 966			per_cpu(iommu_batch, i).pglist = (u64 *) page;
 967		}
 968		iommu_batch_initialized = 1;
 969	}
 970
 971	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
 972	if (!pbm) {
 973		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
 974		goto out_err;
 975	}
 976
 977	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
 978	if (!iommu) {
 979		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
 980		goto out_free_controller;
 981	}
 982
 983	pbm->iommu = iommu;
 984
 985	err = pci_sun4v_pbm_init(pbm, op, devhandle);
 986	if (err)
 987		goto out_free_iommu;
 988
 989	dev_set_drvdata(&op->dev, pbm);
 990
 991	return 0;
 992
 993out_free_iommu:
 994	kfree(pbm->iommu);
 995
 996out_free_controller:
 997	kfree(pbm);
 998
 999out_err:
1000	return err;
1001}
1002
1003static const struct of_device_id pci_sun4v_match[] = {
1004	{
1005		.name = "pci",
1006		.compatible = "SUNW,sun4v-pci",
1007	},
1008	{},
1009};
1010
1011static struct platform_driver pci_sun4v_driver = {
1012	.driver = {
1013		.name = DRIVER_NAME,
1014		.owner = THIS_MODULE,
1015		.of_match_table = pci_sun4v_match,
1016	},
1017	.probe		= pci_sun4v_probe,
1018};
1019
1020static int __init pci_sun4v_init(void)
1021{
1022	return platform_driver_register(&pci_sun4v_driver);
1023}
1024
1025subsys_initcall(pci_sun4v_init);