v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/* pci_sun4v.c: SUN4V specific PCI controller support.
   3 *
   4 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/types.h>
   9#include <linux/pci.h>
  10#include <linux/init.h>
  11#include <linux/slab.h>
  12#include <linux/interrupt.h>
  13#include <linux/percpu.h>
  14#include <linux/irq.h>
  15#include <linux/msi.h>
  16#include <linux/export.h>
  17#include <linux/log2.h>
  18#include <linux/of_device.h>
  19#include <linux/iommu-common.h>
  20
  21#include <asm/iommu.h>
  22#include <asm/irq.h>
  23#include <asm/hypervisor.h>
  24#include <asm/prom.h>
  25
  26#include "pci_impl.h"
  27#include "iommu_common.h"
  28#include "kernel.h"
  29
  30#include "pci_sun4v.h"
  31
  32#define DRIVER_NAME	"pci_sun4v"
  33#define PFX		DRIVER_NAME ": "
  34
  35static unsigned long vpci_major;
  36static unsigned long vpci_minor;
  37
  38struct vpci_version {
  39	unsigned long major;
  40	unsigned long minor;
  41};
  42
  43/* Ordered from largest major to lowest */
  44static struct vpci_version vpci_versions[] = {
  45	{ .major = 2, .minor = 0 },
  46	{ .major = 1, .minor = 1 },
  47};
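/* pci_sun4v_probe() below walks this table in order and keeps the
 * first major/minor pair that sun4v_hvapi_register() accepts, so the
 * newest mutually-supported VPCI API wins.
 */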
  48
  49static unsigned long vatu_major = 1;
  50static unsigned long vatu_minor = 1;
  51
  52#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
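/* For illustration: with sparc64's usual 8K base pages this is
 * 8192 / 8 = 1024 physical-page entries per per-cpu batch page
 * (the define adapts automatically if PAGE_SIZE differs).
 */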
  53
  54struct iommu_batch {
  55	struct device	*dev;		/* Device mapping is for.	*/
  56	unsigned long	prot;		/* IOMMU page protections	*/
  57	unsigned long	entry;		/* Index into IOTSB.		*/
  58	u64		*pglist;	/* List of physical pages	*/
  59	unsigned long	npages;		/* Number of pages in list.	*/
  60};
  61
  62static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
  63static int iommu_batch_initialized;
  64
  65/* Interrupts must be disabled.  */
  66static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
  67{
  68	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
  69
  70	p->dev		= dev;
  71	p->prot		= prot;
  72	p->entry	= entry;
  73	p->npages	= 0;
  74}
  75
  76/* Interrupts must be disabled.  */
  77static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
  78{
  79	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
  80	u64 *pglist = p->pglist;
  81	u64 index_count;
  82	unsigned long devhandle = pbm->devhandle;
  83	unsigned long prot = p->prot;
  84	unsigned long entry = p->entry;
  85	unsigned long npages = p->npages;
  86	unsigned long iotsb_num;
  87	unsigned long ret;
  88	long num;
  89
  90	/* VPCI maj=1, min=[0,1] only supports read and write */
  91	if (vpci_major < 2)
  92		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
  93
  94	while (npages != 0) {
  95		if (mask <= DMA_BIT_MASK(32)) {
  96			num = pci_sun4v_iommu_map(devhandle,
  97						  HV_PCI_TSBID(0, entry),
  98						  npages,
  99						  prot,
 100						  __pa(pglist));
 101			if (unlikely(num < 0)) {
 102				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
 103						   __func__,
 104						   devhandle,
 105						   HV_PCI_TSBID(0, entry),
 106						   npages, prot, __pa(pglist),
 107						   num);
 108				return -1;
 109			}
 110		} else {
  111			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
 112			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
 113			ret = pci_sun4v_iotsb_map(devhandle,
 114						  iotsb_num,
 115						  index_count,
 116						  prot,
 117						  __pa(pglist),
 118						  &num);
 119			if (unlikely(ret != HV_EOK)) {
 120				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
 121						   __func__,
 122						   devhandle, iotsb_num,
 123						   index_count, prot,
 124						   __pa(pglist), ret);
 125				return -1;
 126			}
 127		}
 128		entry += num;
 129		npages -= num;
 130		pglist += num;
 131	}
 132
 133	p->entry = entry;
 134	p->npages = 0;
 135
 136	return 0;
 137}
 138
 139static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
 140{
 141	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 142
 143	if (p->entry + p->npages == entry)
 144		return;
 145	if (p->entry != ~0UL)
 146		iommu_batch_flush(p, mask);
 147	p->entry = entry;
 148}
 149
 150/* Interrupts must be disabled.  */
 151static inline long iommu_batch_add(u64 phys_page, u64 mask)
 152{
 153	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 154
 155	BUG_ON(p->npages >= PGLIST_NENTS);
 156
 157	p->pglist[p->npages++] = phys_page;
 158	if (p->npages == PGLIST_NENTS)
 159		return iommu_batch_flush(p, mask);
 160
 161	return 0;
 162}
 163
 164/* Interrupts must be disabled.  */
 165static inline long iommu_batch_end(u64 mask)
 166{
 167	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 168
 169	BUG_ON(p->npages >= PGLIST_NENTS);
 170
 171	return iommu_batch_flush(p, mask);
 172}
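/* A minimal sketch of the calling pattern for the batch helpers above,
 * mirroring dma_4v_alloc_coherent() below; 'prot', 'entry', 'paddr',
 * 'npages' and 'mask' stand in for caller state and error handling is
 * elided:
 *
 *	local_irq_save(flags);
 *	iommu_batch_start(dev, prot, entry);
 *	for (n = 0; n < npages; n++)
 *		if (iommu_batch_add(paddr + (n * PAGE_SIZE), mask) < 0L)
 *			goto fail;
 *	if (iommu_batch_end(mask) < 0L)
 *		goto fail;
 *	local_irq_restore(flags);
 */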
 173
 174static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 175				   dma_addr_t *dma_addrp, gfp_t gfp,
 176				   unsigned long attrs)
 177{
 178	u64 mask;
 179	unsigned long flags, order, first_page, npages, n;
 180	unsigned long prot = 0;
 181	struct iommu *iommu;
 182	struct atu *atu;
 183	struct iommu_map_table *tbl;
 184	struct page *page;
 185	void *ret;
 186	long entry;
 187	int nid;
 188
 189	size = IO_PAGE_ALIGN(size);
 190	order = get_order(size);
 191	if (unlikely(order >= MAX_ORDER))
 192		return NULL;
 193
 194	npages = size >> IO_PAGE_SHIFT;
 195
 196	if (attrs & DMA_ATTR_WEAK_ORDERING)
 197		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;
 198
 199	nid = dev->archdata.numa_node;
 200	page = alloc_pages_node(nid, gfp, order);
 201	if (unlikely(!page))
 202		return NULL;
 203
 204	first_page = (unsigned long) page_address(page);
 205	memset((char *)first_page, 0, PAGE_SIZE << order);
 206
 207	iommu = dev->archdata.iommu;
 208	atu = iommu->atu;
 209
 210	mask = dev->coherent_dma_mask;
 211	if (mask <= DMA_BIT_MASK(32))
 212		tbl = &iommu->tbl;
 213	else
 214		tbl = &atu->tbl;
 215
 216	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 217				      (unsigned long)(-1), 0);
 218
 219	if (unlikely(entry == IOMMU_ERROR_CODE))
 220		goto range_alloc_fail;
 221
 222	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 223	ret = (void *) first_page;
 224	first_page = __pa(first_page);
 225
 226	local_irq_save(flags);
 227
 228	iommu_batch_start(dev,
 229			  (HV_PCI_MAP_ATTR_READ | prot |
 230			   HV_PCI_MAP_ATTR_WRITE),
 231			  entry);
 232
 233	for (n = 0; n < npages; n++) {
 234		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
 235		if (unlikely(err < 0L))
 236			goto iommu_map_fail;
 237	}
 238
 239	if (unlikely(iommu_batch_end(mask) < 0L))
 240		goto iommu_map_fail;
 241
 242	local_irq_restore(flags);
 243
 244	return ret;
 245
 246iommu_map_fail:
 247	local_irq_restore(flags);
 248	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
 249
 250range_alloc_fail:
 251	free_pages(first_page, order);
 252	return NULL;
 253}
 254
 255unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
 256				unsigned long iotsb_num,
 257				struct pci_bus *bus_dev)
 258{
 259	struct pci_dev *pdev;
 260	unsigned long err;
 261	unsigned int bus;
 262	unsigned int device;
 263	unsigned int fun;
 264
 265	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
 266		if (pdev->subordinate) {
 267			/* No need to bind pci bridge */
 268			dma_4v_iotsb_bind(devhandle, iotsb_num,
 269					  pdev->subordinate);
 270		} else {
 271			bus = bus_dev->number;
 272			device = PCI_SLOT(pdev->devfn);
 273			fun = PCI_FUNC(pdev->devfn);
 274			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
 275						   HV_PCI_DEVICE_BUILD(bus,
 276								       device,
 277								       fun));
 278
  279			/* If bind fails for one device it is going to fail
  280			 * for the rest of the devices because we share the
  281			 * IOTSB. So in case of failure, simply return the
  282			 * error.
  283			 */
 284			if (err)
 285				return err;
 286		}
 287	}
 288
 289	return 0;
 290}
 291
 292static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
 293			       dma_addr_t dvma, unsigned long iotsb_num,
 294			       unsigned long entry, unsigned long npages)
 295{
 296	unsigned long num, flags;
 297	unsigned long ret;
 298
 299	local_irq_save(flags);
 300	do {
 301		if (dvma <= DMA_BIT_MASK(32)) {
 302			num = pci_sun4v_iommu_demap(devhandle,
 303						    HV_PCI_TSBID(0, entry),
 304						    npages);
 305		} else {
 306			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
 307						    entry, npages, &num);
 308			if (unlikely(ret != HV_EOK)) {
 309				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
 310						   ret);
 311			}
 312		}
 313		entry += num;
 314		npages -= num;
 315	} while (npages != 0);
 316	local_irq_restore(flags);
 317}
 318
 319static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 320				 dma_addr_t dvma, unsigned long attrs)
 321{
 322	struct pci_pbm_info *pbm;
 323	struct iommu *iommu;
 324	struct atu *atu;
 325	struct iommu_map_table *tbl;
 326	unsigned long order, npages, entry;
 327	unsigned long iotsb_num;
 328	u32 devhandle;
 329
 330	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 331	iommu = dev->archdata.iommu;
 332	pbm = dev->archdata.host_controller;
 333	atu = iommu->atu;
 334	devhandle = pbm->devhandle;
 335
 336	if (dvma <= DMA_BIT_MASK(32)) {
 337		tbl = &iommu->tbl;
  338		iotsb_num = 0; /* not used by the legacy iommu */
 339	} else {
 340		tbl = &atu->tbl;
 341		iotsb_num = atu->iotsb->iotsb_num;
 342	}
 343	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
 344	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
 345	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
 346	order = get_order(size);
 347	if (order < 10)
 348		free_pages((unsigned long)cpu, order);
 349}
 350
 351static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 352				  unsigned long offset, size_t sz,
 353				  enum dma_data_direction direction,
 354				  unsigned long attrs)
 355{
 356	struct iommu *iommu;
 357	struct atu *atu;
 358	struct iommu_map_table *tbl;
 359	u64 mask;
 360	unsigned long flags, npages, oaddr;
 361	unsigned long i, base_paddr;
 362	unsigned long prot;
 363	dma_addr_t bus_addr, ret;
 364	long entry;
 365
 366	iommu = dev->archdata.iommu;
 367	atu = iommu->atu;
 368
 369	if (unlikely(direction == DMA_NONE))
 370		goto bad;
 371
 372	oaddr = (unsigned long)(page_address(page) + offset);
 373	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 374	npages >>= IO_PAGE_SHIFT;
 375
 376	mask = *dev->dma_mask;
 377	if (mask <= DMA_BIT_MASK(32))
 378		tbl = &iommu->tbl;
 379	else
 380		tbl = &atu->tbl;
 381
 382	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 383				      (unsigned long)(-1), 0);
 384
 385	if (unlikely(entry == IOMMU_ERROR_CODE))
 386		goto bad;
 387
 388	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 389	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 390	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 391	prot = HV_PCI_MAP_ATTR_READ;
 392	if (direction != DMA_TO_DEVICE)
 393		prot |= HV_PCI_MAP_ATTR_WRITE;
 394
 395	if (attrs & DMA_ATTR_WEAK_ORDERING)
 396		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
 397
 398	local_irq_save(flags);
 399
 400	iommu_batch_start(dev, prot, entry);
 401
 402	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
 403		long err = iommu_batch_add(base_paddr, mask);
 404		if (unlikely(err < 0L))
 405			goto iommu_map_fail;
 406	}
 407	if (unlikely(iommu_batch_end(mask) < 0L))
 408		goto iommu_map_fail;
 409
 410	local_irq_restore(flags);
 411
 412	return ret;
 413
 414bad:
 415	if (printk_ratelimit())
 416		WARN_ON(1);
 417	return SPARC_MAPPING_ERROR;
 418
 419iommu_map_fail:
 420	local_irq_restore(flags);
 421	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 422	return SPARC_MAPPING_ERROR;
 423}
 424
 425static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 426			      size_t sz, enum dma_data_direction direction,
 427			      unsigned long attrs)
 428{
 429	struct pci_pbm_info *pbm;
 430	struct iommu *iommu;
 431	struct atu *atu;
 432	struct iommu_map_table *tbl;
 433	unsigned long npages;
 434	unsigned long iotsb_num;
 435	long entry;
 436	u32 devhandle;
 437
 438	if (unlikely(direction == DMA_NONE)) {
 439		if (printk_ratelimit())
 440			WARN_ON(1);
 441		return;
 442	}
 443
 444	iommu = dev->archdata.iommu;
 445	pbm = dev->archdata.host_controller;
 446	atu = iommu->atu;
 447	devhandle = pbm->devhandle;
 448
 449	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 450	npages >>= IO_PAGE_SHIFT;
 451	bus_addr &= IO_PAGE_MASK;
 452
 453	if (bus_addr <= DMA_BIT_MASK(32)) {
  454		iotsb_num = 0; /* not used by the legacy iommu */
 455		tbl = &iommu->tbl;
 456	} else {
 457		iotsb_num = atu->iotsb->iotsb_num;
 458		tbl = &atu->tbl;
 459	}
 460	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
 461	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
 462	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 463}
 464
 465static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 466			 int nelems, enum dma_data_direction direction,
 467			 unsigned long attrs)
 468{
 469	struct scatterlist *s, *outs, *segstart;
 470	unsigned long flags, handle, prot;
 471	dma_addr_t dma_next = 0, dma_addr;
 472	unsigned int max_seg_size;
 473	unsigned long seg_boundary_size;
 474	int outcount, incount, i;
 475	struct iommu *iommu;
 476	struct atu *atu;
 477	struct iommu_map_table *tbl;
 478	u64 mask;
 479	unsigned long base_shift;
 480	long err;
 481
 482	BUG_ON(direction == DMA_NONE);
 483
 484	iommu = dev->archdata.iommu;
 485	if (nelems == 0 || !iommu)
 486		return 0;
 487	atu = iommu->atu;
 488
 489	prot = HV_PCI_MAP_ATTR_READ;
 490	if (direction != DMA_TO_DEVICE)
 491		prot |= HV_PCI_MAP_ATTR_WRITE;
 492
 493	if (attrs & DMA_ATTR_WEAK_ORDERING)
 494		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
 495
 496	outs = s = segstart = &sglist[0];
 497	outcount = 1;
 498	incount = nelems;
 499	handle = 0;
 500
 501	/* Init first segment length for backout at failure */
 502	outs->dma_length = 0;
 503
 504	local_irq_save(flags);
 505
 506	iommu_batch_start(dev, prot, ~0UL);
 507
 508	max_seg_size = dma_get_max_seg_size(dev);
 509	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 510				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
 511
 512	mask = *dev->dma_mask;
 513	if (mask <= DMA_BIT_MASK(32))
 514		tbl = &iommu->tbl;
 515	else
 516		tbl = &atu->tbl;
 517
 518	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
 519
 520	for_each_sg(sglist, s, nelems, i) {
 521		unsigned long paddr, npages, entry, out_entry = 0, slen;
 522
 523		slen = s->length;
 524		/* Sanity check */
 525		if (slen == 0) {
 526			dma_next = 0;
 527			continue;
 528		}
 529		/* Allocate iommu entries for that segment */
 530		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 531		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
 532		entry = iommu_tbl_range_alloc(dev, tbl, npages,
 533					      &handle, (unsigned long)(-1), 0);
 534
 535		/* Handle failure */
 536		if (unlikely(entry == IOMMU_ERROR_CODE)) {
 537			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
 538					   tbl, paddr, npages);
 539			goto iommu_map_failed;
 540		}
 541
 542		iommu_batch_new_entry(entry, mask);
 543
 544		/* Convert entry to a dma_addr_t */
 545		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
 546		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 547
 548		/* Insert into HW table */
 549		paddr &= IO_PAGE_MASK;
 550		while (npages--) {
 551			err = iommu_batch_add(paddr, mask);
 552			if (unlikely(err < 0L))
 553				goto iommu_map_failed;
 554			paddr += IO_PAGE_SIZE;
 555		}
 556
 557		/* If we are in an open segment, try merging */
 558		if (segstart != s) {
 559			/* We cannot merge if:
 560			 * - allocated dma_addr isn't contiguous to previous allocation
 561			 */
 562			if ((dma_addr != dma_next) ||
 563			    (outs->dma_length + s->length > max_seg_size) ||
 564			    (is_span_boundary(out_entry, base_shift,
 565					      seg_boundary_size, outs, s))) {
 566				/* Can't merge: create a new segment */
 567				segstart = s;
 568				outcount++;
 569				outs = sg_next(outs);
 570			} else {
 571				outs->dma_length += s->length;
 572			}
 573		}
 574
 575		if (segstart == s) {
 576			/* This is a new segment, fill entries */
 577			outs->dma_address = dma_addr;
 578			outs->dma_length = slen;
 579			out_entry = entry;
 580		}
 581
 582		/* Calculate next page pointer for contiguous check */
 583		dma_next = dma_addr + slen;
 584	}
 585
 586	err = iommu_batch_end(mask);
 587
 588	if (unlikely(err < 0L))
 589		goto iommu_map_failed;
 590
 591	local_irq_restore(flags);
 592
 593	if (outcount < incount) {
 594		outs = sg_next(outs);
 595		outs->dma_address = SPARC_MAPPING_ERROR;
 596		outs->dma_length = 0;
 597	}
 598
 599	return outcount;
 600
 601iommu_map_failed:
 602	for_each_sg(sglist, s, nelems, i) {
 603		if (s->dma_length != 0) {
 604			unsigned long vaddr, npages;
 605
 606			vaddr = s->dma_address & IO_PAGE_MASK;
 607			npages = iommu_num_pages(s->dma_address, s->dma_length,
 608						 IO_PAGE_SIZE);
 609			iommu_tbl_range_free(tbl, vaddr, npages,
 610					     IOMMU_ERROR_CODE);
 611			/* XXX demap? XXX */
 612			s->dma_address = SPARC_MAPPING_ERROR;
 613			s->dma_length = 0;
 614		}
 615		if (s == outs)
 616			break;
 617	}
 618	local_irq_restore(flags);
 619
 620	return 0;
 621}
 622
 623static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 624			    int nelems, enum dma_data_direction direction,
 625			    unsigned long attrs)
 626{
 627	struct pci_pbm_info *pbm;
 628	struct scatterlist *sg;
 629	struct iommu *iommu;
 630	struct atu *atu;
 631	unsigned long flags, entry;
 632	unsigned long iotsb_num;
 633	u32 devhandle;
 634
 635	BUG_ON(direction == DMA_NONE);
 636
 637	iommu = dev->archdata.iommu;
 638	pbm = dev->archdata.host_controller;
 639	atu = iommu->atu;
 640	devhandle = pbm->devhandle;
 641	
 642	local_irq_save(flags);
 643
 644	sg = sglist;
 645	while (nelems--) {
 646		dma_addr_t dma_handle = sg->dma_address;
 647		unsigned int len = sg->dma_length;
 648		unsigned long npages;
 649		struct iommu_map_table *tbl;
 650		unsigned long shift = IO_PAGE_SHIFT;
 651
 652		if (!len)
 653			break;
 654		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
 655
 656		if (dma_handle <= DMA_BIT_MASK(32)) {
  657			iotsb_num = 0; /* not used by the legacy iommu */
 658			tbl = &iommu->tbl;
 659		} else {
 660			iotsb_num = atu->iotsb->iotsb_num;
 661			tbl = &atu->tbl;
 662		}
 663		entry = ((dma_handle - tbl->table_map_base) >> shift);
 664		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
 665				   entry, npages);
 666		iommu_tbl_range_free(tbl, dma_handle, npages,
 667				     IOMMU_ERROR_CODE);
 668		sg = sg_next(sg);
 669	}
 670
 671	local_irq_restore(flags);
 672}
 673
 674static int dma_4v_supported(struct device *dev, u64 device_mask)
 675{
 676	struct iommu *iommu = dev->archdata.iommu;
 677	u64 dma_addr_mask = iommu->dma_addr_mask;
 678
 679	if (device_mask > DMA_BIT_MASK(32)) {
 680		if (iommu->atu)
 681			dma_addr_mask = iommu->atu->dma_addr_mask;
 682		else
 683			return 0;
 684	}
 685
 686	if ((device_mask & dma_addr_mask) == dma_addr_mask)
 687		return 1;
 688	return pci64_dma_supported(to_pci_dev(dev), device_mask);
 689}
 690
 691static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr)
 692{
 693	return dma_addr == SPARC_MAPPING_ERROR;
 694}
 695
 696static const struct dma_map_ops sun4v_dma_ops = {
 697	.alloc				= dma_4v_alloc_coherent,
 698	.free				= dma_4v_free_coherent,
 699	.map_page			= dma_4v_map_page,
 700	.unmap_page			= dma_4v_unmap_page,
 701	.map_sg				= dma_4v_map_sg,
 702	.unmap_sg			= dma_4v_unmap_sg,
 703	.dma_supported			= dma_4v_supported,
 704	.mapping_error			= dma_4v_mapping_error,
 705};
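/* Drivers never call these entry points directly. Once probe sets
 * dma_ops = &sun4v_dma_ops, an ordinary call such as
 *
 *	buf = dma_alloc_coherent(&pdev->dev, sz, &handle, GFP_KERNEL);
 *
 * is dispatched to dma_4v_alloc_coherent() above ('pdev', 'sz' and
 * 'handle' are illustrative names, not part of this file).
 */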
 706
 707static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 708{
 709	struct property *prop;
 710	struct device_node *dp;
 711
 712	dp = pbm->op->dev.of_node;
 713	prop = of_find_property(dp, "66mhz-capable", NULL);
 714	pbm->is_66mhz_capable = (prop != NULL);
 715	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
 716
 717	/* XXX register error interrupt handlers XXX */
 718}
 719
 720static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 721					    struct iommu_map_table *iommu)
 722{
 723	struct iommu_pool *pool;
 724	unsigned long i, pool_nr, cnt = 0;
 725	u32 devhandle;
 726
 727	devhandle = pbm->devhandle;
 728	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
 729		pool = &(iommu->pools[pool_nr]);
 730		for (i = pool->start; i <= pool->end; i++) {
 731			unsigned long ret, io_attrs, ra;
 732
 733			ret = pci_sun4v_iommu_getmap(devhandle,
 734						     HV_PCI_TSBID(0, i),
 735						     &io_attrs, &ra);
 736			if (ret == HV_EOK) {
 737				if (page_in_phys_avail(ra)) {
 738					pci_sun4v_iommu_demap(devhandle,
 739							      HV_PCI_TSBID(0,
 740							      i), 1);
 741				} else {
 742					cnt++;
 743					__set_bit(i, iommu->map);
 744				}
 745			}
 746		}
 747	}
 748	return cnt;
 749}
 750
 751static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
 752{
 753	struct atu *atu = pbm->iommu->atu;
 754	struct atu_iotsb *iotsb;
 755	void *table;
 756	u64 table_size;
 757	u64 iotsb_num;
 758	unsigned long order;
 759	unsigned long err;
 760
 761	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
 762	if (!iotsb) {
 763		err = -ENOMEM;
 764		goto out_err;
 765	}
 766	atu->iotsb = iotsb;
 767
 768	/* calculate size of IOTSB */
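	/* Each IOTTE is 8 bytes, so with the fixed 32G ATU space and an
	 * assumed 8K IO_PAGE_SIZE this works out to (32G / 8K) * 8 = 32M
	 * of IOTSB, i.e. 4M entries.
	 */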
 769	table_size = (atu->size / IO_PAGE_SIZE) * 8;
 770	order = get_order(table_size);
 771	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 772	if (!table) {
 773		err = -ENOMEM;
 774		goto table_failed;
 775	}
 776	iotsb->table = table;
 777	iotsb->ra = __pa(table);
 778	iotsb->dvma_size = atu->size;
 779	iotsb->dvma_base = atu->base;
 780	iotsb->table_size = table_size;
 781	iotsb->page_size = IO_PAGE_SIZE;
 782
 783	/* configure and register IOTSB with HV */
 784	err = pci_sun4v_iotsb_conf(pbm->devhandle,
 785				   iotsb->ra,
 786				   iotsb->table_size,
 787				   iotsb->page_size,
 788				   iotsb->dvma_base,
 789				   &iotsb_num);
 790	if (err) {
 791		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
 792		goto iotsb_conf_failed;
 793	}
 794	iotsb->iotsb_num = iotsb_num;
 795
 796	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
 797	if (err) {
 798		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
 799		goto iotsb_conf_failed;
 800	}
 801
 802	return 0;
 803
 804iotsb_conf_failed:
 805	free_pages((unsigned long)table, order);
 806table_failed:
 807	kfree(iotsb);
 808out_err:
 809	return err;
 810}
 811
 812static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
 813{
 814	struct atu *atu = pbm->iommu->atu;
 815	unsigned long err;
 816	const u64 *ranges;
 817	u64 map_size, num_iotte;
 818	u64 dma_mask;
 819	const u32 *page_size;
 820	int len;
 821
 822	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
 823				 &len);
 824	if (!ranges) {
 825		pr_err(PFX "No iommu-address-ranges\n");
 826		return -EINVAL;
 827	}
 828
 829	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
 830				    NULL);
 831	if (!page_size) {
 832		pr_err(PFX "No iommu-pagesizes\n");
 833		return -EINVAL;
 834	}
 835
  836	/* There are 4 iommu-address-ranges supported. Each range is a pair
  837	 * of {base, size}. ranges[0] and ranges[1] are 32-bit address space,
  838	 * while ranges[2] and ranges[3] are 64-bit space.  We want to use the
  839	 * 64-bit address ranges to support 64-bit addressing. Because the
  840	 * 'size' of ranges[2] and ranges[3] is the same, we can select
  841	 * either of them for mapping. However, since that size is too large
  842	 * for the OS to allocate an IOTSB, we use a fixed size of 32G
  843	 * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe
  844	 * devices to share.
  845	 */
 846	atu->ranges = (struct atu_ranges *)ranges;
 847	atu->base = atu->ranges[3].base;
 848	atu->size = ATU_64_SPACE_SIZE;
 849
 850	/* Create IOTSB */
 851	err = pci_sun4v_atu_alloc_iotsb(pbm);
 852	if (err) {
 853		pr_err(PFX "Error creating ATU IOTSB\n");
 854		return err;
 855	}
 856
 857	/* Create ATU iommu map.
 858	 * One bit represents one iotte in IOTSB table.
 859	 */
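	/* Worked example, assuming the 32G space and 8K IO pages above:
	 * 4M iottes, giving a 512K bitmap.
	 */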
 860	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
 861	num_iotte = atu->size / IO_PAGE_SIZE;
 862	map_size = num_iotte / 8;
 863	atu->tbl.table_map_base = atu->base;
 864	atu->dma_addr_mask = dma_mask;
 865	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
 866	if (!atu->tbl.map)
 867		return -ENOMEM;
 868
 869	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
 870			    NULL, false /* no large_pool */,
 871			    0 /* default npools */,
 872			    false /* want span boundary checking */);
 873
 874	return 0;
 875}
 876
 877static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 878{
 879	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
 880	struct iommu *iommu = pbm->iommu;
 881	unsigned long num_tsb_entries, sz;
 882	u32 dma_mask, dma_offset;
 883	const u32 *vdma;
 884
 885	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
 886	if (!vdma)
 887		vdma = vdma_default;
 888
 889	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
 890		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
 891		       vdma[0], vdma[1]);
 892		return -EINVAL;
 893	}
 894
 895	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
 896	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
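	/* With vdma_default this yields dma_mask = 0x7fffffff and
	 * 0x80000000 / 8K = 256K TSB entries (assuming 8K IO pages).
	 */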
 897
 898	dma_offset = vdma[0];
 899
 900	/* Setup initial software IOMMU state. */
 901	spin_lock_init(&iommu->lock);
 902	iommu->ctx_lowest_free = 1;
 903	iommu->tbl.table_map_base = dma_offset;
 904	iommu->dma_addr_mask = dma_mask;
 905
 906	/* Allocate and initialize the free area map.  */
 907	sz = (num_tsb_entries + 7) / 8;
 908	sz = (sz + 7UL) & ~7UL;
 909	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
 910	if (!iommu->tbl.map) {
 911		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
 912		return -ENOMEM;
 913	}
 914	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
 915			    NULL, false /* no large_pool */,
 916			    0 /* default npools */,
 917			    false /* want span boundary checking */);
 918	sz = probe_existing_entries(pbm, &iommu->tbl);
 919	if (sz)
 920		printk("%s: Imported %lu TSB entries from OBP\n",
 921		       pbm->name, sz);
 922
 923	return 0;
 924}
 925
 926#ifdef CONFIG_PCI_MSI
 927struct pci_sun4v_msiq_entry {
 928	u64		version_type;
 929#define MSIQ_VERSION_MASK		0xffffffff00000000UL
 930#define MSIQ_VERSION_SHIFT		32
 931#define MSIQ_TYPE_MASK			0x00000000000000ffUL
 932#define MSIQ_TYPE_SHIFT			0
 933#define MSIQ_TYPE_NONE			0x00
 934#define MSIQ_TYPE_MSG			0x01
 935#define MSIQ_TYPE_MSI32			0x02
 936#define MSIQ_TYPE_MSI64			0x03
 937#define MSIQ_TYPE_INTX			0x08
 938#define MSIQ_TYPE_NONE2			0xff
 939
 940	u64		intx_sysino;
 941	u64		reserved1;
 942	u64		stick;
 943	u64		req_id;  /* bus/device/func */
 944#define MSIQ_REQID_BUS_MASK		0xff00UL
 945#define MSIQ_REQID_BUS_SHIFT		8
 946#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
 947#define MSIQ_REQID_DEVICE_SHIFT		3
 948#define MSIQ_REQID_FUNC_MASK		0x0007UL
 949#define MSIQ_REQID_FUNC_SHIFT		0
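/* Illustrative decode of req_id using the masks above (this driver
 * never actually unpacks it):
 *
 *	bus = (ep->req_id & MSIQ_REQID_BUS_MASK) >> MSIQ_REQID_BUS_SHIFT;
 *	dev = (ep->req_id & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;
 *	fn  = (ep->req_id & MSIQ_REQID_FUNC_MASK) >> MSIQ_REQID_FUNC_SHIFT;
 */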
 950
 951	u64		msi_address;
 952
 953	/* The format of this value is message type dependent.
 954	 * For MSI bits 15:0 are the data from the MSI packet.
 955	 * For MSI-X bits 31:0 are the data from the MSI packet.
 956	 * For MSG, the message code and message routing code where:
 957	 * 	bits 39:32 is the bus/device/fn of the msg target-id
 958	 *	bits 18:16 is the message routing code
 959	 *	bits 7:0 is the message code
 960	 * For INTx the low order 2-bits are:
 961	 *	00 - INTA
 962	 *	01 - INTB
 963	 *	10 - INTC
 964	 *	11 - INTD
 965	 */
 966	u64		msi_data;
 967
 968	u64		reserved2;
 969};
 970
 971static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
 972			      unsigned long *head)
 973{
 974	unsigned long err, limit;
 975
 976	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
 977	if (unlikely(err))
 978		return -ENXIO;
 979
 980	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
 981	if (unlikely(*head >= limit))
 982		return -EFBIG;
 983
 984	return 0;
 985}
 986
 987static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
 988				 unsigned long msiqid, unsigned long *head,
 989				 unsigned long *msi)
 990{
 991	struct pci_sun4v_msiq_entry *ep;
 992	unsigned long err, type;
 993
 994	/* Note: void pointer arithmetic, 'head' is a byte offset  */
 995	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
 996				 (pbm->msiq_ent_count *
 997				  sizeof(struct pci_sun4v_msiq_entry))) +
 998	      *head);
 999
1000	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
1001		return 0;
1002
1003	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
1004	if (unlikely(type != MSIQ_TYPE_MSI32 &&
1005		     type != MSIQ_TYPE_MSI64))
1006		return -EINVAL;
1007
1008	*msi = ep->msi_data;
1009
1010	err = pci_sun4v_msi_setstate(pbm->devhandle,
1011				     ep->msi_data /* msi_num */,
1012				     HV_MSISTATE_IDLE);
1013	if (unlikely(err))
1014		return -ENXIO;
1015
1016	/* Clear the entry.  */
1017	ep->version_type &= ~MSIQ_TYPE_MASK;
1018
1019	(*head) += sizeof(struct pci_sun4v_msiq_entry);
1020	if (*head >=
1021	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
1022		*head = 0;
1023
1024	return 1;
1025}
1026
1027static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
1028			      unsigned long head)
1029{
1030	unsigned long err;
1031
1032	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
1033	if (unlikely(err))
1034		return -EINVAL;
1035
1036	return 0;
1037}
1038
1039static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
1040			       unsigned long msi, int is_msi64)
1041{
1042	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
1043				  (is_msi64 ?
1044				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
1045		return -ENXIO;
1046	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
1047		return -ENXIO;
1048	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
1049		return -ENXIO;
1050	return 0;
1051}
1052
1053static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
1054{
1055	unsigned long err, msiqid;
1056
1057	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
1058	if (err)
1059		return -ENXIO;
1060
1061	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
1062
1063	return 0;
1064}
1065
1066static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
1067{
1068	unsigned long q_size, alloc_size, pages, order;
1069	int i;
1070
1071	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
1072	alloc_size = (pbm->msiq_num * q_size);
1073	order = get_order(alloc_size);
1074	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
1075	if (pages == 0UL) {
1076		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
1077		       order);
1078		return -ENOMEM;
1079	}
1080	memset((char *)pages, 0, PAGE_SIZE << order);
1081	pbm->msi_queues = (void *) pages;
1082
1083	for (i = 0; i < pbm->msiq_num; i++) {
1084		unsigned long err, base = __pa(pages + (i * q_size));
1085		unsigned long ret1, ret2;
1086
1087		err = pci_sun4v_msiq_conf(pbm->devhandle,
1088					  pbm->msiq_first + i,
1089					  base, pbm->msiq_ent_count);
1090		if (err) {
1091			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
1092			       err);
1093			goto h_error;
1094		}
1095
1096		err = pci_sun4v_msiq_info(pbm->devhandle,
1097					  pbm->msiq_first + i,
1098					  &ret1, &ret2);
1099		if (err) {
1100			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
1101			       err);
1102			goto h_error;
1103		}
1104		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
1105			printk(KERN_ERR "MSI: Bogus qconf "
1106			       "expected[%lx:%x] got[%lx:%lx]\n",
1107			       base, pbm->msiq_ent_count,
1108			       ret1, ret2);
1109			goto h_error;
1110		}
1111	}
1112
1113	return 0;
1114
1115h_error:
1116	free_pages(pages, order);
1117	return -EINVAL;
1118}
1119
1120static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
1121{
1122	unsigned long q_size, alloc_size, pages, order;
1123	int i;
1124
1125	for (i = 0; i < pbm->msiq_num; i++) {
1126		unsigned long msiqid = pbm->msiq_first + i;
1127
1128		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
1129	}
1130
1131	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
1132	alloc_size = (pbm->msiq_num * q_size);
1133	order = get_order(alloc_size);
1134
1135	pages = (unsigned long) pbm->msi_queues;
1136
1137	free_pages(pages, order);
1138
1139	pbm->msi_queues = NULL;
1140}
1141
1142static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
1143				    unsigned long msiqid,
1144				    unsigned long devino)
1145{
1146	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
1147
1148	if (!irq)
1149		return -ENOMEM;
1150
1151	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
1152		return -EINVAL;
1153	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
1154		return -EINVAL;
1155
1156	return irq;
1157}
1158
1159static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
1160	.get_head	=	pci_sun4v_get_head,
1161	.dequeue_msi	=	pci_sun4v_dequeue_msi,
1162	.set_head	=	pci_sun4v_set_head,
1163	.msi_setup	=	pci_sun4v_msi_setup,
1164	.msi_teardown	=	pci_sun4v_msi_teardown,
1165	.msiq_alloc	=	pci_sun4v_msiq_alloc,
1166	.msiq_free	=	pci_sun4v_msiq_free,
1167	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
1168};
1169
1170static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1171{
1172	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
1173}
1174#else /* CONFIG_PCI_MSI */
1175static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1176{
1177}
1178#endif /* !(CONFIG_PCI_MSI) */
1179
1180static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
1181			      struct platform_device *op, u32 devhandle)
1182{
1183	struct device_node *dp = op->dev.of_node;
1184	int err;
1185
1186	pbm->numa_node = of_node_to_nid(dp);
1187
1188	pbm->pci_ops = &sun4v_pci_ops;
1189	pbm->config_space_reg_bits = 12;
1190
1191	pbm->index = pci_num_pbms++;
1192
1193	pbm->op = op;
1194
1195	pbm->devhandle = devhandle;
1196
1197	pbm->name = dp->full_name;
1198
1199	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
1200	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);
1201
1202	pci_determine_mem_io_space(pbm);
1203
1204	pci_get_pbm_props(pbm);
1205
1206	err = pci_sun4v_iommu_init(pbm);
1207	if (err)
1208		return err;
1209
1210	pci_sun4v_msi_init(pbm);
1211
1212	pci_sun4v_scan_bus(pbm, &op->dev);
1213
 1214	/* If atu_init fails, it is not a complete failure;
 1215	 * we can still continue using the legacy iommu.
 1216	 */
1217	if (pbm->iommu->atu) {
1218		err = pci_sun4v_atu_init(pbm);
1219		if (err) {
1220			kfree(pbm->iommu->atu);
1221			pbm->iommu->atu = NULL;
1222			pr_err(PFX "ATU init failed, err=%d\n", err);
1223		}
1224	}
1225
1226	pbm->next = pci_pbm_root;
1227	pci_pbm_root = pbm;
1228
1229	return 0;
1230}
1231
1232static int pci_sun4v_probe(struct platform_device *op)
1233{
1234	const struct linux_prom64_registers *regs;
1235	static int hvapi_negotiated = 0;
1236	struct pci_pbm_info *pbm;
1237	struct device_node *dp;
1238	struct iommu *iommu;
1239	struct atu *atu;
1240	u32 devhandle;
1241	int i, err = -ENODEV;
1242	static bool hv_atu = true;
1243
1244	dp = op->dev.of_node;
1245
1246	if (!hvapi_negotiated++) {
1247		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
1248			vpci_major = vpci_versions[i].major;
1249			vpci_minor = vpci_versions[i].minor;
1250
1251			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
1252						   &vpci_minor);
1253			if (!err)
1254				break;
1255		}
1256
1257		if (err) {
1258			pr_err(PFX "Could not register hvapi, err=%d\n", err);
1259			return err;
1260		}
1261		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
1262			vpci_major, vpci_minor);
1263
1264		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
1265		if (err) {
1266			/* don't return an error if we fail to register the
1267			 * ATU group, but ATU hcalls won't be available.
1268			 */
1269			hv_atu = false;
1270		} else {
1271			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
1272				vatu_major, vatu_minor);
1273		}
1274
1275		dma_ops = &sun4v_dma_ops;
1276	}
1277
1278	regs = of_get_property(dp, "reg", NULL);
1279	err = -ENODEV;
1280	if (!regs) {
1281		printk(KERN_ERR PFX "Could not find config registers\n");
1282		goto out_err;
1283	}
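	/* The device handle is carried in the upper 32 bits of the first
	 * "reg" physical address; only its low 28 bits are meaningful.
	 */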
1284	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1285
1286	err = -ENOMEM;
1287	if (!iommu_batch_initialized) {
1288		for_each_possible_cpu(i) {
1289			unsigned long page = get_zeroed_page(GFP_KERNEL);
1290
1291			if (!page)
1292				goto out_err;
1293
1294			per_cpu(iommu_batch, i).pglist = (u64 *) page;
1295		}
1296		iommu_batch_initialized = 1;
1297	}
1298
1299	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
1300	if (!pbm) {
1301		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
1302		goto out_err;
1303	}
1304
1305	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
1306	if (!iommu) {
1307		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
1308		goto out_free_controller;
1309	}
1310
1311	pbm->iommu = iommu;
1312	iommu->atu = NULL;
1313	if (hv_atu) {
1314		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
1315		if (!atu)
1316			pr_err(PFX "Could not allocate atu\n");
1317		else
1318			iommu->atu = atu;
1319	}
1320
1321	err = pci_sun4v_pbm_init(pbm, op, devhandle);
1322	if (err)
1323		goto out_free_iommu;
1324
1325	dev_set_drvdata(&op->dev, pbm);
1326
1327	return 0;
1328
1329out_free_iommu:
1330	kfree(iommu->atu);
1331	kfree(pbm->iommu);
1332
1333out_free_controller:
1334	kfree(pbm);
1335
1336out_err:
1337	return err;
1338}
1339
1340static const struct of_device_id pci_sun4v_match[] = {
1341	{
1342		.name = "pci",
1343		.compatible = "SUNW,sun4v-pci",
1344	},
1345	{},
1346};
1347
1348static struct platform_driver pci_sun4v_driver = {
1349	.driver = {
1350		.name = DRIVER_NAME,
1351		.of_match_table = pci_sun4v_match,
1352	},
1353	.probe		= pci_sun4v_probe,
1354};
1355
1356static int __init pci_sun4v_init(void)
1357{
1358	return platform_driver_register(&pci_sun4v_driver);
1359}
1360
1361subsys_initcall(pci_sun4v_init);
v3.5.6
   1/* pci_sun4v.c: SUN4V specific PCI controller support.
   2 *
   3 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/types.h>
   8#include <linux/pci.h>
   9#include <linux/init.h>
  10#include <linux/slab.h>
  11#include <linux/interrupt.h>
  12#include <linux/percpu.h>
  13#include <linux/irq.h>
  14#include <linux/msi.h>
  15#include <linux/export.h>
  16#include <linux/log2.h>
  17#include <linux/of_device.h>
  18
  19#include <asm/iommu.h>
  20#include <asm/irq.h>
  21#include <asm/hypervisor.h>
  22#include <asm/prom.h>
  23
  24#include "pci_impl.h"
  25#include "iommu_common.h"
  26
  27#include "pci_sun4v.h"
  28
  29#define DRIVER_NAME	"pci_sun4v"
  30#define PFX		DRIVER_NAME ": "
  31
  32static unsigned long vpci_major = 1;
  33static unsigned long vpci_minor = 1;
  34
  35#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
  36
  37struct iommu_batch {
  38	struct device	*dev;		/* Device mapping is for.	*/
  39	unsigned long	prot;		/* IOMMU page protections	*/
  40	unsigned long	entry;		/* Index into IOTSB.		*/
  41	u64		*pglist;	/* List of physical pages	*/
  42	unsigned long	npages;		/* Number of pages in list.	*/
  43};
  44
  45static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
  46static int iommu_batch_initialized;
  47
  48/* Interrupts must be disabled.  */
  49static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
  50{
  51	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
  52
  53	p->dev		= dev;
  54	p->prot		= prot;
  55	p->entry	= entry;
  56	p->npages	= 0;
  57}
  58
  59/* Interrupts must be disabled.  */
  60static long iommu_batch_flush(struct iommu_batch *p)
  61{
  62	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
  63	unsigned long devhandle = pbm->devhandle;
  64	unsigned long prot = p->prot;
  65	unsigned long entry = p->entry;
  66	u64 *pglist = p->pglist;
  67	unsigned long npages = p->npages;
  68
  69	while (npages != 0) {
  70		long num;
  71
  72		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
  73					  npages, prot, __pa(pglist));
  74		if (unlikely(num < 0)) {
  75			if (printk_ratelimit())
  76				printk("iommu_batch_flush: IOMMU map of "
  77				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
  78				       "status %ld\n",
  79				       devhandle, HV_PCI_TSBID(0, entry),
  80				       npages, prot, __pa(pglist), num);
  81			return -1;
  82		}
  83
  84		entry += num;
  85		npages -= num;
  86		pglist += num;
  87	}
  88
  89	p->entry = entry;
  90	p->npages = 0;
  91
  92	return 0;
  93}
  94
  95static inline void iommu_batch_new_entry(unsigned long entry)
  96{
  97	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
  98
  99	if (p->entry + p->npages == entry)
 100		return;
 101	if (p->entry != ~0UL)
 102		iommu_batch_flush(p);
 103	p->entry = entry;
 104}
 105
 106/* Interrupts must be disabled.  */
 107static inline long iommu_batch_add(u64 phys_page)
 108{
 109	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 110
 111	BUG_ON(p->npages >= PGLIST_NENTS);
 112
 113	p->pglist[p->npages++] = phys_page;
 114	if (p->npages == PGLIST_NENTS)
 115		return iommu_batch_flush(p);
 116
 117	return 0;
 118}
 119
 120/* Interrupts must be disabled.  */
 121static inline long iommu_batch_end(void)
 122{
 123	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 124
 125	BUG_ON(p->npages >= PGLIST_NENTS);
 126
 127	return iommu_batch_flush(p);
 128}
 129
 130static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 131				   dma_addr_t *dma_addrp, gfp_t gfp,
 132				   struct dma_attrs *attrs)
 133{
 134	unsigned long flags, order, first_page, npages, n;
 135	struct iommu *iommu;
 136	struct page *page;
 137	void *ret;
 138	long entry;
 139	int nid;
 140
 141	size = IO_PAGE_ALIGN(size);
 142	order = get_order(size);
 143	if (unlikely(order >= MAX_ORDER))
 144		return NULL;
 145
 146	npages = size >> IO_PAGE_SHIFT;
 147
 148	nid = dev->archdata.numa_node;
 149	page = alloc_pages_node(nid, gfp, order);
 150	if (unlikely(!page))
 151		return NULL;
 152
 153	first_page = (unsigned long) page_address(page);
 154	memset((char *)first_page, 0, PAGE_SIZE << order);
 155
 156	iommu = dev->archdata.iommu;
 157
 158	spin_lock_irqsave(&iommu->lock, flags);
 159	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 160	spin_unlock_irqrestore(&iommu->lock, flags);
 161
 162	if (unlikely(entry == DMA_ERROR_CODE))
 163		goto range_alloc_fail;
 164
 165	*dma_addrp = (iommu->page_table_map_base +
 166		      (entry << IO_PAGE_SHIFT));
 167	ret = (void *) first_page;
 168	first_page = __pa(first_page);
 169
 170	local_irq_save(flags);
 171
 172	iommu_batch_start(dev,
 173			  (HV_PCI_MAP_ATTR_READ |
 174			   HV_PCI_MAP_ATTR_WRITE),
 175			  entry);
 176
 177	for (n = 0; n < npages; n++) {
 178		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
 179		if (unlikely(err < 0L))
 180			goto iommu_map_fail;
 181	}
 182
 183	if (unlikely(iommu_batch_end() < 0L))
 184		goto iommu_map_fail;
 185
 186	local_irq_restore(flags);
 187
 188	return ret;
 189
 190iommu_map_fail:
 191	/* Interrupts are disabled.  */
 192	spin_lock(&iommu->lock);
 193	iommu_range_free(iommu, *dma_addrp, npages);
 194	spin_unlock_irqrestore(&iommu->lock, flags);
 195
 196range_alloc_fail:
 197	free_pages(first_page, order);
 198	return NULL;
 199}
 200
 201static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 202				 dma_addr_t dvma, struct dma_attrs *attrs)
 203{
 204	struct pci_pbm_info *pbm;
 205	struct iommu *iommu;
 206	unsigned long flags, order, npages, entry;
 207	u32 devhandle;
 208
 209	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 210	iommu = dev->archdata.iommu;
 211	pbm = dev->archdata.host_controller;
 212	devhandle = pbm->devhandle;
 213	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 214
 215	spin_lock_irqsave(&iommu->lock, flags);
 216
 217	iommu_range_free(iommu, dvma, npages);
 218
 219	do {
 220		unsigned long num;
 221
 222		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
 223					    npages);
 224		entry += num;
 225		npages -= num;
 226	} while (npages != 0);
 227
 228	spin_unlock_irqrestore(&iommu->lock, flags);
 229
 230	order = get_order(size);
 231	if (order < 10)
 232		free_pages((unsigned long)cpu, order);
 233}
 234
 235static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 236				  unsigned long offset, size_t sz,
 237				  enum dma_data_direction direction,
 238				  struct dma_attrs *attrs)
 239{
 240	struct iommu *iommu;
 241	unsigned long flags, npages, oaddr;
 242	unsigned long i, base_paddr;
 243	u32 bus_addr, ret;
 244	unsigned long prot;
 245	long entry;
 246
 247	iommu = dev->archdata.iommu;
 248
 249	if (unlikely(direction == DMA_NONE))
 250		goto bad;
 251
 252	oaddr = (unsigned long)(page_address(page) + offset);
 253	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 254	npages >>= IO_PAGE_SHIFT;
 255
 256	spin_lock_irqsave(&iommu->lock, flags);
 257	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 258	spin_unlock_irqrestore(&iommu->lock, flags);
 259
 260	if (unlikely(entry == DMA_ERROR_CODE))
 261		goto bad;
 262
 263	bus_addr = (iommu->page_table_map_base +
 264		    (entry << IO_PAGE_SHIFT));
 265	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 266	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 267	prot = HV_PCI_MAP_ATTR_READ;
 268	if (direction != DMA_TO_DEVICE)
 269		prot |= HV_PCI_MAP_ATTR_WRITE;
 270
 271	local_irq_save(flags);
 272
 273	iommu_batch_start(dev, prot, entry);
 274
 275	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
 276		long err = iommu_batch_add(base_paddr);
 277		if (unlikely(err < 0L))
 278			goto iommu_map_fail;
 279	}
 280	if (unlikely(iommu_batch_end() < 0L))
 281		goto iommu_map_fail;
 282
 283	local_irq_restore(flags);
 284
 285	return ret;
 286
 287bad:
 288	if (printk_ratelimit())
 289		WARN_ON(1);
 290	return DMA_ERROR_CODE;
 291
 292iommu_map_fail:
 293	/* Interrupts are disabled.  */
 294	spin_lock(&iommu->lock);
 295	iommu_range_free(iommu, bus_addr, npages);
 296	spin_unlock_irqrestore(&iommu->lock, flags);
 297
 298	return DMA_ERROR_CODE;
 299}
 300
 301static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 302			      size_t sz, enum dma_data_direction direction,
 303			      struct dma_attrs *attrs)
 304{
 305	struct pci_pbm_info *pbm;
 306	struct iommu *iommu;
 307	unsigned long flags, npages;
 308	long entry;
 309	u32 devhandle;
 310
 311	if (unlikely(direction == DMA_NONE)) {
 312		if (printk_ratelimit())
 313			WARN_ON(1);
 314		return;
 315	}
 316
 317	iommu = dev->archdata.iommu;
 318	pbm = dev->archdata.host_controller;
 319	devhandle = pbm->devhandle;
 320
 321	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 322	npages >>= IO_PAGE_SHIFT;
 323	bus_addr &= IO_PAGE_MASK;
 324
 325	spin_lock_irqsave(&iommu->lock, flags);
 326
 327	iommu_range_free(iommu, bus_addr, npages);
 328
 329	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
 330	do {
 331		unsigned long num;
 332
 333		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
 334					    npages);
 335		entry += num;
 336		npages -= num;
 337	} while (npages != 0);
 338
 339	spin_unlock_irqrestore(&iommu->lock, flags);
 340}
 341
 342static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 343			 int nelems, enum dma_data_direction direction,
 344			 struct dma_attrs *attrs)
 345{
 346	struct scatterlist *s, *outs, *segstart;
 347	unsigned long flags, handle, prot;
 348	dma_addr_t dma_next = 0, dma_addr;
 349	unsigned int max_seg_size;
 350	unsigned long seg_boundary_size;
 351	int outcount, incount, i;
 352	struct iommu *iommu;
 353	unsigned long base_shift;
 354	long err;
 355
 356	BUG_ON(direction == DMA_NONE);
 357
 358	iommu = dev->archdata.iommu;
 359	if (nelems == 0 || !iommu)
 360		return 0;
 361	
 362	prot = HV_PCI_MAP_ATTR_READ;
 363	if (direction != DMA_TO_DEVICE)
 364		prot |= HV_PCI_MAP_ATTR_WRITE;
 365
 366	outs = s = segstart = &sglist[0];
 367	outcount = 1;
 368	incount = nelems;
 369	handle = 0;
 370
 371	/* Init first segment length for backout at failure */
 372	outs->dma_length = 0;
 373
 374	spin_lock_irqsave(&iommu->lock, flags);
 375
 376	iommu_batch_start(dev, prot, ~0UL);
 377
 378	max_seg_size = dma_get_max_seg_size(dev);
 379	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 380				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
 381	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 382	for_each_sg(sglist, s, nelems, i) {
 383		unsigned long paddr, npages, entry, out_entry = 0, slen;
 384
 385		slen = s->length;
 386		/* Sanity check */
 387		if (slen == 0) {
 388			dma_next = 0;
 389			continue;
 390		}
 391		/* Allocate iommu entries for that segment */
 392		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 393		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
 394		entry = iommu_range_alloc(dev, iommu, npages, &handle);
 395
 396		/* Handle failure */
 397		if (unlikely(entry == DMA_ERROR_CODE)) {
 398			if (printk_ratelimit())
 399				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
 400				       " npages %lx\n", iommu, paddr, npages);
 401			goto iommu_map_failed;
 402		}
 403
 404		iommu_batch_new_entry(entry);
 405
 406		/* Convert entry to a dma_addr_t */
 407		dma_addr = iommu->page_table_map_base +
 408			(entry << IO_PAGE_SHIFT);
 409		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 410
 411		/* Insert into HW table */
 412		paddr &= IO_PAGE_MASK;
 413		while (npages--) {
 414			err = iommu_batch_add(paddr);
 415			if (unlikely(err < 0L))
 416				goto iommu_map_failed;
 417			paddr += IO_PAGE_SIZE;
 418		}
 419
 420		/* If we are in an open segment, try merging */
 421		if (segstart != s) {
 422			/* We cannot merge if:
 423			 * - allocated dma_addr isn't contiguous to previous allocation
 424			 */
 425			if ((dma_addr != dma_next) ||
 426			    (outs->dma_length + s->length > max_seg_size) ||
 427			    (is_span_boundary(out_entry, base_shift,
 428					      seg_boundary_size, outs, s))) {
 429				/* Can't merge: create a new segment */
 430				segstart = s;
 431				outcount++;
 432				outs = sg_next(outs);
 433			} else {
 434				outs->dma_length += s->length;
 435			}
 436		}
 437
 438		if (segstart == s) {
 439			/* This is a new segment, fill entries */
 440			outs->dma_address = dma_addr;
 441			outs->dma_length = slen;
 442			out_entry = entry;
 443		}
 444
 445		/* Calculate next page pointer for contiguous check */
 446		dma_next = dma_addr + slen;
 447	}
 448
 449	err = iommu_batch_end();
 450
 451	if (unlikely(err < 0L))
 452		goto iommu_map_failed;
 453
 454	spin_unlock_irqrestore(&iommu->lock, flags);
 455
 456	if (outcount < incount) {
 457		outs = sg_next(outs);
 458		outs->dma_address = DMA_ERROR_CODE;
 459		outs->dma_length = 0;
 460	}
 461
 462	return outcount;
 463
 464iommu_map_failed:
 465	for_each_sg(sglist, s, nelems, i) {
 466		if (s->dma_length != 0) {
 467			unsigned long vaddr, npages;
 468
 469			vaddr = s->dma_address & IO_PAGE_MASK;
 470			npages = iommu_num_pages(s->dma_address, s->dma_length,
 471						 IO_PAGE_SIZE);
 472			iommu_range_free(iommu, vaddr, npages);
 473			/* XXX demap? XXX */
 474			s->dma_address = DMA_ERROR_CODE;
 475			s->dma_length = 0;
 476		}
 477		if (s == outs)
 478			break;
 479	}
 480	spin_unlock_irqrestore(&iommu->lock, flags);
 481
 482	return 0;
 483}
 484
 485static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 486			    int nelems, enum dma_data_direction direction,
 487			    struct dma_attrs *attrs)
 488{
 489	struct pci_pbm_info *pbm;
 490	struct scatterlist *sg;
 491	struct iommu *iommu;
 492	unsigned long flags;
 493	u32 devhandle;
 494
 495	BUG_ON(direction == DMA_NONE);
 496
 497	iommu = dev->archdata.iommu;
 498	pbm = dev->archdata.host_controller;
 499	devhandle = pbm->devhandle;
 500	
 501	spin_lock_irqsave(&iommu->lock, flags);
 502
 503	sg = sglist;
 504	while (nelems--) {
 505		dma_addr_t dma_handle = sg->dma_address;
 506		unsigned int len = sg->dma_length;
 507		unsigned long npages, entry;
 508
 509		if (!len)
 510			break;
 511		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
 512		iommu_range_free(iommu, dma_handle, npages);
 513
 514		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 515		while (npages) {
 516			unsigned long num;
 517
 518			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
 519						    npages);
 520			entry += num;
 521			npages -= num;
 522		}
 523
 524		sg = sg_next(sg);
 525	}
 526
 527	spin_unlock_irqrestore(&iommu->lock, flags);
 528}
 529
 530static struct dma_map_ops sun4v_dma_ops = {
 531	.alloc				= dma_4v_alloc_coherent,
 532	.free				= dma_4v_free_coherent,
 533	.map_page			= dma_4v_map_page,
 534	.unmap_page			= dma_4v_unmap_page,
 535	.map_sg				= dma_4v_map_sg,
 536	.unmap_sg			= dma_4v_unmap_sg,
 537};
 538
 539static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
 540					 struct device *parent)
 541{
 542	struct property *prop;
 543	struct device_node *dp;
 544
 545	dp = pbm->op->dev.of_node;
 546	prop = of_find_property(dp, "66mhz-capable", NULL);
 547	pbm->is_66mhz_capable = (prop != NULL);
 548	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
 549
 550	/* XXX register error interrupt handlers XXX */
 551}
 552
 553static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
 554						      struct iommu *iommu)
 555{
 556	struct iommu_arena *arena = &iommu->arena;
 557	unsigned long i, cnt = 0;
 558	u32 devhandle;
 559
 560	devhandle = pbm->devhandle;
 561	for (i = 0; i < arena->limit; i++) {
 562		unsigned long ret, io_attrs, ra;
 563
 564		ret = pci_sun4v_iommu_getmap(devhandle,
 565					     HV_PCI_TSBID(0, i),
 566					     &io_attrs, &ra);
 567		if (ret == HV_EOK) {
 568			if (page_in_phys_avail(ra)) {
 569				pci_sun4v_iommu_demap(devhandle,
 570						      HV_PCI_TSBID(0, i), 1);
 571			} else {
 572				cnt++;
 573				__set_bit(i, arena->map);
 574			}
 575		}
 576	}
 577
 578	return cnt;
 579}
 580
 581static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 582{
 583	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
 584	struct iommu *iommu = pbm->iommu;
 585	unsigned long num_tsb_entries, sz;
 586	u32 dma_mask, dma_offset;
 587	const u32 *vdma;
 588
 589	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
 590	if (!vdma)
 591		vdma = vdma_default;
 592
 593	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
 594		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
 595		       vdma[0], vdma[1]);
 596		return -EINVAL;
  597	}
 598
 599	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
 600	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
 601
 602	dma_offset = vdma[0];
 603
 604	/* Setup initial software IOMMU state. */
 605	spin_lock_init(&iommu->lock);
 606	iommu->ctx_lowest_free = 1;
 607	iommu->page_table_map_base = dma_offset;
 608	iommu->dma_addr_mask = dma_mask;
 609
 610	/* Allocate and initialize the free area map.  */
 611	sz = (num_tsb_entries + 7) / 8;
 612	sz = (sz + 7UL) & ~7UL;
 613	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
 614	if (!iommu->arena.map) {
 615		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
 616		return -ENOMEM;
 617	}
 618	iommu->arena.limit = num_tsb_entries;
 619
 620	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk(KERN_INFO "%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code, where:
	 *	bits 39:32 are the bus/device/fn of the msg target-id
	 *	bits 18:16 are the message routing code
	 *	bits 7:0 are the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

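/* Queue heads are byte offsets into an MSI queue, so a sane head lies
 * in [0, msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)).
 */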
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

 688
 689static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
 690				 unsigned long msiqid, unsigned long *head,
 691				 unsigned long *msi)
 692{
 693	struct pci_sun4v_msiq_entry *ep;
 694	unsigned long err, type;
 695
 696	/* Note: void pointer arithmetic, 'head' is a byte offset  */
 697	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
 698				 (pbm->msiq_ent_count *
 699				  sizeof(struct pci_sun4v_msiq_entry))) +
 700	      *head);
 701
 702	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
 703		return 0;
 704
 705	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
 706	if (unlikely(type != MSIQ_TYPE_MSI32 &&
 707		     type != MSIQ_TYPE_MSI64))
 708		return -EINVAL;
 709
 710	*msi = ep->msi_data;
 711
 712	err = pci_sun4v_msi_setstate(pbm->devhandle,
 713				     ep->msi_data /* msi_num */,
 714				     HV_MSISTATE_IDLE);
 715	if (unlikely(err))
 716		return -ENXIO;
 717
 718	/* Clear the entry.  */
 719	ep->version_type &= ~MSIQ_TYPE_MASK;
 720
 721	(*head) += sizeof(struct pci_sun4v_msiq_entry);
 722	if (*head >=
 723	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
 724		*head = 0;
 725
 726	return 1;
 727}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

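/* Ordering matters below: bind the MSI to its queue and mark it idle
 * before marking it valid, so it cannot fire half-configured.
 */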
static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

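/* All of this PBM's MSI queues are carved out of a single physically
 * contiguous page allocation; each queue is registered with the
 * hypervisor and its configuration read back to verify that it stuck.
 */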
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

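/* Translate a queue's device interrupt number (devino) into a Linux
 * IRQ, then mark the queue valid and idle so events can be delivered.
 */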
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

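/* The generic sparc64 MSI layer drives the ops above; everything
 * hypervisor specific stays in this file.
 */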
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk(KERN_INFO "%s: SUN4V PCI Bus Module\n", pbm->name);
	printk(KERN_INFO "%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

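/* The hypervisor device handle lives in bits 59:32 of the first
 * "reg" cell:
 *
 *	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff
 *
 * and names this PBM in every pci_sun4v_* hypervisor call.
 */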
static int __devinit pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		/* Walk vpci_versions (highest major first) until the
		 * hypervisor accepts one.
		 */
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		/* The ATU hypervisor group is optional; note the version
		 * if registration succeeds, but carry on without it
		 * otherwise.
		 */
		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major,
					   &vatu_minor);
		if (!err)
			printk(KERN_INFO PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
			       vatu_major, vatu_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
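
	/* One zeroed page per possible CPU backs that CPU's iommu_batch
	 * page list; this is set up once, on the first probe.
	 */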
	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);