v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/* pci_sun4v.c: SUN4V specific PCI controller support.
   3 *
   4 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/types.h>
   9#include <linux/pci.h>
  10#include <linux/init.h>
  11#include <linux/slab.h>
  12#include <linux/interrupt.h>
  13#include <linux/percpu.h>
  14#include <linux/irq.h>
  15#include <linux/msi.h>
  16#include <linux/export.h>
  17#include <linux/log2.h>
  18#include <linux/of.h>
  19#include <linux/platform_device.h>
  20#include <linux/dma-map-ops.h>
  21#include <asm/iommu-common.h>
  22
  23#include <asm/iommu.h>
  24#include <asm/irq.h>
  25#include <asm/hypervisor.h>
  26#include <asm/prom.h>
  27
  28#include "pci_impl.h"
  29#include "iommu_common.h"
  30#include "kernel.h"
  31
  32#include "pci_sun4v.h"
  33
  34#define DRIVER_NAME	"pci_sun4v"
  35#define PFX		DRIVER_NAME ": "
  36
  37static unsigned long vpci_major;
  38static unsigned long vpci_minor;
  39
  40struct vpci_version {
  41	unsigned long major;
  42	unsigned long minor;
  43};
  44
  45/* Ordered from largest major to lowest */
  46static struct vpci_version vpci_versions[] = {
  47	{ .major = 2, .minor = 0 },
  48	{ .major = 1, .minor = 1 },
  49};
  50
  51static unsigned long vatu_major = 1;
  52static unsigned long vatu_minor = 1;
  53
  54#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
  55
  56struct iommu_batch {
  57	struct device	*dev;		/* Device mapping is for.	*/
  58	unsigned long	prot;		/* IOMMU page protections	*/
  59	unsigned long	entry;		/* Index into IOTSB.		*/
  60	u64		*pglist;	/* List of physical pages	*/
  61	unsigned long	npages;		/* Number of pages in list.	*/
  62};
  63
  64static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
  65static int iommu_batch_initialized;
  66
  67/* Interrupts must be disabled.  */
  68static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
  69{
  70	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
  71
  72	p->dev		= dev;
  73	p->prot		= prot;
  74	p->entry	= entry;
  75	p->npages	= 0;
  76}
  77
  78static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
  79{
  80	return iommu->atu && mask > DMA_BIT_MASK(32);
  81}
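/* Illustrative note (not in the original file): a driver that sets a
 * 64-bit mask, e.g. dma_set_mask(dev, DMA_BIT_MASK(64)), satisfies
 * mask > DMA_BIT_MASK(32) here, so its mappings are carved from the ATU
 * table when one was initialized; a 32-bit mask stays on the legacy IOMMU.
 */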
  82
  83/* Interrupts must be disabled.  */
  84static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
  85{
  86	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
  87	u64 *pglist = p->pglist;
  88	u64 index_count;
  89	unsigned long devhandle = pbm->devhandle;
  90	unsigned long prot = p->prot;
  91	unsigned long entry = p->entry;
  92	unsigned long npages = p->npages;
  93	unsigned long iotsb_num;
  94	unsigned long ret;
  95	long num;
  96
  97	/* VPCI maj=1, min=[0,1] only supports read and write */
  98	if (vpci_major < 2)
  99		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 100
 101	while (npages != 0) {
 102		if (!iommu_use_atu(pbm->iommu, mask)) {
 103			num = pci_sun4v_iommu_map(devhandle,
 104						  HV_PCI_TSBID(0, entry),
 105						  npages,
 106						  prot,
 107						  __pa(pglist));
 108			if (unlikely(num < 0)) {
 109				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
 110						   __func__,
 111						   devhandle,
 112						   HV_PCI_TSBID(0, entry),
 113						   npages, prot, __pa(pglist),
 114						   num);
 115				return -1;
 116			}
 117		} else {
  118			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
 119			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
 120			ret = pci_sun4v_iotsb_map(devhandle,
 121						  iotsb_num,
 122						  index_count,
 123						  prot,
 124						  __pa(pglist),
 125						  &num);
 126			if (unlikely(ret != HV_EOK)) {
 127				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
 128						   __func__,
 129						   devhandle, iotsb_num,
 130						   index_count, prot,
 131						   __pa(pglist), ret);
 132				return -1;
 133			}
 134		}
 135		entry += num;
 136		npages -= num;
 137		pglist += num;
 138	}
 139
 140	p->entry = entry;
 141	p->npages = 0;
 142
 143	return 0;
 144}
 145
 146static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
 147{
 148	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 149
 150	if (p->entry + p->npages == entry)
 151		return;
 152	if (p->entry != ~0UL)
 153		iommu_batch_flush(p, mask);
 154	p->entry = entry;
 155}
 156
 157/* Interrupts must be disabled.  */
 158static inline long iommu_batch_add(u64 phys_page, u64 mask)
 159{
 160	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 161
 162	BUG_ON(p->npages >= PGLIST_NENTS);
 163
 164	p->pglist[p->npages++] = phys_page;
 165	if (p->npages == PGLIST_NENTS)
 166		return iommu_batch_flush(p, mask);
 167
 168	return 0;
 169}
 170
 171/* Interrupts must be disabled.  */
 172static inline long iommu_batch_end(u64 mask)
 173{
 174	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 175
 176	BUG_ON(p->npages >= PGLIST_NENTS);
 177
 178	return iommu_batch_flush(p, mask);
 179}
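/* Usage sketch (illustrative, mirroring dma_4v_alloc_coherent() and
 * dma_4v_map_page() below): the per-CPU batch is driven start -> add -> end
 * with interrupts disabled, so the per-CPU pglist cannot be refilled
 * concurrently:
 *
 *	local_irq_save(flags);
 *	iommu_batch_start(dev, prot, entry);
 *	for (n = 0; n < npages; n++, paddr += IO_PAGE_SIZE)
 *		if (iommu_batch_add(paddr, mask) < 0L)
 *			goto fail;
 *	if (iommu_batch_end(mask) < 0L)
 *		goto fail;
 *	local_irq_restore(flags);
 */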
 180
 181static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 182				   dma_addr_t *dma_addrp, gfp_t gfp,
 183				   unsigned long attrs)
 184{
 185	u64 mask;
 186	unsigned long flags, order, first_page, npages, n;
 187	unsigned long prot = 0;
 188	struct iommu *iommu;
 189	struct iommu_map_table *tbl;
 190	struct page *page;
 191	void *ret;
 192	long entry;
 193	int nid;
 194
 195	size = IO_PAGE_ALIGN(size);
 196	order = get_order(size);
 197	if (unlikely(order > MAX_PAGE_ORDER))
 198		return NULL;
 199
 200	npages = size >> IO_PAGE_SHIFT;
 201
 202	if (attrs & DMA_ATTR_WEAK_ORDERING)
 203		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;
 204
 205	nid = dev->archdata.numa_node;
 206	page = alloc_pages_node(nid, gfp, order);
 207	if (unlikely(!page))
 208		return NULL;
 209
 210	first_page = (unsigned long) page_address(page);
 211	memset((char *)first_page, 0, PAGE_SIZE << order);
 212
 213	iommu = dev->archdata.iommu;
 214	mask = dev->coherent_dma_mask;
 215	if (!iommu_use_atu(iommu, mask))
 216		tbl = &iommu->tbl;
 217	else
 218		tbl = &iommu->atu->tbl;
 219
 220	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 221				      (unsigned long)(-1), 0);
 222
 223	if (unlikely(entry == IOMMU_ERROR_CODE))
 224		goto range_alloc_fail;
 225
 226	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 227	ret = (void *) first_page;
 228	first_page = __pa(first_page);
 229
 230	local_irq_save(flags);
 231
 232	iommu_batch_start(dev,
 233			  (HV_PCI_MAP_ATTR_READ | prot |
 234			   HV_PCI_MAP_ATTR_WRITE),
 235			  entry);
 236
 237	for (n = 0; n < npages; n++) {
 238		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
 239		if (unlikely(err < 0L))
 240			goto iommu_map_fail;
 241	}
 242
 243	if (unlikely(iommu_batch_end(mask) < 0L))
 244		goto iommu_map_fail;
 245
 246	local_irq_restore(flags);
 247
 248	return ret;
 249
 250iommu_map_fail:
 251	local_irq_restore(flags);
 252	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
 253
 254range_alloc_fail:
 255	free_pages(first_page, order);
 256	return NULL;
 257}
 258
 259static unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
 260				       unsigned long iotsb_num,
 261				       struct pci_bus *bus_dev)
 262{
 263	struct pci_dev *pdev;
 264	unsigned long err;
 265	unsigned int bus;
 266	unsigned int device;
 267	unsigned int fun;
 268
 269	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
 270		if (pdev->subordinate) {
 271			/* No need to bind pci bridge */
 272			dma_4v_iotsb_bind(devhandle, iotsb_num,
 273					  pdev->subordinate);
 274		} else {
 275			bus = bus_dev->number;
 276			device = PCI_SLOT(pdev->devfn);
 277			fun = PCI_FUNC(pdev->devfn);
 278			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
 279						   HV_PCI_DEVICE_BUILD(bus,
 280								       device,
 281								       fun));
 282
 283			/* If bind fails for one device it is going to fail
 284			 * for rest of the devices because we are sharing
 285			 * IOTSB. So in case of failure simply return with
 286			 * error.
 287			 */
 288			if (err)
 289				return err;
 290		}
 291	}
 292
 293	return 0;
 294}
 295
 296static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
 297			       dma_addr_t dvma, unsigned long iotsb_num,
 298			       unsigned long entry, unsigned long npages)
 299{
 300	unsigned long num, flags;
 301	unsigned long ret;
 302
 303	local_irq_save(flags);
 304	do {
 305		if (dvma <= DMA_BIT_MASK(32)) {
 306			num = pci_sun4v_iommu_demap(devhandle,
 307						    HV_PCI_TSBID(0, entry),
 308						    npages);
 309		} else {
 310			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
 311						    entry, npages, &num);
 312			if (unlikely(ret != HV_EOK)) {
 313				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
 314						   ret);
 315			}
 316		}
 317		entry += num;
 318		npages -= num;
 319	} while (npages != 0);
 320	local_irq_restore(flags);
 321}
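/* Note (illustrative): the dvma <= DMA_BIT_MASK(32) test above mirrors
 * iommu_use_atu(): legacy IOMMU handles come from the sub-4G virtual-dma
 * window while ATU/IOTSB handles sit above 4G, so the right demap
 * hypercall can be chosen from the handle alone.
 */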
 322
 323static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 324				 dma_addr_t dvma, unsigned long attrs)
 325{
 326	struct pci_pbm_info *pbm;
 327	struct iommu *iommu;
 328	struct atu *atu;
 329	struct iommu_map_table *tbl;
 330	unsigned long order, npages, entry;
 331	unsigned long iotsb_num;
 332	u32 devhandle;
 333
 334	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 335	iommu = dev->archdata.iommu;
 336	pbm = dev->archdata.host_controller;
 337	atu = iommu->atu;
 338	devhandle = pbm->devhandle;
 339
 340	if (!iommu_use_atu(iommu, dvma)) {
 341		tbl = &iommu->tbl;
 342		iotsb_num = 0; /* we don't care for legacy iommu */
 343	} else {
 344		tbl = &atu->tbl;
 345		iotsb_num = atu->iotsb->iotsb_num;
 346	}
 347	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
 348	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
 349	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
 350	order = get_order(size);
 351	if (order < 10)
 352		free_pages((unsigned long)cpu, order);
 353}
 354
 355static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 356				  unsigned long offset, size_t sz,
 357				  enum dma_data_direction direction,
 358				  unsigned long attrs)
 359{
 360	struct iommu *iommu;
 361	struct atu *atu;
 362	struct iommu_map_table *tbl;
 363	u64 mask;
 364	unsigned long flags, npages, oaddr;
 365	unsigned long i, base_paddr;
 366	unsigned long prot;
 367	dma_addr_t bus_addr, ret;
 368	long entry;
 369
 370	iommu = dev->archdata.iommu;
 371	atu = iommu->atu;
 372
 373	if (unlikely(direction == DMA_NONE))
 374		goto bad;
 375
 376	oaddr = (unsigned long)(page_address(page) + offset);
 377	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 378	npages >>= IO_PAGE_SHIFT;
 379
 380	mask = *dev->dma_mask;
 381	if (!iommu_use_atu(iommu, mask))
 382		tbl = &iommu->tbl;
 383	else
 384		tbl = &atu->tbl;
 385
 386	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 387				      (unsigned long)(-1), 0);
 388
 389	if (unlikely(entry == IOMMU_ERROR_CODE))
 390		goto bad;
 391
 392	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 393	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 394	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 395	prot = HV_PCI_MAP_ATTR_READ;
 396	if (direction != DMA_TO_DEVICE)
 397		prot |= HV_PCI_MAP_ATTR_WRITE;
 398
 399	if (attrs & DMA_ATTR_WEAK_ORDERING)
 400		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
 401
 402	local_irq_save(flags);
 403
 404	iommu_batch_start(dev, prot, entry);
 405
 406	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
 407		long err = iommu_batch_add(base_paddr, mask);
 408		if (unlikely(err < 0L))
 409			goto iommu_map_fail;
 410	}
 411	if (unlikely(iommu_batch_end(mask) < 0L))
 412		goto iommu_map_fail;
 413
 414	local_irq_restore(flags);
 415
 416	return ret;
 417
 418bad:
 419	if (printk_ratelimit())
 420		WARN_ON(1);
 421	return DMA_MAPPING_ERROR;
 422
 423iommu_map_fail:
 424	local_irq_restore(flags);
 425	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 426	return DMA_MAPPING_ERROR;
 427}
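/* Worked example (illustrative, assuming 8K IO pages as on sparc64):
 * mapping sz = 0x100 bytes at page offset 0x1ff0 crosses an IO page
 * boundary, so
 *
 *	npages = (IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK))
 *		 >> IO_PAGE_SHIFT = 2,
 *
 * and the returned handle keeps the sub-page offset:
 * ret = bus_addr | 0x1ff0.
 */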
 428
 429static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 430			      size_t sz, enum dma_data_direction direction,
 431			      unsigned long attrs)
 432{
 433	struct pci_pbm_info *pbm;
 434	struct iommu *iommu;
 435	struct atu *atu;
 436	struct iommu_map_table *tbl;
 437	unsigned long npages;
 438	unsigned long iotsb_num;
 439	long entry;
 440	u32 devhandle;
 441
 442	if (unlikely(direction == DMA_NONE)) {
 443		if (printk_ratelimit())
 444			WARN_ON(1);
 445		return;
 446	}
 447
 448	iommu = dev->archdata.iommu;
 449	pbm = dev->archdata.host_controller;
 450	atu = iommu->atu;
 451	devhandle = pbm->devhandle;
 452
 453	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 454	npages >>= IO_PAGE_SHIFT;
 455	bus_addr &= IO_PAGE_MASK;
 456
 457	if (bus_addr <= DMA_BIT_MASK(32)) {
 458		iotsb_num = 0; /* we don't care for legacy iommu */
 459		tbl = &iommu->tbl;
 460	} else {
 461		iotsb_num = atu->iotsb->iotsb_num;
 462		tbl = &atu->tbl;
 463	}
 464	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
 465	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
 466	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 467}
 468
 469static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 470			 int nelems, enum dma_data_direction direction,
 471			 unsigned long attrs)
 472{
 473	struct scatterlist *s, *outs, *segstart;
 474	unsigned long flags, handle, prot;
 475	dma_addr_t dma_next = 0, dma_addr;
 476	unsigned int max_seg_size;
 477	unsigned long seg_boundary_size;
 478	int outcount, incount, i;
 479	struct iommu *iommu;
 480	struct atu *atu;
 481	struct iommu_map_table *tbl;
 482	u64 mask;
 483	unsigned long base_shift;
 484	long err;
 485
 486	BUG_ON(direction == DMA_NONE);
 487
 488	iommu = dev->archdata.iommu;
 489	if (nelems == 0 || !iommu)
 490		return -EINVAL;
 491	atu = iommu->atu;
 492
 493	prot = HV_PCI_MAP_ATTR_READ;
 494	if (direction != DMA_TO_DEVICE)
 495		prot |= HV_PCI_MAP_ATTR_WRITE;
 496
 497	if (attrs & DMA_ATTR_WEAK_ORDERING)
 498		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
 499
 500	outs = s = segstart = &sglist[0];
 501	outcount = 1;
 502	incount = nelems;
 503	handle = 0;
 504
 505	/* Init first segment length for backout at failure */
 506	outs->dma_length = 0;
 507
 508	local_irq_save(flags);
 509
 510	iommu_batch_start(dev, prot, ~0UL);
 511
 512	max_seg_size = dma_get_max_seg_size(dev);
 513	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
 514
 515	mask = *dev->dma_mask;
 516	if (!iommu_use_atu(iommu, mask))
 517		tbl = &iommu->tbl;
 518	else
 519		tbl = &atu->tbl;
 520
 521	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
 522
 523	for_each_sg(sglist, s, nelems, i) {
 524		unsigned long paddr, npages, entry, out_entry = 0, slen;
 525
 526		slen = s->length;
 527		/* Sanity check */
 528		if (slen == 0) {
 529			dma_next = 0;
 530			continue;
 531		}
 532		/* Allocate iommu entries for that segment */
 533		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 534		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
 535		entry = iommu_tbl_range_alloc(dev, tbl, npages,
 536					      &handle, (unsigned long)(-1), 0);
 537
 538		/* Handle failure */
 539		if (unlikely(entry == IOMMU_ERROR_CODE)) {
 540			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
 541					   tbl, paddr, npages);
 542			goto iommu_map_failed;
 543		}
 544
 545		iommu_batch_new_entry(entry, mask);
 546
 547		/* Convert entry to a dma_addr_t */
 548		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
 549		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 550
 551		/* Insert into HW table */
 552		paddr &= IO_PAGE_MASK;
 553		while (npages--) {
 554			err = iommu_batch_add(paddr, mask);
 555			if (unlikely(err < 0L))
 556				goto iommu_map_failed;
 557			paddr += IO_PAGE_SIZE;
 558		}
 559
 560		/* If we are in an open segment, try merging */
 561		if (segstart != s) {
 562			/* We cannot merge if:
 563			 * - allocated dma_addr isn't contiguous to previous allocation
 564			 */
 565			if ((dma_addr != dma_next) ||
 566			    (outs->dma_length + s->length > max_seg_size) ||
 567			    (is_span_boundary(out_entry, base_shift,
 568					      seg_boundary_size, outs, s))) {
 569				/* Can't merge: create a new segment */
 570				segstart = s;
 571				outcount++;
 572				outs = sg_next(outs);
 573			} else {
 574				outs->dma_length += s->length;
 575			}
 576		}
 577
 578		if (segstart == s) {
 579			/* This is a new segment, fill entries */
 580			outs->dma_address = dma_addr;
 581			outs->dma_length = slen;
 582			out_entry = entry;
 583		}
 584
 585		/* Calculate next page pointer for contiguous check */
 586		dma_next = dma_addr + slen;
 587	}
 588
 589	err = iommu_batch_end(mask);
 590
 591	if (unlikely(err < 0L))
 592		goto iommu_map_failed;
 593
 594	local_irq_restore(flags);
 595
 596	if (outcount < incount) {
 597		outs = sg_next(outs);
 598		outs->dma_length = 0;
 599	}
 600
 601	return outcount;
 602
 603iommu_map_failed:
 604	for_each_sg(sglist, s, nelems, i) {
 605		if (s->dma_length != 0) {
 606			unsigned long vaddr, npages;
 607
 608			vaddr = s->dma_address & IO_PAGE_MASK;
 609			npages = iommu_num_pages(s->dma_address, s->dma_length,
 610						 IO_PAGE_SIZE);
 611			iommu_tbl_range_free(tbl, vaddr, npages,
 612					     IOMMU_ERROR_CODE);
 613			/* XXX demap? XXX */
 614			s->dma_length = 0;
 615		}
 616		if (s == outs)
 617			break;
 618	}
 619	local_irq_restore(flags);
 620
 621	return -EINVAL;
 622}
 623
 624static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 625			    int nelems, enum dma_data_direction direction,
 626			    unsigned long attrs)
 627{
 628	struct pci_pbm_info *pbm;
 629	struct scatterlist *sg;
 630	struct iommu *iommu;
 631	struct atu *atu;
 632	unsigned long flags, entry;
 633	unsigned long iotsb_num;
 634	u32 devhandle;
 635
 636	BUG_ON(direction == DMA_NONE);
 637
 638	iommu = dev->archdata.iommu;
 639	pbm = dev->archdata.host_controller;
 640	atu = iommu->atu;
 641	devhandle = pbm->devhandle;
 642	
 643	local_irq_save(flags);
 644
 645	sg = sglist;
 646	while (nelems--) {
 647		dma_addr_t dma_handle = sg->dma_address;
 648		unsigned int len = sg->dma_length;
 649		unsigned long npages;
 650		struct iommu_map_table *tbl;
 651		unsigned long shift = IO_PAGE_SHIFT;
 652
 653		if (!len)
 654			break;
 655		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
 656
 657		if (dma_handle <= DMA_BIT_MASK(32)) {
 658			iotsb_num = 0; /* we don't care for legacy iommu */
 659			tbl = &iommu->tbl;
 660		} else {
 661			iotsb_num = atu->iotsb->iotsb_num;
 662			tbl = &atu->tbl;
 663		}
 664		entry = ((dma_handle - tbl->table_map_base) >> shift);
 665		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
 666				   entry, npages);
 667		iommu_tbl_range_free(tbl, dma_handle, npages,
 668				     IOMMU_ERROR_CODE);
 669		sg = sg_next(sg);
 670	}
 671
 672	local_irq_restore(flags);
 673}
 674
 675static int dma_4v_supported(struct device *dev, u64 device_mask)
 676{
 677	struct iommu *iommu = dev->archdata.iommu;
 678
 679	if (ali_sound_dma_hack(dev, device_mask))
 680		return 1;
 681	if (device_mask < iommu->dma_addr_mask)
 682		return 0;
 683	return 1;
 684}
 685
 686static const struct dma_map_ops sun4v_dma_ops = {
 687	.alloc				= dma_4v_alloc_coherent,
 688	.free				= dma_4v_free_coherent,
 689	.map_page			= dma_4v_map_page,
 690	.unmap_page			= dma_4v_unmap_page,
 691	.map_sg				= dma_4v_map_sg,
 692	.unmap_sg			= dma_4v_unmap_sg,
 693	.dma_supported			= dma_4v_supported,
 694};
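/* Usage note (illustrative): pci_sun4v_probe() installs these globally
 * with "dma_ops = &sun4v_dma_ops", so a generic DMA API call such as
 *
 *	buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
 *
 * is routed to dma_4v_alloc_coherent() above, and dma_map_page() /
 * dma_unmap_page() likewise reach the dma_4v_* handlers.
 */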
 695
 696static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 697{
 698	struct property *prop;
 699	struct device_node *dp;
 700
 701	dp = pbm->op->dev.of_node;
 702	prop = of_find_property(dp, "66mhz-capable", NULL);
 703	pbm->is_66mhz_capable = (prop != NULL);
 704	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
 705
 706	/* XXX register error interrupt handlers XXX */
 707}
 708
 709static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 710					    struct iommu_map_table *iommu)
 711{
 712	struct iommu_pool *pool;
 713	unsigned long i, pool_nr, cnt = 0;
 714	u32 devhandle;
 715
 716	devhandle = pbm->devhandle;
 717	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
 718		pool = &(iommu->pools[pool_nr]);
 719		for (i = pool->start; i <= pool->end; i++) {
 720			unsigned long ret, io_attrs, ra;
 721
 722			ret = pci_sun4v_iommu_getmap(devhandle,
 723						     HV_PCI_TSBID(0, i),
 724						     &io_attrs, &ra);
 725			if (ret == HV_EOK) {
 726				if (page_in_phys_avail(ra)) {
 727					pci_sun4v_iommu_demap(devhandle,
 728							      HV_PCI_TSBID(0,
 729							      i), 1);
 730				} else {
 731					cnt++;
 732					__set_bit(i, iommu->map);
 733				}
 734			}
 735		}
 736	}
 737	return cnt;
 738}
 739
 740static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
 741{
 742	struct atu *atu = pbm->iommu->atu;
 743	struct atu_iotsb *iotsb;
 744	void *table;
 745	u64 table_size;
 746	u64 iotsb_num;
 747	unsigned long order;
 748	unsigned long err;
 749
 750	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
 751	if (!iotsb) {
 752		err = -ENOMEM;
 753		goto out_err;
 754	}
 755	atu->iotsb = iotsb;
 756
 757	/* calculate size of IOTSB */
 758	table_size = (atu->size / IO_PAGE_SIZE) * 8;
 759	order = get_order(table_size);
 760	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 761	if (!table) {
 762		err = -ENOMEM;
 763		goto table_failed;
 764	}
 765	iotsb->table = table;
 766	iotsb->ra = __pa(table);
 767	iotsb->dvma_size = atu->size;
 768	iotsb->dvma_base = atu->base;
 769	iotsb->table_size = table_size;
 770	iotsb->page_size = IO_PAGE_SIZE;
 771
 772	/* configure and register IOTSB with HV */
 773	err = pci_sun4v_iotsb_conf(pbm->devhandle,
 774				   iotsb->ra,
 775				   iotsb->table_size,
 776				   iotsb->page_size,
 777				   iotsb->dvma_base,
 778				   &iotsb_num);
 779	if (err) {
 780		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
 781		goto iotsb_conf_failed;
 782	}
 783	iotsb->iotsb_num = iotsb_num;
 784
 785	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
 786	if (err) {
 787		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
 788		goto iotsb_conf_failed;
 789	}
 790
 791	return 0;
 792
 793iotsb_conf_failed:
 794	free_pages((unsigned long)table, order);
 795table_failed:
 796	kfree(iotsb);
 797out_err:
 798	return err;
 799}
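/* Sizing sketch (illustrative, assuming 8K IO pages and the fixed 32G
 * ATU_64_SPACE_SIZE described below): with 8 bytes per IOTTE,
 *
 *	num_iotte  = 32G / 8K   = 4M entries
 *	table_size = 4M * 8     = 32M IOTSB
 *	order      = get_order(32M), one physically contiguous allocation
 */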
 800
 801static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
 802{
 803	struct atu *atu = pbm->iommu->atu;
 804	unsigned long err;
 805	const u64 *ranges;
 806	u64 map_size, num_iotte;
 807	u64 dma_mask;
 808	const u32 *page_size;
 809	int len;
 810
 811	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
 812				 &len);
 813	if (!ranges) {
 814		pr_err(PFX "No iommu-address-ranges\n");
 815		return -EINVAL;
 816	}
 817
 818	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
 819				    NULL);
 820	if (!page_size) {
 821		pr_err(PFX "No iommu-pagesizes\n");
 822		return -EINVAL;
 823	}
 824
 825	/* There are 4 iommu-address-ranges supported. Each range is pair of
 826	 * {base, size}. The ranges[0] and ranges[1] are 32bit address space
 827	 * while ranges[2] and ranges[3] are 64bit space.  We want to use 64bit
 828	 * address ranges to support 64bit addressing. Because 'size' for
 829	 * address ranges[2] and ranges[3] are same we can select either of
 830	 * ranges[2] or ranges[3] for mapping. However due to 'size' is too
 831	 * large for OS to allocate IOTSB we are using fix size 32G
 832	 * (ATU_64_SPACE_SIZE) which is more than enough for all PCIe devices
 833	 * to share.
 834	 */
 835	atu->ranges = (struct atu_ranges *)ranges;
 836	atu->base = atu->ranges[3].base;
 837	atu->size = ATU_64_SPACE_SIZE;
 838
 839	/* Create IOTSB */
 840	err = pci_sun4v_atu_alloc_iotsb(pbm);
 841	if (err) {
 842		pr_err(PFX "Error creating ATU IOTSB\n");
 843		return err;
 844	}
 845
 846	/* Create ATU iommu map.
 847	 * One bit represents one iotte in IOTSB table.
 848	 */
 849	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
 850	num_iotte = atu->size / IO_PAGE_SIZE;
 851	map_size = num_iotte / 8;
 852	atu->tbl.table_map_base = atu->base;
 853	atu->dma_addr_mask = dma_mask;
 854	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
 855	if (!atu->tbl.map)
 856		return -ENOMEM;
 857
 858	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
 859			    NULL, false /* no large_pool */,
 860			    0 /* default npools */,
 861			    false /* want span boundary checking */);
 862
 863	return 0;
 864}
 865
 866static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 867{
 868	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
 869	struct iommu *iommu = pbm->iommu;
 870	unsigned long num_tsb_entries, sz;
 871	u32 dma_mask, dma_offset;
 872	const u32 *vdma;
 873
 874	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
 875	if (!vdma)
 876		vdma = vdma_default;
 877
 878	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
 879		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
 880		       vdma[0], vdma[1]);
 881		return -EINVAL;
 882	}
 883
 884	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
 885	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
 886
 887	dma_offset = vdma[0];
 888
 889	/* Setup initial software IOMMU state. */
 890	spin_lock_init(&iommu->lock);
 891	iommu->ctx_lowest_free = 1;
 892	iommu->tbl.table_map_base = dma_offset;
 893	iommu->dma_addr_mask = dma_mask;
 894
 895	/* Allocate and initialize the free area map.  */
 896	sz = (num_tsb_entries + 7) / 8;
 897	sz = (sz + 7UL) & ~7UL;
 898	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
 899	if (!iommu->tbl.map) {
 900		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
 901		return -ENOMEM;
 902	}
 903	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
 904			    NULL, false /* no large_pool */,
 905			    0 /* default npools */,
 906			    false /* want span boundary checking */);
 907	sz = probe_existing_entries(pbm, &iommu->tbl);
 908	if (sz)
 909		printk("%s: Imported %lu TSB entries from OBP\n",
 910		       pbm->name, sz);
 911
 912	return 0;
 913}
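/* Worked example (illustrative): with the vdma_default pair
 * { 0x80000000, 0x80000000 } and 8K IO pages this yields
 * num_tsb_entries = 2G / 8K = 256K, dma_mask = 0x7fffffff, and a 32K
 * free-area bitmap (one bit per TSB entry, rounded up to 8 bytes).
 */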
 914
 915#ifdef CONFIG_PCI_MSI
 916struct pci_sun4v_msiq_entry {
 917	u64		version_type;
 918#define MSIQ_VERSION_MASK		0xffffffff00000000UL
 919#define MSIQ_VERSION_SHIFT		32
 920#define MSIQ_TYPE_MASK			0x00000000000000ffUL
 921#define MSIQ_TYPE_SHIFT			0
 922#define MSIQ_TYPE_NONE			0x00
 923#define MSIQ_TYPE_MSG			0x01
 924#define MSIQ_TYPE_MSI32			0x02
 925#define MSIQ_TYPE_MSI64			0x03
 926#define MSIQ_TYPE_INTX			0x08
 927#define MSIQ_TYPE_NONE2			0xff
 928
 929	u64		intx_sysino;
 930	u64		reserved1;
 931	u64		stick;
 932	u64		req_id;  /* bus/device/func */
 933#define MSIQ_REQID_BUS_MASK		0xff00UL
 934#define MSIQ_REQID_BUS_SHIFT		8
 935#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
 936#define MSIQ_REQID_DEVICE_SHIFT		3
 937#define MSIQ_REQID_FUNC_MASK		0x0007UL
 938#define MSIQ_REQID_FUNC_SHIFT		0
 939
 940	u64		msi_address;
 941
 942	/* The format of this value is message type dependent.
 943	 * For MSI bits 15:0 are the data from the MSI packet.
 944	 * For MSI-X bits 31:0 are the data from the MSI packet.
 945	 * For MSG, the message code and message routing code where:
 946	 * 	bits 39:32 is the bus/device/fn of the msg target-id
 947	 *	bits 18:16 is the message routing code
 948	 *	bits 7:0 is the message code
 949	 * For INTx the low order 2-bits are:
 950	 *	00 - INTA
 951	 *	01 - INTB
 952	 *	10 - INTC
 953	 *	11 - INTD
 954	 */
 955	u64		msi_data;
 956
 957	u64		reserved2;
 958};
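/* Decoding sketch (illustrative; these helpers are not in this file):
 * the req_id bus/device/function fields unpack with the masks above:
 *
 *	bus  = (ep->req_id & MSIQ_REQID_BUS_MASK)    >> MSIQ_REQID_BUS_SHIFT;
 *	dev  = (ep->req_id & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;
 *	func = (ep->req_id & MSIQ_REQID_FUNC_MASK)   >> MSIQ_REQID_FUNC_SHIFT;
 */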
 959
 960static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
 961			      unsigned long *head)
 962{
 963	unsigned long err, limit;
 964
 965	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
 966	if (unlikely(err))
 967		return -ENXIO;
 968
 969	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
 970	if (unlikely(*head >= limit))
 971		return -EFBIG;
 972
 973	return 0;
 974}
 975
 976static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
 977				 unsigned long msiqid, unsigned long *head,
 978				 unsigned long *msi)
 979{
 980	struct pci_sun4v_msiq_entry *ep;
 981	unsigned long err, type;
 982
 983	/* Note: void pointer arithmetic, 'head' is a byte offset  */
 984	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
 985				 (pbm->msiq_ent_count *
 986				  sizeof(struct pci_sun4v_msiq_entry))) +
 987	      *head);
 988
 989	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
 990		return 0;
 991
 992	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
 993	if (unlikely(type != MSIQ_TYPE_MSI32 &&
 994		     type != MSIQ_TYPE_MSI64))
 995		return -EINVAL;
 996
 997	*msi = ep->msi_data;
 998
 999	err = pci_sun4v_msi_setstate(pbm->devhandle,
1000				     ep->msi_data /* msi_num */,
1001				     HV_MSISTATE_IDLE);
1002	if (unlikely(err))
1003		return -ENXIO;
1004
1005	/* Clear the entry.  */
1006	ep->version_type &= ~MSIQ_TYPE_MASK;
1007
1008	(*head) += sizeof(struct pci_sun4v_msiq_entry);
1009	if (*head >=
1010	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
1011		*head = 0;
1012
1013	return 1;
1014}
1015
1016static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
1017			      unsigned long head)
1018{
1019	unsigned long err;
1020
1021	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
1022	if (unlikely(err))
1023		return -EINVAL;
1024
1025	return 0;
1026}
1027
1028static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
1029			       unsigned long msi, int is_msi64)
1030{
1031	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
1032				  (is_msi64 ?
1033				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
1034		return -ENXIO;
1035	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
1036		return -ENXIO;
1037	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
1038		return -ENXIO;
1039	return 0;
1040}
1041
1042static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
1043{
1044	unsigned long err, msiqid;
1045
1046	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
1047	if (err)
1048		return -ENXIO;
1049
1050	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
1051
1052	return 0;
1053}
1054
1055static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
1056{
1057	unsigned long q_size, alloc_size, pages, order;
1058	int i;
1059
1060	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
1061	alloc_size = (pbm->msiq_num * q_size);
1062	order = get_order(alloc_size);
1063	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
1064	if (pages == 0UL) {
1065		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
1066		       order);
1067		return -ENOMEM;
1068	}
1069	memset((char *)pages, 0, PAGE_SIZE << order);
1070	pbm->msi_queues = (void *) pages;
1071
1072	for (i = 0; i < pbm->msiq_num; i++) {
1073		unsigned long err, base = __pa(pages + (i * q_size));
1074		unsigned long ret1, ret2;
1075
1076		err = pci_sun4v_msiq_conf(pbm->devhandle,
1077					  pbm->msiq_first + i,
1078					  base, pbm->msiq_ent_count);
1079		if (err) {
1080			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
1081			       err);
1082			goto h_error;
1083		}
1084
1085		err = pci_sun4v_msiq_info(pbm->devhandle,
1086					  pbm->msiq_first + i,
1087					  &ret1, &ret2);
1088		if (err) {
1089			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
1090			       err);
1091			goto h_error;
1092		}
1093		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
1094			printk(KERN_ERR "MSI: Bogus qconf "
1095			       "expected[%lx:%x] got[%lx:%lx]\n",
1096			       base, pbm->msiq_ent_count,
1097			       ret1, ret2);
1098			goto h_error;
1099		}
1100	}
1101
1102	return 0;
1103
1104h_error:
1105	free_pages(pages, order);
1106	return -EINVAL;
1107}
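/* Layout sketch (illustrative): all queues share one physically
 * contiguous allocation, so queue i is registered at
 * __pa(pages) + i * q_size, where
 * q_size = msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry).
 */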
1108
1109static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
1110{
1111	unsigned long q_size, alloc_size, pages, order;
1112	int i;
1113
1114	for (i = 0; i < pbm->msiq_num; i++) {
1115		unsigned long msiqid = pbm->msiq_first + i;
1116
1117		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
1118	}
1119
1120	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
1121	alloc_size = (pbm->msiq_num * q_size);
1122	order = get_order(alloc_size);
1123
1124	pages = (unsigned long) pbm->msi_queues;
1125
1126	free_pages(pages, order);
1127
1128	pbm->msi_queues = NULL;
1129}
1130
1131static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
1132				    unsigned long msiqid,
1133				    unsigned long devino)
1134{
1135	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
1136
1137	if (!irq)
1138		return -ENOMEM;
1139
1140	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
1141		return -EINVAL;
1142	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
1143		return -EINVAL;
1144
1145	return irq;
1146}
1147
1148static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
1149	.get_head	=	pci_sun4v_get_head,
1150	.dequeue_msi	=	pci_sun4v_dequeue_msi,
1151	.set_head	=	pci_sun4v_set_head,
1152	.msi_setup	=	pci_sun4v_msi_setup,
1153	.msi_teardown	=	pci_sun4v_msi_teardown,
1154	.msiq_alloc	=	pci_sun4v_msiq_alloc,
1155	.msiq_free	=	pci_sun4v_msiq_free,
1156	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
1157};
1158
1159static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1160{
1161	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
1162}
1163#else /* CONFIG_PCI_MSI */
1164static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1165{
1166}
1167#endif /* !(CONFIG_PCI_MSI) */
1168
1169static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
1170			      struct platform_device *op, u32 devhandle)
1171{
1172	struct device_node *dp = op->dev.of_node;
1173	int err;
1174
1175	pbm->numa_node = of_node_to_nid(dp);
1176
1177	pbm->pci_ops = &sun4v_pci_ops;
1178	pbm->config_space_reg_bits = 12;
1179
1180	pbm->index = pci_num_pbms++;
1181
1182	pbm->op = op;
1183
1184	pbm->devhandle = devhandle;
1185
1186	pbm->name = dp->full_name;
1187
1188	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
1189	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);
1190
1191	pci_determine_mem_io_space(pbm);
1192
1193	pci_get_pbm_props(pbm);
1194
1195	err = pci_sun4v_iommu_init(pbm);
1196	if (err)
1197		return err;
1198
1199	pci_sun4v_msi_init(pbm);
1200
1201	pci_sun4v_scan_bus(pbm, &op->dev);
1202
1203	/* if atu_init fails its not complete failure.
1204	 * we can still continue using legacy iommu.
1205	 */
1206	if (pbm->iommu->atu) {
1207		err = pci_sun4v_atu_init(pbm);
1208		if (err) {
1209			kfree(pbm->iommu->atu);
1210			pbm->iommu->atu = NULL;
1211			pr_err(PFX "ATU init failed, err=%d\n", err);
1212		}
1213	}
1214
1215	pbm->next = pci_pbm_root;
1216	pci_pbm_root = pbm;
1217
1218	return 0;
1219}
1220
1221static int pci_sun4v_probe(struct platform_device *op)
1222{
1223	const struct linux_prom64_registers *regs;
1224	static int hvapi_negotiated = 0;
1225	struct pci_pbm_info *pbm;
1226	struct device_node *dp;
1227	struct iommu *iommu;
1228	struct atu *atu;
1229	u32 devhandle;
1230	int i, err = -ENODEV;
1231	static bool hv_atu = true;
1232
1233	dp = op->dev.of_node;
1234
1235	if (!hvapi_negotiated++) {
1236		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
1237			vpci_major = vpci_versions[i].major;
1238			vpci_minor = vpci_versions[i].minor;
1239
1240			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
1241						   &vpci_minor);
1242			if (!err)
1243				break;
1244		}
1245
1246		if (err) {
1247			pr_err(PFX "Could not register hvapi, err=%d\n", err);
1248			return err;
1249		}
1250		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
1251			vpci_major, vpci_minor);
1252
1253		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
1254		if (err) {
1255			/* don't return an error if we fail to register the
1256			 * ATU group, but ATU hcalls won't be available.
1257			 */
1258			hv_atu = false;
1259		} else {
1260			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
1261				vatu_major, vatu_minor);
1262		}
1263
1264		dma_ops = &sun4v_dma_ops;
1265	}
1266
1267	regs = of_get_property(dp, "reg", NULL);
1268	err = -ENODEV;
1269	if (!regs) {
1270		printk(KERN_ERR PFX "Could not find config registers\n");
1271		goto out_err;
1272	}
1273	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1274
1275	err = -ENOMEM;
1276	if (!iommu_batch_initialized) {
1277		for_each_possible_cpu(i) {
1278			unsigned long page = get_zeroed_page(GFP_KERNEL);
1279
1280			if (!page)
1281				goto out_err;
1282
1283			per_cpu(iommu_batch, i).pglist = (u64 *) page;
1284		}
1285		iommu_batch_initialized = 1;
1286	}
1287
1288	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
1289	if (!pbm) {
1290		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
1291		goto out_err;
1292	}
1293
1294	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
1295	if (!iommu) {
1296		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
1297		goto out_free_controller;
1298	}
1299
1300	pbm->iommu = iommu;
1301	iommu->atu = NULL;
1302	if (hv_atu) {
1303		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
1304		if (!atu)
1305			pr_err(PFX "Could not allocate atu\n");
1306		else
1307			iommu->atu = atu;
1308	}
1309
1310	err = pci_sun4v_pbm_init(pbm, op, devhandle);
1311	if (err)
1312		goto out_free_iommu;
1313
1314	dev_set_drvdata(&op->dev, pbm);
1315
1316	return 0;
1317
1318out_free_iommu:
1319	kfree(iommu->atu);
1320	kfree(pbm->iommu);
1321
1322out_free_controller:
1323	kfree(pbm);
1324
1325out_err:
1326	return err;
1327}
1328
1329static const struct of_device_id pci_sun4v_match[] = {
1330	{
1331		.name = "pci",
1332		.compatible = "SUNW,sun4v-pci",
1333	},
1334	{},
1335};
1336
1337static struct platform_driver pci_sun4v_driver = {
1338	.driver = {
1339		.name = DRIVER_NAME,
1340		.of_match_table = pci_sun4v_match,
1341	},
1342	.probe		= pci_sun4v_probe,
1343};
1344
1345static int __init pci_sun4v_init(void)
1346{
1347	return platform_driver_register(&pci_sun4v_driver);
1348}
1349
1350subsys_initcall(pci_sun4v_init);
v3.1
   1/* pci_sun4v.c: SUN4V specific PCI controller support.
   2 *
   3 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/types.h>
   8#include <linux/pci.h>
   9#include <linux/init.h>
  10#include <linux/slab.h>
  11#include <linux/interrupt.h>
  12#include <linux/percpu.h>
  13#include <linux/irq.h>
  14#include <linux/msi.h>
  15#include <linux/log2.h>
  16#include <linux/of_device.h>
  17
  18#include <asm/iommu.h>
  19#include <asm/irq.h>
  20#include <asm/hypervisor.h>
  21#include <asm/prom.h>
  22
  23#include "pci_impl.h"
  24#include "iommu_common.h"
  25
  26#include "pci_sun4v.h"
  27
  28#define DRIVER_NAME	"pci_sun4v"
  29#define PFX		DRIVER_NAME ": "
  30
  31static unsigned long vpci_major = 1;
  32static unsigned long vpci_minor = 1;
  33
  34#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
  35
  36struct iommu_batch {
  37	struct device	*dev;		/* Device mapping is for.	*/
  38	unsigned long	prot;		/* IOMMU page protections	*/
  39	unsigned long	entry;		/* Index into IOTSB.		*/
  40	u64		*pglist;	/* List of physical pages	*/
  41	unsigned long	npages;		/* Number of pages in list.	*/
  42};
  43
  44static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
  45static int iommu_batch_initialized;
  46
  47/* Interrupts must be disabled.  */
  48static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
  49{
  50	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
  51
  52	p->dev		= dev;
  53	p->prot		= prot;
  54	p->entry	= entry;
  55	p->npages	= 0;
  56}
  57
  58/* Interrupts must be disabled.  */
  59static long iommu_batch_flush(struct iommu_batch *p)
  60{
  61	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
  62	unsigned long devhandle = pbm->devhandle;
  63	unsigned long prot = p->prot;
  64	unsigned long entry = p->entry;
  65	u64 *pglist = p->pglist;
  66	unsigned long npages = p->npages;
  67
  68	while (npages != 0) {
  69		long num;
  70
  71		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
  72					  npages, prot, __pa(pglist));
  73		if (unlikely(num < 0)) {
  74			if (printk_ratelimit())
  75				printk("iommu_batch_flush: IOMMU map of "
  76				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
  77				       "status %ld\n",
  78				       devhandle, HV_PCI_TSBID(0, entry),
  79				       npages, prot, __pa(pglist), num);
  80			return -1;
  81		}
  82
  83		entry += num;
  84		npages -= num;
  85		pglist += num;
  86	}
  87
  88	p->entry = entry;
  89	p->npages = 0;
  90
  91	return 0;
  92}
  93
  94static inline void iommu_batch_new_entry(unsigned long entry)
  95{
  96	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
  97
  98	if (p->entry + p->npages == entry)
  99		return;
 100	if (p->entry != ~0UL)
 101		iommu_batch_flush(p);
 102	p->entry = entry;
 103}
 104
 105/* Interrupts must be disabled.  */
 106static inline long iommu_batch_add(u64 phys_page)
 107{
 108	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 109
 110	BUG_ON(p->npages >= PGLIST_NENTS);
 111
 112	p->pglist[p->npages++] = phys_page;
 113	if (p->npages == PGLIST_NENTS)
 114		return iommu_batch_flush(p);
 115
 116	return 0;
 117}
 118
 119/* Interrupts must be disabled.  */
 120static inline long iommu_batch_end(void)
 121{
 122	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 123
 124	BUG_ON(p->npages >= PGLIST_NENTS);
 125
 126	return iommu_batch_flush(p);
 127}
 128
 129static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 130				   dma_addr_t *dma_addrp, gfp_t gfp)
 131{
 132	unsigned long flags, order, first_page, npages, n;
 133	struct iommu *iommu;
 134	struct page *page;
 135	void *ret;
 136	long entry;
 137	int nid;
 138
 139	size = IO_PAGE_ALIGN(size);
 140	order = get_order(size);
 141	if (unlikely(order >= MAX_ORDER))
 142		return NULL;
 143
 144	npages = size >> IO_PAGE_SHIFT;
 145
 146	nid = dev->archdata.numa_node;
 147	page = alloc_pages_node(nid, gfp, order);
 148	if (unlikely(!page))
 149		return NULL;
 150
 151	first_page = (unsigned long) page_address(page);
 152	memset((char *)first_page, 0, PAGE_SIZE << order);
 153
 154	iommu = dev->archdata.iommu;
 155
 156	spin_lock_irqsave(&iommu->lock, flags);
 157	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 158	spin_unlock_irqrestore(&iommu->lock, flags);
 159
 160	if (unlikely(entry == DMA_ERROR_CODE))
 161		goto range_alloc_fail;
 162
 163	*dma_addrp = (iommu->page_table_map_base +
 164		      (entry << IO_PAGE_SHIFT));
 165	ret = (void *) first_page;
 166	first_page = __pa(first_page);
 167
 168	local_irq_save(flags);
 169
 170	iommu_batch_start(dev,
 171			  (HV_PCI_MAP_ATTR_READ |
 172			   HV_PCI_MAP_ATTR_WRITE),
 173			  entry);
 174
 175	for (n = 0; n < npages; n++) {
 176		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
 177		if (unlikely(err < 0L))
 178			goto iommu_map_fail;
 179	}
 180
 181	if (unlikely(iommu_batch_end() < 0L))
 182		goto iommu_map_fail;
 183
 184	local_irq_restore(flags);
 185
 186	return ret;
 187
 188iommu_map_fail:
 189	/* Interrupts are disabled.  */
 190	spin_lock(&iommu->lock);
 191	iommu_range_free(iommu, *dma_addrp, npages);
 192	spin_unlock_irqrestore(&iommu->lock, flags);
 193
 194range_alloc_fail:
 195	free_pages(first_page, order);
 196	return NULL;
 197}
 198
 199static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 200				 dma_addr_t dvma)
 201{
 202	struct pci_pbm_info *pbm;
 203	struct iommu *iommu;
 204	unsigned long flags, order, npages, entry;
 205	u32 devhandle;
 206
 207	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 208	iommu = dev->archdata.iommu;
 209	pbm = dev->archdata.host_controller;
 210	devhandle = pbm->devhandle;
 211	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 212
 213	spin_lock_irqsave(&iommu->lock, flags);
 214
 215	iommu_range_free(iommu, dvma, npages);
 216
 217	do {
 218		unsigned long num;
 219
 220		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
 221					    npages);
 222		entry += num;
 223		npages -= num;
 224	} while (npages != 0);
 225
 226	spin_unlock_irqrestore(&iommu->lock, flags);
 227
 228	order = get_order(size);
 229	if (order < 10)
 230		free_pages((unsigned long)cpu, order);
 231}
 232
 233static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 234				  unsigned long offset, size_t sz,
 235				  enum dma_data_direction direction,
 236				  struct dma_attrs *attrs)
 237{
 238	struct iommu *iommu;
 239	unsigned long flags, npages, oaddr;
 240	unsigned long i, base_paddr;
 241	u32 bus_addr, ret;
 242	unsigned long prot;
 243	long entry;
 244
 245	iommu = dev->archdata.iommu;
 246
 247	if (unlikely(direction == DMA_NONE))
 248		goto bad;
 249
 250	oaddr = (unsigned long)(page_address(page) + offset);
 251	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 252	npages >>= IO_PAGE_SHIFT;
 253
 254	spin_lock_irqsave(&iommu->lock, flags);
 255	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 256	spin_unlock_irqrestore(&iommu->lock, flags);
 257
 258	if (unlikely(entry == DMA_ERROR_CODE))
 259		goto bad;
 260
 261	bus_addr = (iommu->page_table_map_base +
 262		    (entry << IO_PAGE_SHIFT));
 263	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 264	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 265	prot = HV_PCI_MAP_ATTR_READ;
 266	if (direction != DMA_TO_DEVICE)
 267		prot |= HV_PCI_MAP_ATTR_WRITE;
 268
 269	local_irq_save(flags);
 270
 271	iommu_batch_start(dev, prot, entry);
 272
 273	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
 274		long err = iommu_batch_add(base_paddr);
 275		if (unlikely(err < 0L))
 276			goto iommu_map_fail;
 277	}
 278	if (unlikely(iommu_batch_end() < 0L))
 279		goto iommu_map_fail;
 280
 281	local_irq_restore(flags);
 282
 283	return ret;
 284
 285bad:
 286	if (printk_ratelimit())
 287		WARN_ON(1);
 288	return DMA_ERROR_CODE;
 289
 290iommu_map_fail:
 291	/* Interrupts are disabled.  */
 292	spin_lock(&iommu->lock);
 293	iommu_range_free(iommu, bus_addr, npages);
 294	spin_unlock_irqrestore(&iommu->lock, flags);
 295
 296	return DMA_ERROR_CODE;
 297}
 298
 299static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 300			      size_t sz, enum dma_data_direction direction,
 301			      struct dma_attrs *attrs)
 302{
 303	struct pci_pbm_info *pbm;
 304	struct iommu *iommu;
 305	unsigned long flags, npages;
 306	long entry;
 307	u32 devhandle;
 308
 309	if (unlikely(direction == DMA_NONE)) {
 310		if (printk_ratelimit())
 311			WARN_ON(1);
 312		return;
 313	}
 314
 315	iommu = dev->archdata.iommu;
 316	pbm = dev->archdata.host_controller;
 317	devhandle = pbm->devhandle;
 318
 319	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 320	npages >>= IO_PAGE_SHIFT;
 321	bus_addr &= IO_PAGE_MASK;
 322
 323	spin_lock_irqsave(&iommu->lock, flags);
 324
 325	iommu_range_free(iommu, bus_addr, npages);
 326
 327	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
 328	do {
 329		unsigned long num;
 330
 331		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
 332					    npages);
 333		entry += num;
 334		npages -= num;
 335	} while (npages != 0);
 336
 337	spin_unlock_irqrestore(&iommu->lock, flags);
 338}
 339
 340static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 341			 int nelems, enum dma_data_direction direction,
 342			 struct dma_attrs *attrs)
 343{
 344	struct scatterlist *s, *outs, *segstart;
 345	unsigned long flags, handle, prot;
 346	dma_addr_t dma_next = 0, dma_addr;
 347	unsigned int max_seg_size;
 348	unsigned long seg_boundary_size;
 349	int outcount, incount, i;
 350	struct iommu *iommu;
 351	unsigned long base_shift;
 352	long err;
 353
 354	BUG_ON(direction == DMA_NONE);
 355
 356	iommu = dev->archdata.iommu;
 357	if (nelems == 0 || !iommu)
 358		return 0;
 359	
 360	prot = HV_PCI_MAP_ATTR_READ;
 361	if (direction != DMA_TO_DEVICE)
 362		prot |= HV_PCI_MAP_ATTR_WRITE;
 363
 364	outs = s = segstart = &sglist[0];
 365	outcount = 1;
 366	incount = nelems;
 367	handle = 0;
 368
 369	/* Init first segment length for backout at failure */
 370	outs->dma_length = 0;
 371
 372	spin_lock_irqsave(&iommu->lock, flags);
 373
 374	iommu_batch_start(dev, prot, ~0UL);
 375
 376	max_seg_size = dma_get_max_seg_size(dev);
 377	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 378				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
 379	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 380	for_each_sg(sglist, s, nelems, i) {
 381		unsigned long paddr, npages, entry, out_entry = 0, slen;
 382
 383		slen = s->length;
 384		/* Sanity check */
 385		if (slen == 0) {
 386			dma_next = 0;
 387			continue;
 388		}
 389		/* Allocate iommu entries for that segment */
 390		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 391		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
 392		entry = iommu_range_alloc(dev, iommu, npages, &handle);
 393
 394		/* Handle failure */
 395		if (unlikely(entry == DMA_ERROR_CODE)) {
 396			if (printk_ratelimit())
 397				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
 398				       " npages %lx\n", iommu, paddr, npages);
 399			goto iommu_map_failed;
 400		}
 401
 402		iommu_batch_new_entry(entry);
 403
 404		/* Convert entry to a dma_addr_t */
 405		dma_addr = iommu->page_table_map_base +
 406			(entry << IO_PAGE_SHIFT);
 407		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 408
 409		/* Insert into HW table */
 410		paddr &= IO_PAGE_MASK;
 411		while (npages--) {
 412			err = iommu_batch_add(paddr);
 413			if (unlikely(err < 0L))
 414				goto iommu_map_failed;
 415			paddr += IO_PAGE_SIZE;
 416		}
 417
 418		/* If we are in an open segment, try merging */
 419		if (segstart != s) {
 420			/* We cannot merge if:
 421			 * - allocated dma_addr isn't contiguous to previous allocation
 422			 */
 423			if ((dma_addr != dma_next) ||
 424			    (outs->dma_length + s->length > max_seg_size) ||
 425			    (is_span_boundary(out_entry, base_shift,
 426					      seg_boundary_size, outs, s))) {
 427				/* Can't merge: create a new segment */
 428				segstart = s;
 429				outcount++;
 430				outs = sg_next(outs);
 431			} else {
 432				outs->dma_length += s->length;
 433			}
 434		}
 435
 436		if (segstart == s) {
 437			/* This is a new segment, fill entries */
 438			outs->dma_address = dma_addr;
 439			outs->dma_length = slen;
 440			out_entry = entry;
 441		}
 442
 443		/* Calculate next page pointer for contiguous check */
 444		dma_next = dma_addr + slen;
 445	}
 446
 447	err = iommu_batch_end();
 448
 449	if (unlikely(err < 0L))
 450		goto iommu_map_failed;
 451
 452	spin_unlock_irqrestore(&iommu->lock, flags);
 453
 454	if (outcount < incount) {
 455		outs = sg_next(outs);
 456		outs->dma_address = DMA_ERROR_CODE;
 457		outs->dma_length = 0;
 458	}
 459
 460	return outcount;
 461
 462iommu_map_failed:
 463	for_each_sg(sglist, s, nelems, i) {
 464		if (s->dma_length != 0) {
 465			unsigned long vaddr, npages;
 466
 467			vaddr = s->dma_address & IO_PAGE_MASK;
 468			npages = iommu_num_pages(s->dma_address, s->dma_length,
 469						 IO_PAGE_SIZE);
 470			iommu_range_free(iommu, vaddr, npages);
 471			/* XXX demap? XXX */
 472			s->dma_address = DMA_ERROR_CODE;
 473			s->dma_length = 0;
 474		}
 475		if (s == outs)
 476			break;
 477	}
 478	spin_unlock_irqrestore(&iommu->lock, flags);
 479
 480	return 0;
 481}
 482
 483static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 484			    int nelems, enum dma_data_direction direction,
 485			    struct dma_attrs *attrs)
 486{
 487	struct pci_pbm_info *pbm;
 488	struct scatterlist *sg;
 489	struct iommu *iommu;
 490	unsigned long flags;
 491	u32 devhandle;
 492
 493	BUG_ON(direction == DMA_NONE);
 494
 495	iommu = dev->archdata.iommu;
 496	pbm = dev->archdata.host_controller;
 497	devhandle = pbm->devhandle;
 498	
 499	spin_lock_irqsave(&iommu->lock, flags);
 500
 501	sg = sglist;
 502	while (nelems--) {
 503		dma_addr_t dma_handle = sg->dma_address;
 504		unsigned int len = sg->dma_length;
 505		unsigned long npages, entry;
 506
 507		if (!len)
 508			break;
 509		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
 510		iommu_range_free(iommu, dma_handle, npages);
 511
 512		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 513		while (npages) {
 514			unsigned long num;
 515
 516			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
 517						    npages);
 518			entry += num;
 519			npages -= num;
 520		}
 521
 522		sg = sg_next(sg);
 523	}
 524
 525	spin_unlock_irqrestore(&iommu->lock, flags);
 526}
 527
 528static struct dma_map_ops sun4v_dma_ops = {
 529	.alloc_coherent			= dma_4v_alloc_coherent,
 530	.free_coherent			= dma_4v_free_coherent,
 531	.map_page			= dma_4v_map_page,
 532	.unmap_page			= dma_4v_unmap_page,
 533	.map_sg				= dma_4v_map_sg,
 534	.unmap_sg			= dma_4v_unmap_sg,
 535};
 536
 537static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
 538					 struct device *parent)
 539{
 540	struct property *prop;
 541	struct device_node *dp;
 542
 543	dp = pbm->op->dev.of_node;
 544	prop = of_find_property(dp, "66mhz-capable", NULL);
 545	pbm->is_66mhz_capable = (prop != NULL);
 546	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
 547
 548	/* XXX register error interrupt handlers XXX */
 549}
 550
 551static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
 552						      struct iommu *iommu)
 553{
 554	struct iommu_arena *arena = &iommu->arena;
 555	unsigned long i, cnt = 0;
 556	u32 devhandle;
 557
 558	devhandle = pbm->devhandle;
 559	for (i = 0; i < arena->limit; i++) {
 560		unsigned long ret, io_attrs, ra;
 561
 562		ret = pci_sun4v_iommu_getmap(devhandle,
 563					     HV_PCI_TSBID(0, i),
 564					     &io_attrs, &ra);
 565		if (ret == HV_EOK) {
 566			if (page_in_phys_avail(ra)) {
 567				pci_sun4v_iommu_demap(devhandle,
 568						      HV_PCI_TSBID(0, i), 1);
 569			} else {
 570				cnt++;
 571				__set_bit(i, arena->map);
 572			}
 573		}
 574	}
 575
 576	return cnt;
 577}
 578
 579static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 580{
 581	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
 582	struct iommu *iommu = pbm->iommu;
 583	unsigned long num_tsb_entries, sz;
 584	u32 dma_mask, dma_offset;
 585	const u32 *vdma;
 586
 587	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
 588	if (!vdma)
 589		vdma = vdma_default;
 590
 591	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
 592		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
 593		       vdma[0], vdma[1]);
 594		return -EINVAL;
 595	};
 596
 597	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
 598	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
 599
 600	dma_offset = vdma[0];
 601
 602	/* Setup initial software IOMMU state. */
 603	spin_lock_init(&iommu->lock);
 604	iommu->ctx_lowest_free = 1;
 605	iommu->page_table_map_base = dma_offset;
 606	iommu->dma_addr_mask = dma_mask;
 607
 608	/* Allocate and initialize the free area map.  */
 609	sz = (num_tsb_entries + 7) / 8;
 610	sz = (sz + 7UL) & ~7UL;
 611	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
 612	if (!iommu->arena.map) {
 613		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
 614		return -ENOMEM;
 615	}
 616	iommu->arena.limit = num_tsb_entries;
 617
 618	sz = probe_existing_entries(pbm, iommu);
 619	if (sz)
 620		printk("%s: Imported %lu TSB entries from OBP\n",
 621		       pbm->name, sz);
 622
 623	return 0;
 624}
 625
 626#ifdef CONFIG_PCI_MSI
 627struct pci_sun4v_msiq_entry {
 628	u64		version_type;
 629#define MSIQ_VERSION_MASK		0xffffffff00000000UL
 630#define MSIQ_VERSION_SHIFT		32
 631#define MSIQ_TYPE_MASK			0x00000000000000ffUL
 632#define MSIQ_TYPE_SHIFT			0
 633#define MSIQ_TYPE_NONE			0x00
 634#define MSIQ_TYPE_MSG			0x01
 635#define MSIQ_TYPE_MSI32			0x02
 636#define MSIQ_TYPE_MSI64			0x03
 637#define MSIQ_TYPE_INTX			0x08
 638#define MSIQ_TYPE_NONE2			0xff
 639
 640	u64		intx_sysino;
 641	u64		reserved1;
 642	u64		stick;
 643	u64		req_id;  /* bus/device/func */
 644#define MSIQ_REQID_BUS_MASK		0xff00UL
 645#define MSIQ_REQID_BUS_SHIFT		8
 646#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
 647#define MSIQ_REQID_DEVICE_SHIFT		3
 648#define MSIQ_REQID_FUNC_MASK		0x0007UL
 649#define MSIQ_REQID_FUNC_SHIFT		0
 650
 651	u64		msi_address;
 652
 653	/* The format of this value is message type dependent.
 654	 * For MSI bits 15:0 are the data from the MSI packet.
 655	 * For MSI-X bits 31:0 are the data from the MSI packet.
 656	 * For MSG, the data encodes the message and routing codes:
 657	 *	bits 39:32 are the bus/device/fn of the msg target-id
 658	 *	bits 18:16 are the message routing code
 659	 *	bits 7:0 are the message code
 660	 * For INTx the low order 2-bits are:
 661	 *	00 - INTA
 662	 *	01 - INTB
 663	 *	10 - INTC
 664	 *	11 - INTD
 665	 */
 666	u64		msi_data;
 667
 668	u64		reserved2;
 669};
 670
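    /* Read the current head of an MSI EQ from the hypervisor and
     * sanity check it against the queue size.  Here, and everywhere
     * below, 'head' is a byte offset into the queue, not an entry
     * index.
     */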
 671static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
 672			      unsigned long *head)
 673{
 674	unsigned long err, limit;
 675
 676	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
 677	if (unlikely(err))
 678		return -ENXIO;
 679
 680	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
 681	if (unlikely(*head >= limit))
 682		return -EFBIG;
 683
 684	return 0;
 685}
 686
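    /* Pull one MSI off a queue.  The entry to examine lives at
     *
     *	msi_queues + (msiqid - msiq_first) * queue_bytes + *head
     *
     * since all queues sit back to back in a single allocation.
     * Returns 1 and advances the head (wrapping at the end of the
     * queue) if an MSI was consumed, 0 if the queue is empty, or a
     * negative errno on failure.
     */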
 687static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
 688				 unsigned long msiqid, unsigned long *head,
 689				 unsigned long *msi)
 690{
 691	struct pci_sun4v_msiq_entry *ep;
 692	unsigned long err, type;
 693
 694	/* Note: void pointer arithmetic, 'head' is a byte offset  */
 695	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
 696				 (pbm->msiq_ent_count *
 697				  sizeof(struct pci_sun4v_msiq_entry))) +
 698	      *head);
 699
 700	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
 701		return 0;
 702
 703	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
 704	if (unlikely(type != MSIQ_TYPE_MSI32 &&
 705		     type != MSIQ_TYPE_MSI64))
 706		return -EINVAL;
 707
 708	*msi = ep->msi_data;
 709
 710	err = pci_sun4v_msi_setstate(pbm->devhandle,
 711				     ep->msi_data /* msi_num */,
 712				     HV_MSISTATE_IDLE);
 713	if (unlikely(err))
 714		return -ENXIO;
 715
 716	/* Clear the entry.  */
 717	ep->version_type &= ~MSIQ_TYPE_MASK;
 718
 719	(*head) += sizeof(struct pci_sun4v_msiq_entry);
 720	if (*head >=
 721	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
 722		*head = 0;
 723
 724	return 1;
 725}
 726
 727static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
 728			      unsigned long head)
 729{
 730	unsigned long err;
 731
 732	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
 733	if (unlikely(err))
 734		return -EINVAL;
 735
 736	return 0;
 737}
 738
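    /* Bind MSI number 'msi' to event queue 'msiqid', park it in the
     * idle state, then mark it valid so the device may signal it.
     */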
 739static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
 740			       unsigned long msi, int is_msi64)
 741{
 742	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
 743				  (is_msi64 ?
 744				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
 745		return -ENXIO;
 746	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
 747		return -ENXIO;
 748	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
 749		return -ENXIO;
 750	return 0;
 751}
 752
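    /* Invalidate an MSI.  The queue lookup only confirms that the
     * hypervisor still knows about this MSI number.
     */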
 753static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
 754{
 755	unsigned long err, msiqid;
 756
 757	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
 758	if (err)
 759		return -ENXIO;
 760
 761	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
 762
 763	return 0;
 764}
 765
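    /* Allocate all MSI EQs for this PBM as one physically contiguous,
     * zeroed block, register each queue's base address and entry
     * count with the hypervisor, and read the configuration back to
     * verify that it stuck.
     */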
 766static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
 767{
 768	unsigned long q_size, alloc_size, pages, order;
 769	int i;
 770
 771	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
 772	alloc_size = (pbm->msiq_num * q_size);
 773	order = get_order(alloc_size);
 774	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
 775	if (pages == 0UL) {
 776		printk(KERN_ERR "MSI: Cannot allocate MSI queues (order=%lu).\n",
 777		       order);
 778		return -ENOMEM;
 779	}
 780	memset((char *)pages, 0, PAGE_SIZE << order);
 781	pbm->msi_queues = (void *) pages;
 782
 783	for (i = 0; i < pbm->msiq_num; i++) {
 784		unsigned long err, base = __pa(pages + (i * q_size));
 785		unsigned long ret1, ret2;
 786
 787		err = pci_sun4v_msiq_conf(pbm->devhandle,
 788					  pbm->msiq_first + i,
 789					  base, pbm->msiq_ent_count);
 790		if (err) {
 791			printk(KERN_ERR "MSI: msiq registration failed (err=%lu)\n",
 792			       err);
 793			goto h_error;
 794		}
 795
 796		err = pci_sun4v_msiq_info(pbm->devhandle,
 797					  pbm->msiq_first + i,
 798					  &ret1, &ret2);
 799		if (err) {
 800			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
 801			       err);
 802			goto h_error;
 803		}
 804		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
 805			printk(KERN_ERR "MSI: Bogus qconf "
 806			       "expected[%lx:%x] got[%lx:%lx]\n",
 807			       base, pbm->msiq_ent_count,
 808			       ret1, ret2);
 809			goto h_error;
 810		}
 811	}
 812
 813	return 0;
 814
 815h_error:
 816	free_pages(pages, order);
 817	return -EINVAL;
 818}
 819
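    /* Unregister every MSI EQ (base 0, zero entries) and release the
     * backing pages.
     */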
 820static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
 821{
 822	unsigned long q_size, alloc_size, pages, order;
 823	int i;
 824
 825	for (i = 0; i < pbm->msiq_num; i++) {
 826		unsigned long msiqid = pbm->msiq_first + i;
 827
 828		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
 829	}
 830
 831	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
 832	alloc_size = (pbm->msiq_num * q_size);
 833	order = get_order(alloc_size);
 834
 835	pages = (unsigned long) pbm->msi_queues;
 836
 837	free_pages(pages, order);
 838
 839	pbm->msi_queues = NULL;
 840}
 841
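    /* Build a virtual IRQ for the (devhandle, devino) pair behind
     * this MSI EQ, then idle and enable the queue itself.
     */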
 842static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
 843				    unsigned long msiqid,
 844				    unsigned long devino)
 845{
 846	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
 847
 848	if (!irq)
 849		return -ENOMEM;
 850
 851	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
 852		return -EINVAL;
 853	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
 854		return -EINVAL;
 855
 856	return irq;
 857}
 858
 859static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
 860	.get_head	=	pci_sun4v_get_head,
 861	.dequeue_msi	=	pci_sun4v_dequeue_msi,
 862	.set_head	=	pci_sun4v_set_head,
 863	.msi_setup	=	pci_sun4v_msi_setup,
 864	.msi_teardown	=	pci_sun4v_msi_teardown,
 865	.msiq_alloc	=	pci_sun4v_msiq_alloc,
 866	.msiq_free	=	pci_sun4v_msiq_free,
 867	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
 868};
 869
 870static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 871{
 872	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
 873}
 874#else /* CONFIG_PCI_MSI */
 875static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 876{
 877}
 878#endif /* !(CONFIG_PCI_MSI) */
 879
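    /* Bring up one PCI Bus Module (PBM): probe its address spaces and
     * properties, initialize the IOMMU and MSI support, scan the bus,
     * then link the PBM onto the global list.
     */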
 880static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
 881			      struct platform_device *op, u32 devhandle)
 882{
 883	struct device_node *dp = op->dev.of_node;
 884	int err;
 885
 886	pbm->numa_node = of_node_to_nid(dp);
 887
 888	pbm->pci_ops = &sun4v_pci_ops;
 889	pbm->config_space_reg_bits = 12;
 890
 891	pbm->index = pci_num_pbms++;
 892
 893	pbm->op = op;
 894
 895	pbm->devhandle = devhandle;
 896
 897	pbm->name = dp->full_name;
 898
 899	printk(KERN_INFO "%s: SUN4V PCI Bus Module\n", pbm->name);
 900	printk(KERN_INFO "%s: On NUMA node %d\n", pbm->name, pbm->numa_node);
 901
 902	pci_determine_mem_io_space(pbm);
 903
 904	pci_get_pbm_props(pbm);
 905
 906	err = pci_sun4v_iommu_init(pbm);
 907	if (err)
 908		return err;
 909
 910	pci_sun4v_msi_init(pbm);
 911
 912	pci_sun4v_scan_bus(pbm, &op->dev);
 913
    	/* If ATU init fails it is not a complete failure; we can
    	 * still continue using the legacy IOMMU.
    	 */
    	if (pbm->iommu->atu) {
    		err = pci_sun4v_atu_init(pbm);
    		if (err) {
    			kfree(pbm->iommu->atu);
    			pbm->iommu->atu = NULL;
    			printk(KERN_ERR PFX "ATU init failed, err=%d\n", err);
    		}
    	}

 914	pbm->next = pci_pbm_root;
 915	pci_pbm_root = pbm;
 916
 917	return 0;
 918}
 919
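    /* Bind to one "SUNW,sun4v-pci" node.  The first invocation also
     * negotiates the PCI and ATU hypervisor API groups, installs
     * sun4v_dma_ops as the global dma_ops, and allocates the per-cpu
     * page lists used by the IOMMU mapping batcher.
     */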
 920static int pci_sun4v_probe(struct platform_device *op)
 921{
 922	const struct linux_prom64_registers *regs;
 923	static int hvapi_negotiated = 0;
 924	struct pci_pbm_info *pbm;
 925	struct device_node *dp;
 926	struct iommu *iommu;
    	struct atu *atu;
 927	u32 devhandle;
 928	int i, err;
    	static bool hv_atu = true;
 929
 930	dp = op->dev.of_node;
 931
 932	if (!hvapi_negotiated++) {
    		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
    			vpci_major = vpci_versions[i].major;
    			vpci_minor = vpci_versions[i].minor;

 933			err = sun4v_hvapi_register(HV_GRP_PCI,
 934						   vpci_major,
 935						   &vpci_minor);
    			if (!err)
    				break;
    		}
 936
 937		if (err) {
 938			printk(KERN_ERR PFX "Could not register hvapi, "
 939			       "err=%d\n", err);
 940			return err;
 941		}
 942		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
 943		       vpci_major, vpci_minor);

    		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
    		if (err) {
    			/* Failure to register the ATU group is not fatal;
    			 * ATU hypercalls simply won't be available.
    			 */
    			hv_atu = false;
    		}
 944
 945		dma_ops = &sun4v_dma_ops;
 946	}
 947
 948	regs = of_get_property(dp, "reg", NULL);
 949	err = -ENODEV;
 950	if (!regs) {
 951		printk(KERN_ERR PFX "Could not find config registers\n");
 952		goto out_err;
 953	}
 954	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
 955
 956	err = -ENOMEM;
 957	if (!iommu_batch_initialized) {
 958		for_each_possible_cpu(i) {
 959			unsigned long page = get_zeroed_page(GFP_KERNEL);
 960
 961			if (!page)
 962				goto out_err;
 963
 964			per_cpu(iommu_batch, i).pglist = (u64 *) page;
 965		}
 966		iommu_batch_initialized = 1;
 967	}
 968
 969	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
 970	if (!pbm) {
 971		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
 972		goto out_err;
 973	}
 974
 975	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
 976	if (!iommu) {
 977		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
 978		goto out_free_controller;
 979	}
 980
 981	pbm->iommu = iommu;
    	iommu->atu = NULL;
    	if (hv_atu) {
    		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
    		if (!atu)
    			printk(KERN_ERR PFX "Could not allocate atu\n");
    		else
    			iommu->atu = atu;
    	}
 982
 983	err = pci_sun4v_pbm_init(pbm, op, devhandle);
 984	if (err)
 985		goto out_free_iommu;
 986
 987	dev_set_drvdata(&op->dev, pbm);
 988
 989	return 0;
 990
 991out_free_iommu:
    	kfree(pbm->iommu->atu);
 992	kfree(pbm->iommu);
 993
 994out_free_controller:
 995	kfree(pbm);
 996
 997out_err:
 998	return err;
 999}
1000
1001static const struct of_device_id pci_sun4v_match[] = {
1002	{
1003		.name = "pci",
1004		.compatible = "SUNW,sun4v-pci",
1005	},
1006	{},
1007};
1008
1009static struct platform_driver pci_sun4v_driver = {
1010	.driver = {
1011		.name = DRIVER_NAME,
1012		.owner = THIS_MODULE,
1013		.of_match_table = pci_sun4v_match,
1014	},
1015	.probe		= pci_sun4v_probe,
1016};
1017
1018static int __init pci_sun4v_init(void)
1019{
1020	return platform_driver_register(&pci_sun4v_driver);
1021}
1022
1023subsys_initcall(pci_sun4v_init);