// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/xarray.h>

enum pci_p2pdma_map_type {
	PCI_P2PDMA_MAP_UNKNOWN = 0,
	PCI_P2PDMA_MAP_NOT_SUPPORTED,
	PCI_P2PDMA_MAP_BUS_ADDR,
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
	struct xarray map_types;
};

struct pci_p2pdma_pagemap {
	struct dev_pagemap pgmap;
	struct pci_dev *provider;
	u64 bus_offset;
};

static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
{
	return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t size = 0;

	if (pdev->p2pdma->pool)
		size = gen_pool_size(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t avail = 0;

	if (pdev->p2pdma->pool)
		avail = gen_pool_avail(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			pdev->p2pdma->p2pmem_published);
}
static DEVICE_ATTR_RO(published);

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.name = "p2pmem",
};

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma = pdev->p2pdma;

	if (!p2pdma)
		return;
	/* Flush and disable pci_alloc_p2pmem() */
	pdev->p2pdma = NULL;
	synchronize_rcu();

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
	xa_destroy(&p2pdma->map_types);
}

static int pci_p2pdma_setup(struct pci_dev *pdev)
{
	int error = -ENOMEM;
	struct pci_p2pdma *p2p;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	xa_init(&p2p->map_types);

	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2p->pool)
		goto out;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (error)
		goto out_pool_destroy;

	pdev->p2pdma = p2p;

	error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (error)
		goto out_pool_destroy;

	return 0;

out_pool_destroy:
	pdev->p2pdma = NULL;
	gen_pool_destroy(p2p->pool);
out:
	devm_kfree(&pdev->dev, p2p);
	return error;
}

/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct pci_p2pdma_pagemap *p2p_pgmap;
	struct dev_pagemap *pgmap;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!pdev->p2pdma) {
		error = pci_p2pdma_setup(pdev);
		if (error)
			return error;
	}

	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
	if (!p2p_pgmap)
		return -ENOMEM;

	pgmap = &p2p_pgmap->pgmap;
	pgmap->res.start = pci_resource_start(pdev, bar) + offset;
	pgmap->res.end = pgmap->res.start + size - 1;
	pgmap->res.flags = pci_resource_flags(pdev, bar);
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;

	p2p_pgmap->provider = pdev;
	p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
		pci_resource_start(pdev, bar);

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			resource_size(&pgmap->res), dev_to_node(&pdev->dev),
			pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
		 &pgmap->res);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
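
/*
 * Example (editorial addition, not part of the original file): a minimal
 * sketch of how a provider driver's probe() routine might register part
 * of its device memory as p2p memory. The driver, its BAR number and the
 * decision to expose the whole BAR are hypothetical.
 */
static int example_provider_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	int rc;

	/* Expose all of (hypothetical) BAR 4 as peer-to-peer memory. */
	rc = pci_p2pdma_add_resource(pdev, 4, 0, 0);
	if (rc)
		return rc;

	/* Let other drivers discover this memory (see pci_p2pmem_publish()). */
	pci_p2pmem_publish(pdev, true);

	return 0;
}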

/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pdev->acs_cap;
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

static bool cpu_supports_p2pdma(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* Any AMD CPU whose family ID is Zen or newer supports p2pdma */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17)
		return true;
#endif

	return false;
}

static const struct pci_p2pdma_whitelist_entry {
	unsigned short vendor;
	unsigned short device;
	enum {
		REQ_SAME_HOST_BRIDGE	= 1 << 0,
	} flags;
} pci_p2pdma_whitelist[] = {
	/* Intel Xeon E5/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x3c00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x3c01, REQ_SAME_HOST_BRIDGE},
	/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x2f00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x2f01, REQ_SAME_HOST_BRIDGE},
	/* Intel SkyLake-E */
	{PCI_VENDOR_ID_INTEL,	0x2030, 0},
	{PCI_VENDOR_ID_INTEL,	0x2031, 0},
	{PCI_VENDOR_ID_INTEL,	0x2032, 0},
	{PCI_VENDOR_ID_INTEL,	0x2033, 0},
	{PCI_VENDOR_ID_INTEL,	0x2020, 0},
	{}
};

static bool __host_bridge_whitelist(struct pci_host_bridge *host,
				    bool same_host_bridge)
{
	struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
	const struct pci_p2pdma_whitelist_entry *entry;
	unsigned short vendor, device;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;
	pci_dev_put(root);

	for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
		if (vendor != entry->vendor || device != entry->device)
			continue;
		if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge)
			return false;

		return true;
	}

	return false;
}

/*
 * If we can't find a common upstream bridge take a look at the root
 * complex and compare it to a whitelist of known good hardware.
 */
static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b)
{
	struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus);
	struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus);

	if (host_a == host_b)
		return __host_bridge_whitelist(host_a, true);

	if (__host_bridge_whitelist(host_a, false) &&
	    __host_bridge_whitelist(host_b, false))
		return true;

	return false;
}

static enum pci_p2pdma_map_type
__upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
		int *dist, bool *acs_redirects, struct seq_buf *acs_list)
{
	struct pci_dev *a = provider, *b = client, *bb;
	int dist_a = 0;
	int dist_b = 0;
	int acs_cnt = 0;

	if (acs_redirects)
		*acs_redirects = false;

	/*
	 * Note: we don't need to take references to devices returned by
	 * pci_upstream_bridge() because we hold a reference to a child
	 * device, which already holds a reference to its upstream bridge.
	 */

	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	if (dist)
		*dist = dist_a + dist_b;

	return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	if (dist)
		*dist = dist_a + dist_b;

	if (acs_cnt) {
		if (acs_redirects)
			*acs_redirects = true;

		return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
	}

	return PCI_P2PDMA_MAP_BUS_ADDR;
}

static unsigned long map_types_idx(struct pci_dev *client)
{
	return (pci_domain_nr(client->bus) << 16) |
		(client->bus->number << 8) | client->devfn;
}

/*
 * Find the distance through the nearest common upstream bridge between
 * two PCI devices.
 *
 * If the two devices are the same device then 0 will be returned.
 *
 * If there are two virtual functions of the same device behind the same
 * bridge port then 2 will be returned (one step down to the PCIe switch,
 * then one step back to the same device).
 *
 * In the case where two devices are connected to the same PCIe switch, the
 * value 4 will be returned. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port
 *       + \- Device A
 *       \-+ Switch Downstream Port
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A through the downstream
 * port of the switch, to the common upstream port, back up to the second
 * downstream port and then to Device B.
 *
 * Any two devices that cannot communicate using p2pdma will return
 * PCI_P2PDMA_MAP_NOT_SUPPORTED.
 *
 * Any two devices that have a data path that goes through the host bridge
 * will consult a whitelist. If the host bridges are on the whitelist,
 * this function will return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE.
 *
 * If either bridge is not on the whitelist this function returns
 * PCI_P2PDMA_MAP_NOT_SUPPORTED.
 *
 * If a bridge which has any ACS redirection bits set is in the path,
 * acs_redirects will be set to true. In this case, a list of all infringing
 * bridge addresses will be populated in acs_list (assuming it's non-null)
 * for printk purposes.
 */
static enum pci_p2pdma_map_type
upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
		int *dist, bool *acs_redirects, struct seq_buf *acs_list)
{
	enum pci_p2pdma_map_type map_type;

	map_type = __upstream_bridge_distance(provider, client, dist,
					      acs_redirects, acs_list);

	if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) {
		if (!cpu_supports_p2pdma() &&
		    !host_bridge_whitelist(provider, client))
			map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	}

	if (provider->p2pdma)
		xa_store(&provider->p2pdma->map_types, map_types_idx(client),
			 xa_mk_value(map_type), GFP_KERNEL);

	return map_type;
}

static enum pci_p2pdma_map_type
upstream_bridge_distance_warn(struct pci_dev *provider, struct pci_dev *client,
			      int *dist)
{
	struct seq_buf acs_list;
	bool acs_redirects;
	int ret;

	seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!acs_list.buffer)
		return -ENOMEM;

	ret = upstream_bridge_distance(provider, client, dist, &acs_redirects,
				       &acs_list);
	if (acs_redirects) {
		pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		/* Drop final semicolon */
		acs_list.buffer[acs_list.len-1] = 0;
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);
	}

	if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED) {
		pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
			 pci_name(provider));
	}

	kfree(acs_list.buffer);

	return ret;
}

/**
 * pci_p2pdma_distance_many - Determine the cumulative distance between
 *	a p2pdma provider and the clients in use.
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible, otherwise returns a
 * non-negative number where a lower number is the preferable choice. (If
 * one of the clients is the same device as the provider, it will return 0,
 * which is the best choice.)
 *
 * "compatible" means the provider and the clients are either all behind
 * the same PCI root port or the host bridges connected to each of the devices
 * are listed in the 'pci_p2pdma_whitelist'.
 */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	bool not_supported = false;
	struct pci_dev *pci_client;
	int total_dist = 0;
	int distance;
	int i, ret;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
#ifdef CONFIG_DMA_VIRT_OPS
		if (clients[i]->dma_ops == &dma_virt_ops) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
			return -1;
		}
#endif

		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		if (verbose)
			ret = upstream_bridge_distance_warn(provider,
					pci_client, &distance);
		else
			ret = upstream_bridge_distance(provider, pci_client,
						       &distance, NULL, NULL);

		pci_dev_put(pci_client);

		if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		total_dist += distance;
	}

	if (not_supported)
		return -1;

	return total_dist;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
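
/*
 * Example (editorial addition): checking whether two client devices can
 * both reach a provider's p2p memory. The devices passed in here are
 * hypothetical stand-ins; a negative distance means at least one client
 * is incompatible with the provider.
 */
static bool example_clients_compatible(struct pci_dev *provider,
				       struct device *dma_dev,
				       struct device *nvme_dev)
{
	struct device *clients[] = { dma_dev, nvme_dev };

	return pci_p2pdma_distance_many(provider, clients,
					ARRAY_SIZE(clients), true) >= 0;
}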

/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */
bool pci_has_p2pmem(struct pci_dev *pdev)
{
	return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);

/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
 *	the specified list of clients and shortest distance (as determined
 *	by pci_p2pdma_distance_many())
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated). If multiple providers are an equal
 * distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
 * to return the reference) or NULL if no compatible device is found. The
 * found provider will also be assigned to the client list.
 */
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
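
/*
 * Example (editorial addition): selecting the closest published p2pmem
 * provider for a set of clients. The returned device carries a reference
 * that the caller must eventually drop with pci_dev_put().
 */
static struct pci_dev *example_pick_provider(struct device **clients,
					     int num_clients)
{
	struct pci_dev *provider;

	provider = pci_p2pmem_find_many(clients, num_clients);
	if (!provider)
		pr_info("no compatible p2pmem provider found\n");

	return provider;
}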

/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;

	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */
	rcu_read_lock();
	if (unlikely(!pdev->p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
			(void **) &ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live(ref))) {
		gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
		ret = NULL;
		goto out;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);

/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;

	gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
			(void **) &ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);
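
/*
 * Example (editorial addition): a minimal allocate/use/free cycle. The
 * SZ_4K buffer size is a hypothetical placeholder; allocations come from
 * the provider's gen_pool, which was created with PAGE_SHIFT granularity.
 */
static int example_use_p2pmem(struct pci_dev *provider)
{
	void *buf;

	buf = pci_alloc_p2pmem(provider, SZ_4K);
	if (!buf)
		return -ENOMEM;

	/* ... hand the buffer to a peer device for DMA ... */

	pci_free_p2pmem(provider, buf, SZ_4K);
	return 0;
}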

/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	if (!addr)
		return 0;
	if (!pdev->p2pdma)
		return 0;

	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
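
/*
 * Example (editorial addition): programming a peer with the bus address
 * of a p2pmem buffer. example_peer_write_addr() is a hypothetical stand-in
 * for whatever register write the real peer driver would perform.
 */
static void example_tell_peer(struct pci_dev *provider, void *buf,
			      void (*example_peer_write_addr)(pci_bus_addr_t))
{
	pci_bus_addr_t bus_addr = pci_p2pmem_virt_to_bus(provider, buf);

	example_peer_write_addr(bus_addr);
}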

/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: returns the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);

/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device the memory was allocated from
 * @sgl: the allocated scatterlist
 */
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);
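
/*
 * Example (editorial addition): building and tearing down a single-entry
 * scatterlist backed by p2p memory, roughly as an NVMe target might do
 * for a command buffer. The SZ_64K length is a hypothetical placeholder.
 */
static int example_setup_cmd_buffer(struct pci_dev *provider,
				    struct scatterlist **sgl,
				    unsigned int *nents)
{
	*sgl = pci_p2pmem_alloc_sgl(provider, nents, SZ_64K);
	if (!*sgl)
		return -ENOMEM;

	return 0;
}

static void example_teardown_cmd_buffer(struct pci_dev *provider,
					struct scatterlist *sgl)
{
	pci_p2pmem_free_sgl(provider, sgl);
}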

/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-to-peer DMA operations. Non-published memory is reserved for
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	if (pdev->p2pdma)
		pdev->p2pdma->p2pmem_published = publish;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
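
/*
 * Example (editorial addition): the counterpart to publishing in probe();
 * a provider would typically unpublish on the way down so that no new
 * users can find its memory. The remove() routine is hypothetical; the
 * pool itself is torn down by the devm action registered in setup.
 */
static void example_provider_remove(struct pci_dev *pdev)
{
	pci_p2pmem_publish(pdev, false);
}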

static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct pci_dev *provider,
						    struct pci_dev *client)
{
	if (!provider->p2pdma)
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	return xa_to_value(xa_load(&provider->p2pdma->map_types,
				   map_types_idx(client)));
}

static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
		struct device *dev, struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	phys_addr_t paddr;
	int i;

	/*
	 * p2pdma mappings are not compatible with devices that use
	 * dma_virt_ops. If the upper layers do the right thing
	 * this should never happen because it will be prevented
	 * by the check in pci_p2pdma_distance_many()
	 */
#ifdef CONFIG_DMA_VIRT_OPS
	if (WARN_ON_ONCE(dev->dma_ops == &dma_virt_ops))
		return 0;
#endif

	for_each_sg(sg, s, nents, i) {
		paddr = sg_phys(s);

		s->dma_address = paddr - p2p_pgmap->bus_offset;
		sg_dma_len(s) = s->length;
	}

	return nents;
}
/**
 * pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
 * @dev: device doing the DMA request
 * @sg: scatter list to map
 * @nents: elements in the scatterlist
 * @dir: DMA direction
 * @attrs: DMA attributes passed to dma_map_sg() (if called)
 *
 * Scatterlists mapped with this function should be unmapped using
 * pci_p2pdma_unmap_sg_attrs().
 *
 * Returns the number of SG entries mapped or 0 on error.
 */
int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_pagemap *p2p_pgmap =
		to_p2p_pgmap(sg_page(sg)->pgmap);
	struct pci_dev *client;

	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return 0;

	client = to_pci_dev(dev);

	switch (pci_p2pdma_map_type(p2p_pgmap->provider, client)) {
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	case PCI_P2PDMA_MAP_BUS_ADDR:
		return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);

/**
 * pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
 *	mapped with pci_p2pdma_map_sg_attrs()
 * @dev: device doing the DMA request
 * @sg: scatter list to unmap
 * @nents: number of elements returned by pci_p2pdma_map_sg_attrs()
 * @dir: DMA direction
 * @attrs: DMA attributes passed to dma_unmap_sg() (if called)
 */
void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_pagemap *p2p_pgmap =
		to_p2p_pgmap(sg_page(sg)->pgmap);
	enum pci_p2pdma_map_type map_type;
	struct pci_dev *client;

	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return;

	client = to_pci_dev(dev);

	map_type = pci_p2pdma_map_type(p2p_pgmap->provider, client);

	if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
		dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
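
/*
 * Example (editorial addition): mapping and unmapping a p2p scatterlist
 * around a transfer. @dma_dev is a hypothetical device issuing the DMA;
 * the scatterlist must be backed by p2pmem pages from the provider.
 */
static int example_do_p2p_dma(struct device *dma_dev,
			      struct scatterlist *sgl, int nents)
{
	int mapped;

	mapped = pci_p2pdma_map_sg_attrs(dma_dev, sgl, nents,
					 DMA_BIDIRECTIONAL, 0);
	if (!mapped)
		return -EIO;

	/* ... submit the transfer and wait for completion ... */

	pci_p2pdma_unmap_sg_attrs(dma_dev, sgl, mapped,
				  DMA_BIDIRECTIONAL, 0);
	return 0;
}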

/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *		to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *		(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format strtobool() accepts). A false
 * value disables p2pdma; a true value expects the caller to
 * automatically find a compatible device; specifying a PCI device
 * expects the caller to use that specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success
 */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device that doesn't exist
		 * like "0000:01:00.1", we don't want strtobool to think
		 * it's a '0' when it's clearly not what the user wanted.
		 * So we require 0's and 1's to be exactly one character.
		 */
	} else if (!strtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);

/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *		whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns the number of characters printed to @page
 */
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
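
/*
 * Example (editorial addition): wiring the two helpers into a configfs
 * attribute, in the style of the nvmet port p2pmem attribute. The
 * example_port structure and both handlers are hypothetical.
 */
struct example_port {
	struct config_item item;
	struct pci_dev *p2p_dev;
	bool use_p2pdma;
};

static ssize_t example_p2pmem_show(struct config_item *item, char *page)
{
	struct example_port *port =
		container_of(item, struct example_port, item);

	return pci_p2pdma_enable_show(page, port->p2p_dev, port->use_p2pdma);
}

static ssize_t example_p2pmem_store(struct config_item *item,
				    const char *page, size_t count)
{
	struct example_port *port =
		container_of(item, struct example_port, item);
	int error;

	error = pci_p2pdma_enable_store(page, &port->p2p_dev,
					&port->use_p2pdma);

	return error ? error : count;
}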