// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), which is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
 * set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 */
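
/*
 * Illustration (editor's sketch, not part of the driver): the private
 * partition layout described above. Uncached items grow up from the
 * partition header, cached items grow down from the end, and the gap
 * between the two free offsets is the remaining free space:
 *
 *	+------------------------------+ <- partition base
 *	| smem_partition_header        |
 *	+------------------------------+
 *	| hdr | data | hdr | data | .. |  uncached items, allocated upwards
 *	+------------------------------+ <- offset_free_uncached
 *	|          free space          |
 *	+------------------------------+ <- offset_free_cached
 *	| .. | data | hdr | data | hdr |  cached items, allocated downwards
 *	+------------------------------+ <- partition base + size
 */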

/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		11

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:	magic number, must be SMEM_INFO_MAGIC
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 * @reserved:	for now reserved entry
 * @num_items:	highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @global_partition:	pointer to global partition when in use
 * @global_cacheline:	cacheline size for global partition
 * @partitions:	list of pointers to partitions affecting the current
 *		processor/host
 * @cacheline:	list of cacheline sizes for each host
 * @item_count: max accepted item number
 * @socinfo:	platform device registered for the "qcom-socinfo" driver
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	struct smem_partition_header *global_partition;
	size_t global_cacheline;
	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
	size_t cacheline[SMEM_HOST_COUNT];
	u32 item_count;
	struct platform_device *socinfo;

	unsigned num_regions;
	struct smem_region regions[];
};

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
					size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}
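
/*
 * Illustration (editor's note, not part of the driver) of the pointer
 * arithmetic in the helpers above. An uncached entry is laid out as
 *
 *	[smem_private_entry][padding_hdr bytes][size bytes of data]
 *
 * so its item starts sizeof(*e) + padding_hdr after the header and the
 * next header follows size bytes after that. A cached entry is mirrored,
 *
 *	[size bytes of data][smem_private_entry]
 *
 * so its item sits size bytes below the header, and the next (lower)
 * header is a further ALIGN(sizeof(*e), cacheline) + size bytes down.
 */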

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);
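
/*
 * Example (hypothetical client code, for illustration only; the item
 * number, host id and struct are made up): an item is typically allocated
 * once, and -EEXIST is not an error for later callers.
 *
 *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM,
 *			      sizeof(struct my_msg));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 */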

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if (region->aux_base == aux_base || !aux_base) {
			if (size != NULL)
				*size = le32_to_cpu(entry->size);
			return region->virt_base + le32_to_cpu(entry->offset);
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   size_t cacheline,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
			le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve ptr and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up smem item and returns pointer to it. Size of smem
 * item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	size_t cacheln;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		cacheln = __smem->cacheline[host];
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		cacheln = __smem->global_cacheline;
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);
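
/*
 * Example (hypothetical, continuing the sketch above): look up the item
 * and its size. The returned pointer references shared memory directly
 * and stays valid, since smem items are never freed.
 *
 *	struct my_msg *msg = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 */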

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p:	the virtual address to convert
 *
 * Returns 0 if the pointer provided is not within any smem region.
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	unsigned i;

	for (i = 0; i < __smem->num_regions; i++) {
		struct smem_region *region = &__smem->regions[i];

		if (p < region->virt_base)
			continue;
		if (p < region->virt_base + region->size) {
			u64 offset = p - region->virt_base;

			return (phys_addr_t)region->aux_base + offset;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_smem_virt_to_phys);
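
/*
 * Example (hypothetical): translating an item pointer into a physical
 * address, e.g. to describe a buffer location to a remote processor;
 * a zero return means the pointer was not inside any smem region.
 *
 *	phys_addr_t addr = qcom_smem_virt_to_phys(msg);
 *	if (!addr)
 *		return -EINVAL;
 */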

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition
 * table entry is supplied.  Returns a pointer to its header if
 * valid, or a null pointer otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 size;

	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %02x %02x %02x %02x\n",
			header->magic[0], header->magic[1],
			header->magic[2], header->magic[3]);
		return NULL;
	}

	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
				host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
				host1, le16_to_cpu(header->host1));
		return NULL;
	}

	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	smem->global_partition = header;
	smem->global_cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned int remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %u\n", remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev, "duplicate host %u\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
				const char *name, int i)
{
	struct device_node *np;
	struct resource r;
	resource_size_t size;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;
	size = resource_size(&r);

	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size);
	if (!smem->regions[i].virt_base)
		return -ENOMEM;
	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = size;

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct qcom_smem *smem;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	int ret;

	num_regions = 1;
	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
	if (ret)
		return ret;

	if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
					"qcom,rpm-msg-ram", 1)))
		return ret;

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	__smem = smem;

	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
						      PLATFORM_DEVID_NONE, NULL,
						      0);
	if (IS_ERR(smem->socinfo))
		dev_dbg(&pdev->dev, "failed to register socinfo device\n");

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	platform_device_unregister(__smem->socinfo);

	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit)

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");