   1// SPDX-License-Identifier: GPL-2.0
   2
   3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2019-2023 Linaro Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <linux/device.h>
   9#include <linux/slab.h>
  10#include <linux/bitfield.h>
  11#include <linux/if_rmnet.h>
  12#include <linux/dma-direction.h>
  13
  14#include "gsi.h"
  15#include "gsi_trans.h"
  16#include "ipa.h"
  17#include "ipa_data.h"
  18#include "ipa_endpoint.h"
  19#include "ipa_cmd.h"
  20#include "ipa_mem.h"
  21#include "ipa_modem.h"
  22#include "ipa_table.h"
  23#include "ipa_gsi.h"
  24#include "ipa_power.h"
  25
  26/* Hardware is told about receive buffers once a "batch" has been queued */
  27#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */
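    /* (ipa_endpoint_replenish() rings the channel doorbell once per batch,
     * i.e. after every 16th receive buffer it queues.)
     */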
  28
  29/* The amount of RX buffer space consumed by standard skb overhead */
  30#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
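    /* (This amounts to NET_SKB_PAD plus the aligned size of struct
     * skb_shared_info, i.e. the part of a page that build_skb() cannot
     * use for packet data.)
     */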
  31
  32/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
  33#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
  34
  35#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
  36
  37/** enum ipa_status_opcode - IPA status opcode field hardware values */
  38enum ipa_status_opcode {				/* *Not* a bitmask */
  39	IPA_STATUS_OPCODE_PACKET		= 1,
  40	IPA_STATUS_OPCODE_NEW_RULE_PACKET	= 2,
  41	IPA_STATUS_OPCODE_DROPPED_PACKET	= 4,
  42	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 8,
  43	IPA_STATUS_OPCODE_LOG			= 16,
  44	IPA_STATUS_OPCODE_DCMP			= 32,
  45	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 64,
  46};
  47
  48/** enum ipa_status_exception - IPA status exception field hardware values */
  49enum ipa_status_exception {				/* *Not* a bitmask */
  50	/* 0 means no exception */
  51	IPA_STATUS_EXCEPTION_DEAGGR		= 1,
  52	IPA_STATUS_EXCEPTION_IPTYPE		= 4,
  53	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 8,
  54	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 16,
  55	IPA_STATUS_EXCEPTION_SW_FILTER		= 32,
  56	IPA_STATUS_EXCEPTION_NAT		= 64,		/* IPv4 */
  57	IPA_STATUS_EXCEPTION_IPV6_CONN_TRACK	= 64,		/* IPv6 */
  58	IPA_STATUS_EXCEPTION_UC			= 128,
  59	IPA_STATUS_EXCEPTION_INVALID_ENDPOINT	= 129,
  60	IPA_STATUS_EXCEPTION_HEADER_INSERT	= 136,
  61	IPA_STATUS_EXCEPTION_CHEKCSUM		= 229,
  62};
  63
  64/** enum ipa_status_mask - IPA status mask field bitmask hardware values */
  65enum ipa_status_mask {
  66	IPA_STATUS_MASK_FRAG_PROCESS		= BIT(0),
  67	IPA_STATUS_MASK_FILT_PROCESS		= BIT(1),
  68	IPA_STATUS_MASK_NAT_PROCESS		= BIT(2),
  69	IPA_STATUS_MASK_ROUTE_PROCESS		= BIT(3),
  70	IPA_STATUS_MASK_TAG_VALID		= BIT(4),
  71	IPA_STATUS_MASK_FRAGMENT		= BIT(5),
  72	IPA_STATUS_MASK_FIRST_FRAGMENT		= BIT(6),
  73	IPA_STATUS_MASK_V4			= BIT(7),
  74	IPA_STATUS_MASK_CKSUM_PROCESS		= BIT(8),
  75	IPA_STATUS_MASK_AGGR_PROCESS		= BIT(9),
  76	IPA_STATUS_MASK_DEST_EOT		= BIT(10),
  77	IPA_STATUS_MASK_DEAGGR_PROCESS		= BIT(11),
  78	IPA_STATUS_MASK_DEAGG_FIRST		= BIT(12),
  79	IPA_STATUS_MASK_SRC_EOT			= BIT(13),
  80	IPA_STATUS_MASK_PREV_EOT		= BIT(14),
  81	IPA_STATUS_MASK_BYTE_LIMIT		= BIT(15),
  82};
  83
  84/* Special IPA filter/router rule field value indicating "rule miss" */
  85#define IPA_STATUS_RULE_MISS	0x3ff	/* 10-bit filter/router rule fields */
  86
  87/* The IPA status nat_type field uses enum ipa_nat_type hardware values */
  88
  89/* enum ipa_status_field_id - IPA packet status structure field identifiers */
  90enum ipa_status_field_id {
  91	STATUS_OPCODE,			/* enum ipa_status_opcode */
  92	STATUS_EXCEPTION,		/* enum ipa_status_exception */
  93	STATUS_MASK,			/* enum ipa_status_mask (bitmask) */
  94	STATUS_LENGTH,
  95	STATUS_SRC_ENDPOINT,
  96	STATUS_DST_ENDPOINT,
  97	STATUS_METADATA,
  98	STATUS_FILTER_LOCAL,		/* Boolean */
  99	STATUS_FILTER_HASH,		/* Boolean */
 100	STATUS_FILTER_GLOBAL,		/* Boolean */
 101	STATUS_FILTER_RETAIN,		/* Boolean */
 102	STATUS_FILTER_RULE_INDEX,
 103	STATUS_ROUTER_LOCAL,		/* Boolean */
 104	STATUS_ROUTER_HASH,		/* Boolean */
 105	STATUS_UCP,			/* Boolean */
 106	STATUS_ROUTER_TABLE,
 107	STATUS_ROUTER_RULE_INDEX,
 108	STATUS_NAT_HIT,			/* Boolean */
 109	STATUS_NAT_INDEX,
 110	STATUS_NAT_TYPE,		/* enum ipa_nat_type */
 111	STATUS_TAG_LOW32,		/* Low-order 32 bits of 48-bit tag */
 112	STATUS_TAG_HIGH16,		/* High-order 16 bits of 48-bit tag */
 113	STATUS_SEQUENCE,
 114	STATUS_TIME_OF_DAY,
 115	STATUS_HEADER_LOCAL,		/* Boolean */
 116	STATUS_HEADER_OFFSET,
 117	STATUS_FRAG_HIT,		/* Boolean */
 118	STATUS_FRAG_RULE_INDEX,
 119};
 120
 121/* Size in bytes of an IPA packet status structure */
 122#define IPA_STATUS_SIZE			sizeof(__le32[8])
 123
 124/* IPA status structure decoder; looks up field values for a structure */
 125static u32 ipa_status_extract(struct ipa *ipa, const void *data,
 126			      enum ipa_status_field_id field)
 127{
 128	enum ipa_version version = ipa->version;
 129	const __le32 *word = data;
 130
 131	switch (field) {
 132	case STATUS_OPCODE:
 133		return le32_get_bits(word[0], GENMASK(7, 0));
 134	case STATUS_EXCEPTION:
 135		return le32_get_bits(word[0], GENMASK(15, 8));
 136	case STATUS_MASK:
 137		return le32_get_bits(word[0], GENMASK(31, 16));
 138	case STATUS_LENGTH:
 139		return le32_get_bits(word[1], GENMASK(15, 0));
 140	case STATUS_SRC_ENDPOINT:
 141		if (version < IPA_VERSION_5_0)
 142			return le32_get_bits(word[1], GENMASK(20, 16));
 143		return le32_get_bits(word[1], GENMASK(23, 16));
 144	/* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
 145	/* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
 146	case STATUS_DST_ENDPOINT:
 147		if (version < IPA_VERSION_5_0)
 148			return le32_get_bits(word[1], GENMASK(28, 24));
 149		return le32_get_bits(word[7], GENMASK(23, 16));
 150	/* Status word 1, bits 29-31 are reserved */
 151	case STATUS_METADATA:
 152		return le32_to_cpu(word[2]);
 153	case STATUS_FILTER_LOCAL:
 154		return le32_get_bits(word[3], GENMASK(0, 0));
 155	case STATUS_FILTER_HASH:
 156		return le32_get_bits(word[3], GENMASK(1, 1));
 157	case STATUS_FILTER_GLOBAL:
 158		return le32_get_bits(word[3], GENMASK(2, 2));
 159	case STATUS_FILTER_RETAIN:
 160		return le32_get_bits(word[3], GENMASK(3, 3));
 161	case STATUS_FILTER_RULE_INDEX:
 162		return le32_get_bits(word[3], GENMASK(13, 4));
 163	/* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
 164	case STATUS_ROUTER_LOCAL:
 165		if (version < IPA_VERSION_5_0)
 166			return le32_get_bits(word[3], GENMASK(14, 14));
 167		return le32_get_bits(word[1], GENMASK(27, 27));
 168	case STATUS_ROUTER_HASH:
 169		if (version < IPA_VERSION_5_0)
 170			return le32_get_bits(word[3], GENMASK(15, 15));
 171		return le32_get_bits(word[1], GENMASK(28, 28));
 172	case STATUS_UCP:
 173		if (version < IPA_VERSION_5_0)
 174			return le32_get_bits(word[3], GENMASK(16, 16));
 175		return le32_get_bits(word[7], GENMASK(31, 31));
 176	case STATUS_ROUTER_TABLE:
 177		if (version < IPA_VERSION_5_0)
 178			return le32_get_bits(word[3], GENMASK(21, 17));
 179		return le32_get_bits(word[3], GENMASK(21, 14));
 180	case STATUS_ROUTER_RULE_INDEX:
 181		return le32_get_bits(word[3], GENMASK(31, 22));
 182	case STATUS_NAT_HIT:
 183		return le32_get_bits(word[4], GENMASK(0, 0));
 184	case STATUS_NAT_INDEX:
 185		return le32_get_bits(word[4], GENMASK(13, 1));
 186	case STATUS_NAT_TYPE:
 187		return le32_get_bits(word[4], GENMASK(15, 14));
 188	case STATUS_TAG_LOW32:
 189		return le32_get_bits(word[4], GENMASK(31, 16)) |
 190			(le32_get_bits(word[5], GENMASK(15, 0)) << 16);
 191	case STATUS_TAG_HIGH16:
 192		return le32_get_bits(word[5], GENMASK(31, 16));
 193	case STATUS_SEQUENCE:
 194		return le32_get_bits(word[6], GENMASK(7, 0));
 195	case STATUS_TIME_OF_DAY:
 196		return le32_get_bits(word[6], GENMASK(31, 8));
 197	case STATUS_HEADER_LOCAL:
 198		return le32_get_bits(word[7], GENMASK(0, 0));
 199	case STATUS_HEADER_OFFSET:
 200		return le32_get_bits(word[7], GENMASK(10, 1));
 201	case STATUS_FRAG_HIT:
 202		return le32_get_bits(word[7], GENMASK(11, 11));
 203	case STATUS_FRAG_RULE_INDEX:
 204		return le32_get_bits(word[7], GENMASK(15, 12));
 205	/* Status word 7, bits 16-30 are reserved */
 206	/* Status word 7, bit 31 is reserved (not IPA v5.0+) */
 207	default:
 208		WARN(true, "%s: bad field_id %u\n", __func__, field);
 209		return 0;
 210	}
 211}
 212
 213/* Compute the aggregation size value to use for a given buffer size */
 214static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
 215{
 216	/* A hard aggregation limit will not be crossed; aggregation closes
 217	 * if saving incoming data would cross the hard byte limit boundary.
 218	 *
 219	 * With a soft limit, aggregation closes *after* the size boundary
 220	 * has been crossed.  In that case the limit must leave enough space
 221	 * after that limit to receive a full MTU of data plus overhead.
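    	 *
    	 * For example, given a hard limit, an 8192 byte value encodes as
    	 * 8 (kilobytes); with a soft limit, the MTU plus overhead reserve
    	 * is subtracted first, so the encoded value is smaller.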
 222	 */
 223	if (!aggr_hard_limit)
 224		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
 225
 226	/* The byte limit is encoded as a number of kilobytes */
 227
 228	return rx_buffer_size / SZ_1K;
 229}
 230
 231static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 232			    const struct ipa_gsi_endpoint_data *all_data,
 233			    const struct ipa_gsi_endpoint_data *data)
 234{
 235	const struct ipa_gsi_endpoint_data *other_data;
 236	struct device *dev = &ipa->pdev->dev;
 237	enum ipa_endpoint_name other_name;
 238
 239	if (ipa_gsi_endpoint_data_empty(data))
 240		return true;
 241
 242	if (!data->toward_ipa) {
 243		const struct ipa_endpoint_rx *rx_config;
 244		const struct reg *reg;
 245		u32 buffer_size;
 246		u32 aggr_size;
 247		u32 limit;
 248
 249		if (data->endpoint.filter_support) {
 250			dev_err(dev, "filtering not supported for RX endpoint %u\n",
 251				data->endpoint_id);
 253			return false;
 254		}
 255
 256		/* Nothing more to check for non-AP RX */
 257		if (data->ee_id != GSI_EE_AP)
 258			return true;
 259
 260		rx_config = &data->endpoint.config.rx;
 261
 262		/* The buffer size must hold an MTU plus overhead */
 263		buffer_size = rx_config->buffer_size;
 264		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
 265		if (buffer_size < limit) {
 266			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
 267				data->endpoint_id, buffer_size, limit);
 268			return false;
 269		}
 270
 271		if (!data->endpoint.config.aggregation) {
 272			bool result = true;
 273
 274			/* No aggregation; check for bogus aggregation data */
 275			if (rx_config->aggr_time_limit) {
 276				dev_err(dev,
 277					"time limit with no aggregation for RX endpoint %u\n",
 278					data->endpoint_id);
 279				result = false;
 280			}
 281
 282			if (rx_config->aggr_hard_limit) {
 283				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
 284					data->endpoint_id);
 285				result = false;
 286			}
 287
 288			if (rx_config->aggr_close_eof) {
 289				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
 290					data->endpoint_id);
 291				result = false;
 292			}
 293
 294			return result;	/* Nothing more to check */
 295		}
 296
 297		/* For an endpoint supporting receive aggregation, the byte
 298		 * limit defines the point at which aggregation closes.  This
 299		 * check ensures the receive buffer size doesn't result in a
 300		 * limit that exceeds what's representable in the aggregation
 301		 * byte limit field.
 302		 */
 303		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
 304					     rx_config->aggr_hard_limit);
 305		reg = ipa_reg(ipa, ENDP_INIT_AGGR);
 306
 307		limit = reg_field_max(reg, BYTE_LIMIT);
 308		if (aggr_size > limit) {
 309			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
 310				data->endpoint_id, aggr_size, limit);
 311
 312			return false;
 313		}
 314
 315		return true;	/* Nothing more to check for RX */
 316	}
 317
 318	/* Starting with IPA v4.5 sequencer replication is obsolete */
 319	if (ipa->version >= IPA_VERSION_4_5) {
 320		if (data->endpoint.config.tx.seq_rep_type) {
 321			dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n",
 322				data->endpoint_id);
 323			return false;
 324		}
 325	}
 326
 327	if (data->endpoint.config.status_enable) {
 328		other_name = data->endpoint.config.tx.status_endpoint;
 329		if (other_name >= count) {
 330			dev_err(dev, "status endpoint name %u out of range for endpoint %u\n",
 331				other_name, data->endpoint_id);
 333			return false;
 334		}
 335
 336		/* Status endpoint must be defined... */
 337		other_data = &all_data[other_name];
 338		if (ipa_gsi_endpoint_data_empty(other_data)) {
 339			dev_err(dev, "status endpoint name %u undefined for endpoint %u\n",
 340				other_name, data->endpoint_id);
 342			return false;
 343		}
 344
 345		/* ...and has to be an RX endpoint... */
 346		if (other_data->toward_ipa) {
 347			dev_err(dev,
 348				"status endpoint for endpoint %u not RX\n",
 349				data->endpoint_id);
 350			return false;
 351		}
 352
 353		/* ...and if it's to be an AP endpoint... */
 354		if (other_data->ee_id == GSI_EE_AP) {
 355			/* ...make sure it has status enabled. */
 356			if (!other_data->endpoint.config.status_enable) {
 357				dev_err(dev,
 358					"status not enabled for endpoint %u\n",
 359					other_data->endpoint_id);
 360				return false;
 361			}
 362		}
 363	}
 364
 365	if (data->endpoint.config.dma_mode) {
 366		other_name = data->endpoint.config.dma_endpoint;
 367		if (other_name >= count) {
 368			dev_err(dev, "DMA endpoint name %u out of range for endpoint %u\n",
 369				other_name, data->endpoint_id);
 371			return false;
 372		}
 373
 374		other_data = &all_data[other_name];
 375		if (ipa_gsi_endpoint_data_empty(other_data)) {
 376			dev_err(dev, "DMA endpoint name %u undefined for endpoint %u\n",
 377				other_name, data->endpoint_id);
 379			return false;
 380		}
 381	}
 382
 383	return true;
 384}
 385
 386/* Validate endpoint configuration data.  Return max defined endpoint ID */
 387static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
 388			    const struct ipa_gsi_endpoint_data *data)
 389{
 390	const struct ipa_gsi_endpoint_data *dp = data;
 391	struct device *dev = &ipa->pdev->dev;
 392	enum ipa_endpoint_name name;
 393	u32 max;
 394
 395	if (count > IPA_ENDPOINT_COUNT) {
 396		dev_err(dev, "too many endpoints specified (%u > %u)\n",
 397			count, IPA_ENDPOINT_COUNT);
 398		return 0;
 399	}
 400
 401	/* Make sure needed endpoints have defined data */
 402	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
 403		dev_err(dev, "command TX endpoint not defined\n");
 404		return 0;
 405	}
 406	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
 407		dev_err(dev, "LAN RX endpoint not defined\n");
 408		return 0;
 409	}
 410	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
 411		dev_err(dev, "AP->modem TX endpoint not defined\n");
 412		return 0;
 413	}
 414	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
 415		dev_err(dev, "AP<-modem RX endpoint not defined\n");
 416		return 0;
 417	}
 418
 419	max = 0;
 420	for (name = 0; name < count; name++, dp++) {
 421		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
 422			return 0;
 423		max = max_t(u32, max, dp->endpoint_id);
 424	}
 425
 426	return max;
 427}
 428
 429/* Allocate a transaction to use on a non-command endpoint */
 430static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
 431						  u32 tre_count)
 432{
 433	struct gsi *gsi = &endpoint->ipa->gsi;
 434	u32 channel_id = endpoint->channel_id;
 435	enum dma_data_direction direction;
 436
 437	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 438
 439	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
 440}
 441
 442/* suspend_delay represents suspend for RX, delay for TX endpoints.
 443 * Note that suspend is not supported starting with IPA v4.0, and
 444 * delay mode should not be used starting with IPA v4.2.
 445 */
 446static bool
 447ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
 448{
 449	struct ipa *ipa = endpoint->ipa;
 450	const struct reg *reg;
 451	u32 field_id;
 452	u32 offset;
 453	bool state;
 454	u32 mask;
 455	u32 val;
 456
 457	if (endpoint->toward_ipa)
 458		WARN_ON(ipa->version >= IPA_VERSION_4_2);
 459	else
 460		WARN_ON(ipa->version >= IPA_VERSION_4_0);
 461
 462	reg = ipa_reg(ipa, ENDP_INIT_CTRL);
 463	offset = reg_n_offset(reg, endpoint->endpoint_id);
 464	val = ioread32(ipa->reg_virt + offset);
 465
 466	field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
 467	mask = reg_bit(reg, field_id);
 468
 469	state = !!(val & mask);
 470
 471	/* Don't bother if it's already in the requested state */
 472	if (suspend_delay != state) {
 473		val ^= mask;
 474		iowrite32(val, ipa->reg_virt + offset);
 475	}
 476
 477	return state;
 478}
 479
 480/* We don't care what the previous state was for delay mode */
 481static void
 482ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
 483{
 484	/* Delay mode should not be used for IPA v4.2+ */
 485	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
 486	WARN_ON(!endpoint->toward_ipa);
 487
 488	(void)ipa_endpoint_init_ctrl(endpoint, enable);
 489}
 490
 491static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
 492{
 493	u32 endpoint_id = endpoint->endpoint_id;
 494	struct ipa *ipa = endpoint->ipa;
 495	u32 unit = endpoint_id / 32;
 496	const struct reg *reg;
 497	u32 val;
 498
 499	WARN_ON(!test_bit(endpoint_id, ipa->available));
 500
 501	reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
 502	val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
 503
 504	return !!(val & BIT(endpoint_id % 32));
 505}
 506
 507static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
 508{
 509	u32 endpoint_id = endpoint->endpoint_id;
 510	u32 mask = BIT(endpoint_id % 32);
 511	struct ipa *ipa = endpoint->ipa;
 512	u32 unit = endpoint_id / 32;
 513	const struct reg *reg;
 514
 515	WARN_ON(!test_bit(endpoint_id, ipa->available));
 516
 517	reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
 518	iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
 519}
 520
 521/**
 522 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 523 * @endpoint:	Endpoint on which to emulate a suspend
 524 *
 525 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 526 *  with an open aggregation frame.  This is to work around a hardware
 527 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 528 *  generated when it should be.
 529 */
 530static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
 531{
 532	struct ipa *ipa = endpoint->ipa;
 533
 534	if (!endpoint->config.aggregation)
 535		return;
 536
 537	/* Nothing to do if the endpoint doesn't have aggregation open */
 538	if (!ipa_endpoint_aggr_active(endpoint))
 539		return;
 540
 541	/* Force close aggregation */
 542	ipa_endpoint_force_close(endpoint);
 543
 544	ipa_interrupt_simulate_suspend(ipa->interrupt);
 545}
 546
 547/* Returns previous suspend state (true means suspend was enabled) */
 548static bool
 549ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
 550{
 551	bool suspended;
 552
 553	if (endpoint->ipa->version >= IPA_VERSION_4_0)
 554		return enable;	/* For IPA v4.0+, no change made */
 555
 556	WARN_ON(endpoint->toward_ipa);
 557
 558	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
 559
 560	/* A client suspended with an open aggregation frame will not
 561	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
 562	 * ipa_endpoint_suspend_aggr() handle this.
 563	 */
 564	if (enable && !suspended)
 565		ipa_endpoint_suspend_aggr(endpoint);
 566
 567	return suspended;
 568}
 569
 570/* Put all modem RX endpoints into suspend mode, and stop transmission
 571 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 572 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 573 * control instead.
 574 */
 575void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
 576{
 577	u32 endpoint_id = 0;
 578
 579	while (endpoint_id < ipa->endpoint_count) {
 580		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
 581
 582		if (endpoint->ee_id != GSI_EE_MODEM)
 583			continue;
 584
 585		if (!endpoint->toward_ipa)
 586			(void)ipa_endpoint_program_suspend(endpoint, enable);
 587		else if (ipa->version < IPA_VERSION_4_2)
 588			ipa_endpoint_program_delay(endpoint, enable);
 589		else
 590			gsi_modem_channel_flow_control(&ipa->gsi,
 591						       endpoint->channel_id,
 592						       enable);
 593	}
 594}
 595
 596/* Reset all modem endpoints to use the default exception endpoint */
 597int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
 598{
 599	struct gsi_trans *trans;
 600	u32 endpoint_id;
 601	u32 count;
 602
 603	/* We need one command per modem TX endpoint, plus the commands
 604	 * that clear the pipeline.
 605	 */
 606	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
 607	trans = ipa_cmd_trans_alloc(ipa, count);
 608	if (!trans) {
 609		dev_err(&ipa->pdev->dev,
 610			"no transaction to reset modem exception endpoints\n");
 611		return -EBUSY;
 612	}
 613
 614	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
 615		struct ipa_endpoint *endpoint;
 616		const struct reg *reg;
 617		u32 offset;
 618
 619		/* We only reset modem TX endpoints */
 620		endpoint = &ipa->endpoint[endpoint_id];
 621		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
 622			continue;
 623
 624		reg = ipa_reg(ipa, ENDP_STATUS);
 625		offset = reg_n_offset(reg, endpoint_id);
 626
 627		/* Value written is 0, and all bits are updated.  That
 628		 * means status is disabled on the endpoint, and as a
 629		 * result all other fields in the register are ignored.
 630		 */
 631		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
 632	}
 633
 634	ipa_cmd_pipeline_clear_add(trans);
 635
 636	gsi_trans_commit_wait(trans);
 637
 638	ipa_cmd_pipeline_clear_wait(ipa);
 639
 640	return 0;
 641}
 642
 643static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
 644{
 645	u32 endpoint_id = endpoint->endpoint_id;
 646	struct ipa *ipa = endpoint->ipa;
 647	enum ipa_cs_offload_en enabled;
 648	const struct reg *reg;
 649	u32 val = 0;
 650
 651	reg = ipa_reg(ipa, ENDP_INIT_CFG);
 652	/* FRAG_OFFLOAD_EN is 0 */
 653	if (endpoint->config.checksum) {
 654		enum ipa_version version = ipa->version;
 655
 656		if (endpoint->toward_ipa) {
 657			u32 off;
 658
 659			/* Checksum header offset is in 4-byte units */
 660			off = sizeof(struct rmnet_map_header) / sizeof(u32);
 661			val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
 662
 663			enabled = version < IPA_VERSION_4_5
 664					? IPA_CS_OFFLOAD_UL
 665					: IPA_CS_OFFLOAD_INLINE;
 666		} else {
 667			enabled = version < IPA_VERSION_4_5
 668					? IPA_CS_OFFLOAD_DL
 669					: IPA_CS_OFFLOAD_INLINE;
 670		}
 671	} else {
 672		enabled = IPA_CS_OFFLOAD_NONE;
 673	}
 674	val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
 675	/* CS_GEN_QMB_MASTER_SEL is 0 */
 676
 677	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
 678}
 679
 680static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
 681{
 682	u32 endpoint_id = endpoint->endpoint_id;
 683	struct ipa *ipa = endpoint->ipa;
 684	const struct reg *reg;
 685	u32 val;
 686
 687	if (!endpoint->toward_ipa)
 688		return;
 689
 690	reg = ipa_reg(ipa, ENDP_INIT_NAT);
 691	val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
 692
 693	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
 694}
 695
 696static u32
 697ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
 698{
 699	u32 header_size = sizeof(struct rmnet_map_header);
 700
 701	/* Without checksum offload, we just have the MAP header */
 702	if (!endpoint->config.checksum)
 703		return header_size;
 704
 705	if (version < IPA_VERSION_4_5) {
 706		/* Checksum header inserted for AP TX endpoints only */
 707		if (endpoint->toward_ipa)
 708			header_size += sizeof(struct rmnet_map_ul_csum_header);
 709	} else {
 710		/* Checksum header is used in both directions */
 711		header_size += sizeof(struct rmnet_map_v5_csum_header);
 712	}
 713
 714	return header_size;
 715}
 716
 717/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
 718static u32 ipa_header_size_encode(enum ipa_version version,
 719				  const struct reg *reg, u32 header_size)
 720{
 721	u32 field_max = reg_field_max(reg, HDR_LEN);
 722	u32 val;
 723
 724	/* We know field_max can be used as a mask (2^n - 1) */
 725	val = reg_encode(reg, HDR_LEN, header_size & field_max);
 726	if (version < IPA_VERSION_4_5) {
 727		WARN_ON(header_size > field_max);
 728		return val;
 729	}
 730
 731	/* IPA v4.5 adds a few more most-significant bits */
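    	/* Illustrative example: were HDR_LEN a 6-bit field (field_max 0x3f),
    	 * a 69 byte header would encode 5 in HDR_LEN and 1 in HDR_LEN_MSB.
    	 */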
 732	header_size >>= hweight32(field_max);
 733	WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
 734	val |= reg_encode(reg, HDR_LEN_MSB, header_size);
 735
 736	return val;
 737}
 738
 739/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
 740static u32 ipa_metadata_offset_encode(enum ipa_version version,
 741				      const struct reg *reg, u32 offset)
 742{
 743	u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
 744	u32 val;
 745
 746	/* We know field_max can be used as a mask (2^n - 1) */
 747	val = reg_encode(reg, HDR_OFST_METADATA, offset);
 748	if (version < IPA_VERSION_4_5) {
 749		WARN_ON(offset > field_max);
 750		return val;
 751	}
 752
 753	/* IPA v4.5 adds a few more most-significant bits */
 754	offset >>= hweight32(field_max);
 755	WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
 756	val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
 757
 758	return val;
 759}
 760
 761/**
 762 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 763 * @endpoint:	Endpoint pointer
 764 *
 765 * We program QMAP endpoints so each packet received is preceded by a QMAP
 766 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 767 * packet size field, and we have the IPA hardware populate both for each
 768 * received packet.  The header is configured (in the HDR_EXT register)
 769 * to use big endian format.
 770 *
 771 * The packet size is written into the QMAP header's pkt_len field.  That
 772 * location is defined here using the HDR_OFST_PKT_SIZE field.
 773 *
 774 * The mux_id comes from a 4-byte metadata value supplied with each packet
 775 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 776 * value that we want, in its low-order byte.  A bitmask defined in the
 777 * endpoint's METADATA_MASK register defines which byte within the modem
 778 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 779 * here indicates where the extracted byte should be placed within the QMAP
 780 * header.
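     *
     * (For reference: in struct rmnet_map_header the mux_id byte is at
     * offset 1 and the 16-bit pkt_len field is at offset 2.)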
 781 */
 782static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
 783{
 784	u32 endpoint_id = endpoint->endpoint_id;
 785	struct ipa *ipa = endpoint->ipa;
 786	const struct reg *reg;
 787	u32 val = 0;
 788
 789	reg = ipa_reg(ipa, ENDP_INIT_HDR);
 790	if (endpoint->config.qmap) {
 791		enum ipa_version version = ipa->version;
 792		size_t header_size;
 793
 794		header_size = ipa_qmap_header_size(version, endpoint);
 795		val = ipa_header_size_encode(version, reg, header_size);
 796
 797		/* Define how to fill fields in a received QMAP header */
 798		if (!endpoint->toward_ipa) {
 799			u32 off;     /* Field offset within header */
 800
 801			/* Where IPA will write the metadata value */
 802			off = offsetof(struct rmnet_map_header, mux_id);
 803			val |= ipa_metadata_offset_encode(version, reg, off);
 804
 805			/* Where IPA will write the length */
 806			off = offsetof(struct rmnet_map_header, pkt_len);
 807			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
 808			if (version >= IPA_VERSION_4_5)
 809				off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);
 810
 811			val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
 812			val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
 813		}
 814		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
 815		val |= reg_bit(reg, HDR_OFST_METADATA_VALID);
 816
 817		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
 818		/* HDR_A5_MUX is 0 */
 819		/* HDR_LEN_INC_DEAGG_HDR is 0 */
 820		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
 821	}
 822
 823	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
 824}
 825
 826static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
 827{
 828	u32 pad_align = endpoint->config.rx.pad_align;
 829	u32 endpoint_id = endpoint->endpoint_id;
 830	struct ipa *ipa = endpoint->ipa;
 831	const struct reg *reg;
 832	u32 val = 0;
 833
 834	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
 835	if (endpoint->config.qmap) {
 836		/* We have a header, so we must specify its endianness */
 837		val |= reg_bit(reg, HDR_ENDIANNESS);	/* big endian */
 838
 839		/* A QMAP header contains a 6 bit pad field at offset 0.
 840		 * The RMNet driver assumes this field is meaningful in
 841		 * packets it receives, and assumes the header's payload
 842		 * length includes that padding.  The RMNet driver does
 843		 * *not* pad packets it sends, however, so the pad field
 844		 * (although 0) should be ignored.
 845		 */
 846		if (!endpoint->toward_ipa) {
 847			val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
 848			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
 849			val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
 850			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
 851		}
 852	}
 853
 854	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
 855	if (!endpoint->toward_ipa)
 856		val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
 857
 858	/* IPA v4.5 adds some most-significant bits to a few fields,
 859	 * two of which are defined in the HDR (not HDR_EXT) register.
 860	 */
 861	if (ipa->version >= IPA_VERSION_4_5) {
 862		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
 863		if (endpoint->config.qmap && !endpoint->toward_ipa) {
 864			u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
 865			u32 off;     /* Field offset within header */
 866
 867			off = offsetof(struct rmnet_map_header, pkt_len);
 868			/* Low bits are in the ENDP_INIT_HDR register */
 869			off >>= hweight32(mask);
 870			val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
 871			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
 872		}
 873	}
 874
 875	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
 876}
 877
 878static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
 879{
 880	u32 endpoint_id = endpoint->endpoint_id;
 881	struct ipa *ipa = endpoint->ipa;
 882	const struct reg *reg;
 883	u32 val = 0;
 884	u32 offset;
 885
 886	if (endpoint->toward_ipa)
 887		return;		/* Register not valid for TX endpoints */
 888
 889	reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
 890	offset = reg_n_offset(reg, endpoint_id);
 891
 892	/* Note that HDR_ENDIANNESS indicates big endian header fields */
 893	if (endpoint->config.qmap)
 894		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
 895
 896	iowrite32(val, ipa->reg_virt + offset);
 897}
 898
 899static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
 900{
 901	struct ipa *ipa = endpoint->ipa;
 902	const struct reg *reg;
 903	u32 offset;
 904	u32 val;
 905
 906	if (!endpoint->toward_ipa)
 907		return;		/* Register not valid for RX endpoints */
 908
 909	reg = ipa_reg(ipa, ENDP_INIT_MODE);
 910	if (endpoint->config.dma_mode) {
 911		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
 912		u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
 913
 914		val = reg_encode(reg, ENDP_MODE, IPA_DMA);
 915		val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
 916	} else {
 917		val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
 918	}
 919	/* All other bits unspecified (and 0) */
 920
 921	offset = reg_n_offset(reg, endpoint->endpoint_id);
 922	iowrite32(val, ipa->reg_virt + offset);
 923}
 924
 925/* For IPA v4.5+, times are expressed using Qtime.  A time is represented
 926 * at one of several available granularities, which are configured in
 927 * ipa_qtime_config().  Three (or, starting with IPA v5.0, four) pulse
 928 * generators are set up with different "tick" periods.  A Qtime value
 929 * encodes a tick count along with an indication of a pulse generator
 930 * (which has a fixed tick period).  Two pulse generators are always
 931 * available to the AP; a third is available starting with IPA v5.0.
 932 * This function determines which pulse generator most accurately
 933 * represents the time period provided, and returns the tick count to
 934 * use to represent that time.
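     *
     * Example: with a field maximum of 31 ticks, a 20 millisecond period
     * does not fit pulse generator 0 (200 ticks of 100 microseconds) but
     * does fit pulse generator 1 (20 ticks of 1 millisecond), so 20 is
     * returned and the select value is set to 1.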
 935 */
 936static u32
 937ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
 938{
 939	u32 which = 0;
 940	u32 ticks;
 941
 942	/* Pulse generator 0 has 100 microsecond granularity */
 943	ticks = DIV_ROUND_CLOSEST(microseconds, 100);
 944	if (ticks <= max)
 945		goto out;
 946
 947	/* Pulse generator 1 has millisecond granularity */
 948	which = 1;
 949	ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
 950	if (ticks <= max)
 951		goto out;
 952
 953	if (ipa->version >= IPA_VERSION_5_0) {
 954		/* Pulse generator 2 has 10 millisecond granularity */
 955		which = 2;
 956		ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
 957	}
 958	WARN_ON(ticks > max);
 959out:
 960	*select = which;
 961
 962	return ticks;
 963}
 964
 965/* Encode the aggregation timer limit (microseconds) based on IPA version */
 966static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
 967				  u32 microseconds)
 968{
 969	u32 ticks;
 970	u32 max;
 971
 972	if (!microseconds)
 973		return 0;	/* Nothing to compute if time limit is 0 */
 974
 975	max = reg_field_max(reg, TIME_LIMIT);
 976	if (ipa->version >= IPA_VERSION_4_5) {
 977		u32 select;
 978
 979		ticks = ipa_qtime_val(ipa, microseconds, max, &select);
 980
 981		return reg_encode(reg, AGGR_GRAN_SEL, select) |
 982		       reg_encode(reg, TIME_LIMIT, ticks);
 983	}
 984
 985	/* We program aggregation granularity in ipa_hardware_config() */
 986	ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
 987	WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
 988	     microseconds, max * IPA_AGGR_GRANULARITY);
 989
 990	return reg_encode(reg, TIME_LIMIT, ticks);
 991}
 992
 993static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 994{
 995	u32 endpoint_id = endpoint->endpoint_id;
 996	struct ipa *ipa = endpoint->ipa;
 997	const struct reg *reg;
 998	u32 val = 0;
 999
1000	reg = ipa_reg(ipa, ENDP_INIT_AGGR);
1001	if (endpoint->config.aggregation) {
1002		if (!endpoint->toward_ipa) {
1003			const struct ipa_endpoint_rx *rx_config;
1004			u32 buffer_size;
1005			u32 limit;
1006
1007			rx_config = &endpoint->config.rx;
1008			val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
1009			val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
1010
1011			buffer_size = rx_config->buffer_size;
1012			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
1013						 rx_config->aggr_hard_limit);
1014			val |= reg_encode(reg, BYTE_LIMIT, limit);
1015
1016			limit = rx_config->aggr_time_limit;
1017			val |= aggr_time_limit_encode(ipa, reg, limit);
1018
1019			/* AGGR_PKT_LIMIT is 0 (unlimited) */
1020
1021			if (rx_config->aggr_close_eof)
1022				val |= reg_bit(reg, SW_EOF_ACTIVE);
1023		} else {
1024			val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
1025			val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
1026			/* other fields ignored */
1027		}
1028		/* AGGR_FORCE_CLOSE is 0 */
1029		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
1030	} else {
1031		val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
1032		/* other fields ignored */
1033	}
1034
1035	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1036}
1037
1038/* The head-of-line blocking timer is defined as a tick count.  For
1039 * IPA version 4.5 the tick count is based on the Qtimer, which is
1040 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
1041 * each tick represents 128 cycles of the IPA core clock.
1042 *
1043 * Return the encoded value representing the timeout period provided
1044 * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
1045 */
1046static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
1047				  u32 microseconds)
1048{
1049	u32 width;
1050	u32 scale;
1051	u64 ticks;
1052	u64 rate;
1053	u32 high;
1054	u32 val;
1055
1056	if (!microseconds)
1057		return 0;	/* Nothing to compute if timer period is 0 */
1058
1059	if (ipa->version >= IPA_VERSION_4_5) {
1060		u32 max = reg_field_max(reg, TIMER_LIMIT);
1061		u32 select;
1062		u32 ticks;
1063
1064		ticks = ipa_qtime_val(ipa, microseconds, max, &select);
1065
1066		return reg_encode(reg, TIMER_GRAN_SEL, 1) |
1067		       reg_encode(reg, TIMER_LIMIT, ticks);
1068	}
1069
1070	/* Use 64 bit arithmetic to avoid overflow */
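    	/* (For example, at a 100 MHz core clock a 1 millisecond period
    	 * works out to roughly 781 ticks of 128 clock cycles each.)
    	 */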
1071	rate = ipa_core_clock_rate(ipa);
1072	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
1073
1074	/* We still need the result to fit into the field */
1075	WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
1076
1077	/* IPA v3.5.1 through v4.1 just record the tick count */
1078	if (ipa->version < IPA_VERSION_4_2)
1079		return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
1080
1081	/* For IPA v4.2, the tick count is represented by base and
1082	 * scale fields within the 32-bit timer register, where:
1083	 *     ticks = base << scale;
1084	 * The best precision is achieved when the base value is as
1085	 * large as possible.  Find the highest set bit in the tick
1086	 * count, and extract the number of bits in the base field
1087	 * such that high bit is included.
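    	 *
    	 * Example (field width illustrative): if the tick count needs 13
    	 * bits but the base field holds only 8, scale is 5; half of
    	 * (1 << 5) is added before shifting the count right by 5.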
1088	 */
1089	high = fls(ticks);		/* 1..32 (or warning above) */
1090	width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
1091	scale = high > width ? high - width : 0;
1092	if (scale) {
1093		/* If we're scaling, round up to get a closer result */
1094		ticks += 1 << (scale - 1);
1095		/* High bit was set, so rounding might have affected it */
1096		if (fls(ticks) != high)
1097			scale++;
1098	}
1099
1100	val = reg_encode(reg, TIMER_SCALE, scale);
1101	val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
1102
1103	return val;
1104}
1105
1106/* If microseconds is 0, timeout is immediate */
1107static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
1108					      u32 microseconds)
1109{
1110	u32 endpoint_id = endpoint->endpoint_id;
1111	struct ipa *ipa = endpoint->ipa;
1112	const struct reg *reg;
1113	u32 val;
1114
1115	/* This should only be changed when HOL_BLOCK_EN is disabled */
1116	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
1117	val = hol_block_timer_encode(ipa, reg, microseconds);
1118
1119	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1120}
1121
1122static void
1123ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
1124{
1125	u32 endpoint_id = endpoint->endpoint_id;
1126	struct ipa *ipa = endpoint->ipa;
1127	const struct reg *reg;
1128	u32 offset;
1129	u32 val;
1130
1131	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
1132	offset = reg_n_offset(reg, endpoint_id);
1133	val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;
1134
1135	iowrite32(val, ipa->reg_virt + offset);
1136
1137	/* When enabling, the register must be written twice for IPA v4.5+ */
1138	if (enable && ipa->version >= IPA_VERSION_4_5)
1139		iowrite32(val, ipa->reg_virt + offset);
1140}
1141
1142/* Assumes HOL_BLOCK is in disabled state */
1143static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
1144					       u32 microseconds)
1145{
1146	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
1147	ipa_endpoint_init_hol_block_en(endpoint, true);
1148}
1149
1150static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
1151{
1152	ipa_endpoint_init_hol_block_en(endpoint, false);
1153}
1154
1155void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
1156{
1157	u32 endpoint_id = 0;
1158
1159	while (endpoint_id < ipa->endpoint_count) {
1160		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
1161
1162		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
1163			continue;
1164
1165		ipa_endpoint_init_hol_block_disable(endpoint);
1166		ipa_endpoint_init_hol_block_enable(endpoint, 0);
1167	}
1168}
1169
1170static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
1171{
1172	u32 endpoint_id = endpoint->endpoint_id;
1173	struct ipa *ipa = endpoint->ipa;
1174	const struct reg *reg;
1175	u32 val = 0;
1176
1177	if (!endpoint->toward_ipa)
1178		return;		/* Register not valid for RX endpoints */
1179
1180	reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
1181	/* DEAGGR_HDR_LEN is 0 */
1182	/* PACKET_OFFSET_VALID is 0 */
1183	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
1184	/* MAX_PACKET_LEN is 0 (not enforced) */
1185
1186	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1187}
1188
1189static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
1190{
1191	u32 resource_group = endpoint->config.resource_group;
1192	u32 endpoint_id = endpoint->endpoint_id;
1193	struct ipa *ipa = endpoint->ipa;
1194	const struct reg *reg;
1195	u32 val;
1196
1197	reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
1198	val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);
1199
1200	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1201}
1202
1203static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
1204{
1205	u32 endpoint_id = endpoint->endpoint_id;
1206	struct ipa *ipa = endpoint->ipa;
1207	const struct reg *reg;
1208	u32 val;
1209
1210	if (!endpoint->toward_ipa)
1211		return;		/* Register not valid for RX endpoints */
1212
1213	reg = ipa_reg(ipa, ENDP_INIT_SEQ);
1214
1215	/* Low-order byte configures primary packet processing */
1216	val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
1217
1218	/* Second byte (if supported) configures replicated packet processing */
1219	if (ipa->version < IPA_VERSION_4_5)
1220		val |= reg_encode(reg, SEQ_REP_TYPE,
1221				  endpoint->config.tx.seq_rep_type);
1222
1223	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1224}
1225
1226/**
1227 * ipa_endpoint_skb_tx() - Transmit a socket buffer
1228 * @endpoint:	Endpoint pointer
1229 * @skb:	Socket buffer to send
1230 *
1231 * Returns:	0 if successful, or a negative error code
1232 */
1233int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1234{
1235	struct gsi_trans *trans;
1236	u32 nr_frags;
1237	int ret;
1238
1239	/* Make sure source endpoint's TLV FIFO has enough entries to
1240	 * hold the linear portion of the skb and all its fragments.
1241	 * If not, see if we can linearize it before giving up.
1242	 */
1243	nr_frags = skb_shinfo(skb)->nr_frags;
1244	if (nr_frags > endpoint->skb_frag_max) {
1245		if (skb_linearize(skb))
1246			return -E2BIG;
1247		nr_frags = 0;
1248	}
1249
1250	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
1251	if (!trans)
1252		return -EBUSY;
1253
1254	ret = gsi_trans_skb_add(trans, skb);
1255	if (ret)
1256		goto err_trans_free;
1257	trans->data = skb;	/* transaction owns skb now */
1258
1259	gsi_trans_commit(trans, !netdev_xmit_more());
1260
1261	return 0;
1262
1263err_trans_free:
1264	gsi_trans_free(trans);
1265
1266	return -ENOMEM;
1267}
1268
1269static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1270{
1271	u32 endpoint_id = endpoint->endpoint_id;
1272	struct ipa *ipa = endpoint->ipa;
1273	const struct reg *reg;
1274	u32 val = 0;
1275
1276	reg = ipa_reg(ipa, ENDP_STATUS);
1277	if (endpoint->config.status_enable) {
1278		val |= reg_bit(reg, STATUS_EN);
1279		if (endpoint->toward_ipa) {
1280			enum ipa_endpoint_name name;
1281			u32 status_endpoint_id;
1282
1283			name = endpoint->config.tx.status_endpoint;
1284			status_endpoint_id = ipa->name_map[name]->endpoint_id;
1285
1286			val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
1287		}
1288		/* STATUS_LOCATION is 0, meaning IPA packet status
1289		 * precedes the packet (not present for IPA v4.5+)
1290		 */
1291		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
1292	}
1293
1294	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1295}
1296
1297static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
1298				      struct gsi_trans *trans)
1299{
1300	struct page *page;
1301	u32 buffer_size;
1302	u32 offset;
1303	u32 len;
1304	int ret;
1305
1306	buffer_size = endpoint->config.rx.buffer_size;
1307	page = dev_alloc_pages(get_order(buffer_size));
1308	if (!page)
1309		return -ENOMEM;
1310
1311	/* Offset the buffer to make space for skb headroom */
1312	offset = NET_SKB_PAD;
1313	len = buffer_size - offset;
1314
1315	ret = gsi_trans_page_add(trans, page, len, offset);
1316	if (ret)
1317		put_page(page);
1318	else
1319		trans->data = page;	/* transaction owns page now */
1320
1321	return ret;
1322}
1323
1324/**
1325 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1326 * @endpoint:	Endpoint to be replenished
1327 *
1328 * The IPA hardware can hold a fixed number of receive buffers for an RX
1329 * endpoint, based on the number of entries in the underlying channel ring
1330 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1331 * more receive buffers can be supplied to the hardware.  Replenishing for
1332 * an endpoint can be disabled, in which case buffers are not queued to
1333 * the hardware.
1334 */
1335static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1336{
1337	struct gsi_trans *trans;
1338
1339	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1340		return;
1341
1342	/* Skip it if it's already active */
1343	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1344		return;
1345
1346	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1347		bool doorbell;
1348
1349		if (ipa_endpoint_replenish_one(endpoint, trans))
1350			goto try_again_later;
1351
1352
1353		/* Ring the doorbell if we've got a full batch */
1354		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1355		gsi_trans_commit(trans, doorbell);
1356	}
1357
1358	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1359
1360	return;
1361
1362try_again_later:
1363	gsi_trans_free(trans);
1364	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1365
1366	/* Whenever a receive buffer transaction completes we'll try to
1367	 * replenish again.  It's unlikely, but if we fail to supply even
1368	 * one buffer, nothing will trigger another replenish attempt.
1369	 * If the hardware has no receive buffers queued, schedule work to
1370	 * try replenishing again.
1371	 */
1372	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1373		schedule_delayed_work(&endpoint->replenish_work,
1374				      msecs_to_jiffies(1));
1375}
1376
1377static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1378{
1379	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1380
1381	/* Start replenishing if hardware currently has no buffers */
1382	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1383		ipa_endpoint_replenish(endpoint);
1384}
1385
1386static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1387{
1388	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1389}
1390
1391static void ipa_endpoint_replenish_work(struct work_struct *work)
1392{
1393	struct delayed_work *dwork = to_delayed_work(work);
1394	struct ipa_endpoint *endpoint;
1395
1396	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1397
1398	ipa_endpoint_replenish(endpoint);
1399}
1400
1401static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1402				  void *data, u32 len, u32 extra)
1403{
1404	struct sk_buff *skb;
1405
1406	if (!endpoint->netdev)
1407		return;
1408
1409	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1410	if (skb) {
1411		/* Copy the data into the socket buffer and receive it */
1412		skb_put(skb, len);
1413		memcpy(skb->data, data, len);
1414		skb->truesize += extra;
1415	}
1416
1417	ipa_modem_skb_rx(endpoint->netdev, skb);
1418}
1419
1420static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1421				   struct page *page, u32 len)
1422{
1423	u32 buffer_size = endpoint->config.rx.buffer_size;
1424	struct sk_buff *skb;
1425
1426	/* Nothing to do if there's no netdev */
1427	if (!endpoint->netdev)
1428		return false;
1429
1430	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
1431
1432	skb = build_skb(page_address(page), buffer_size);
1433	if (skb) {
1434		/* Reserve the headroom and account for the data */
1435		skb_reserve(skb, NET_SKB_PAD);
1436		skb_put(skb, len);
1437	}
1438
1439	/* Receive the buffer (or record drop if unable to build it) */
1440	ipa_modem_skb_rx(endpoint->netdev, skb);
1441
1442	return skb != NULL;
1443}
1444
1445/* The format of an IPA packet status structure is the same for several
1446 * status types (opcodes).  Other types aren't currently supported.
1447 */
1448static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1449{
1450	switch (opcode) {
1451	case IPA_STATUS_OPCODE_PACKET:
1452	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1453	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1454	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1455		return true;
1456	default:
1457		return false;
1458	}
1459}
1460
1461static bool
1462ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
1463{
1464	struct ipa *ipa = endpoint->ipa;
1465	enum ipa_status_opcode opcode;
1466	u32 endpoint_id;
1467
1468	opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
1469	if (!ipa_status_format_packet(opcode))
1470		return true;
1471
1472	endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
1473	if (endpoint_id != endpoint->endpoint_id)
1474		return true;
1475
1476	return false;	/* Don't skip this packet, process it */
1477}
1478
1479static bool
1480ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
1481{
1482	struct ipa_endpoint *command_endpoint;
1483	enum ipa_status_mask status_mask;
1484	struct ipa *ipa = endpoint->ipa;
1485	u32 endpoint_id;
1486
1487	status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
1488	if (!status_mask)
1489		return false;	/* No valid tag */
1490
1491	/* The status contains a valid tag.  We know the packet was sent to
1492	 * this endpoint (already verified by ipa_endpoint_status_skip()).
1493	 * If the packet came from the AP->command TX endpoint we know
1494	 * this packet was sent as part of the pipeline clear process.
1495	 */
1496	endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
1497	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1498	if (endpoint_id == command_endpoint->endpoint_id) {
1499		complete(&ipa->completion);
1500	} else {
1501		dev_err(&ipa->pdev->dev,
1502			"unexpected tagged packet from endpoint %u\n",
1503			endpoint_id);
1504	}
1505
1506	return true;
1507}
1508
1509/* Return whether the status indicates the packet should be dropped */
1510static bool
1511ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
1512{
1513	enum ipa_status_exception exception;
1514	struct ipa *ipa = endpoint->ipa;
1515	u32 rule;
1516
1517	/* If the status indicates a tagged transfer, we'll drop the packet */
1518	if (ipa_endpoint_status_tag_valid(endpoint, data))
1519		return true;
1520
1521	/* Deaggregation exceptions we drop; all other types we consume */
1522	exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
1523	if (exception)
1524		return exception == IPA_STATUS_EXCEPTION_DEAGGR;
1525
1526	/* Drop the packet if it fails to match a routing rule; otherwise no */
1527	rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
1528
1529	return rule == IPA_STATUS_RULE_MISS;
1530}
1531
1532static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1533				      struct page *page, u32 total_len)
1534{
1535	u32 buffer_size = endpoint->config.rx.buffer_size;
1536	void *data = page_address(page) + NET_SKB_PAD;
1537	u32 unused = buffer_size - total_len;
1538	struct ipa *ipa = endpoint->ipa;
1539	u32 resid = total_len;
1540
1541	while (resid) {
1542		u32 length;
1543		u32 align;
1544		u32 len;
1545
1546		if (resid < IPA_STATUS_SIZE) {
1547			dev_err(&endpoint->ipa->pdev->dev,
1548				"short message (%u bytes < %zu byte status)\n",
1549				resid, IPA_STATUS_SIZE);
1550			break;
1551		}
1552
1553		/* Skip over status packets that lack packet data */
1554		length = ipa_status_extract(ipa, data, STATUS_LENGTH);
1555		if (!length || ipa_endpoint_status_skip(endpoint, data)) {
1556			data += IPA_STATUS_SIZE;
1557			resid -= IPA_STATUS_SIZE;
1558			continue;
1559		}
1560
1561		/* Compute the amount of buffer space consumed by the packet,
1562		 * including the status.  If the hardware is configured to
1563		 * pad packet data to an aligned boundary, account for that.
1564		 * And if checksum offload is enabled a trailer containing
1565		 * computed checksum information will be appended.
1566		 */
1567		align = endpoint->config.rx.pad_align ? : 1;
1568		len = IPA_STATUS_SIZE + ALIGN(length, align);
1569		if (endpoint->config.checksum)
1570			len += sizeof(struct rmnet_map_dl_csum_trailer);
1571
1572		if (!ipa_endpoint_status_drop(endpoint, data)) {
1573			void *data2;
1574			u32 extra;
1575
1576			/* Client receives only packet data (no status) */
1577			data2 = data + IPA_STATUS_SIZE;
1578
1579			/* Have the true size reflect the extra unused space in
1580			 * the original receive buffer.  Distribute the "cost"
1581			 * proportionately across all aggregated packets in the
1582			 * buffer.
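    			 *
    			 * Example: if 1024 bytes of the buffer are unused and
    			 * this packet accounts for half of total_len, about 512
    			 * bytes of extra truesize are charged to it.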
1583			 */
1584			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1585			ipa_endpoint_skb_copy(endpoint, data2, length, extra);
1586		}
1587
1588		/* Consume status and the full packet it describes */
1589		data += len;
1590		resid -= len;
1591	}
1592}
1593
1594void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1595				 struct gsi_trans *trans)
1596{
1597	struct page *page;
1598
1599	if (endpoint->toward_ipa)
1600		return;
1601
1602	if (trans->cancelled)
1603		goto done;
1604
1605	/* Parse or build a socket buffer using the actual received length */
1606	page = trans->data;
1607	if (endpoint->config.status_enable)
1608		ipa_endpoint_status_parse(endpoint, page, trans->len);
1609	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1610		trans->data = NULL;	/* Pages have been consumed */
1611done:
1612	ipa_endpoint_replenish(endpoint);
1613}
1614
1615void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1616				struct gsi_trans *trans)
1617{
1618	if (endpoint->toward_ipa) {
1619		struct ipa *ipa = endpoint->ipa;
1620
1621		/* Nothing to do for command transactions */
1622		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1623			struct sk_buff *skb = trans->data;
1624
1625			if (skb)
1626				dev_kfree_skb_any(skb);
1627		}
1628	} else {
1629		struct page *page = trans->data;
1630
1631		if (page)
1632			put_page(page);
1633	}
1634}
1635
1636void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1637{
1638	const struct reg *reg;
1639	u32 val;
1640
1641	reg = ipa_reg(ipa, ROUTE);
1642	/* ROUTE_DIS is 0 */
1643	val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
1644	val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
1645	/* ROUTE_DEF_HDR_OFST is 0 */
1646	val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
1647	val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
1648
1649	iowrite32(val, ipa->reg_virt + reg_offset(reg));
1650}
1651
1652void ipa_endpoint_default_route_clear(struct ipa *ipa)
1653{
1654	ipa_endpoint_default_route_set(ipa, 0);
1655}
1656
1657/**
1658 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1659 * @endpoint:	Endpoint to be reset
1660 *
1661 * If aggregation is active on an RX endpoint when a reset is performed
1662 * on its underlying GSI channel, a special sequence of actions must be
1663 * taken to ensure the IPA pipeline is properly cleared.
1664 *
1665 * Return:	0 if successful, or a negative error code
1666 */
1667static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1668{
1669	struct device *dev = &endpoint->ipa->pdev->dev;
1670	struct ipa *ipa = endpoint->ipa;
1671	struct gsi *gsi = &ipa->gsi;
1672	bool suspended = false;
1673	dma_addr_t addr;
1674	u32 retries;
1675	u32 len = 1;
1676	void *virt;
1677	int ret;
1678
1679	virt = kzalloc(len, GFP_KERNEL);
1680	if (!virt)
1681		return -ENOMEM;
1682
1683	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1684	if (dma_mapping_error(dev, addr)) {
1685		ret = -ENOMEM;
1686		goto out_kfree;
1687	}
1688
1689	/* Force close aggregation before issuing the reset */
1690	ipa_endpoint_force_close(endpoint);
1691
1692	/* Reset and reconfigure the channel with the doorbell engine
1693	 * disabled.  Then poll until we know aggregation is no longer
1694	 * active.  We'll re-enable the doorbell (if appropriate) when
1695	 * we reset again below.
1696	 */
1697	gsi_channel_reset(gsi, endpoint->channel_id, false);
1698
1699	/* Make sure the channel isn't suspended */
1700	suspended = ipa_endpoint_program_suspend(endpoint, false);
1701
1702	/* Start channel and do a 1 byte read */
1703	ret = gsi_channel_start(gsi, endpoint->channel_id);
1704	if (ret)
1705		goto out_suspend_again;
1706
1707	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1708	if (ret)
1709		goto err_endpoint_stop;
1710
1711	/* Wait for aggregation to be closed on the channel */
1712	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1713	do {
1714		if (!ipa_endpoint_aggr_active(endpoint))
1715			break;
1716		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1717	} while (retries--);
1718
1719	/* Check one last time */
1720	if (ipa_endpoint_aggr_active(endpoint))
1721		dev_err(dev, "endpoint %u still active during reset\n",
1722			endpoint->endpoint_id);
1723
1724	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1725
1726	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1727	if (ret)
1728		goto out_suspend_again;
1729
1730	/* Finally, reset and reconfigure the channel again (re-enabling
1731	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1732	 * complete the channel reset sequence.  Finish by suspending the
1733	 * channel again (if necessary).
1734	 */
1735	gsi_channel_reset(gsi, endpoint->channel_id, true);
1736
1737	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1738
1739	goto out_suspend_again;
1740
1741err_endpoint_stop:
1742	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1743out_suspend_again:
1744	if (suspended)
1745		(void)ipa_endpoint_program_suspend(endpoint, true);
1746	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1747out_kfree:
1748	kfree(virt);
1749
1750	return ret;
1751}
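ipa_endpoint_reset_rx_aggr() above waits for aggregation to close by polling a bounded number of times with roughly a millisecond between checks, then checking once more before complaining.  The sketch below shows only that retry pattern in userspace; condition_met() is an invented stand-in for ipa_endpoint_aggr_active(), and the retry count mirrors IPA_ENDPOINT_RESET_AGGR_RETRY_MAX.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RETRY_MAX	3

/* Invented stand-in for the hardware state being polled */
static bool condition_met(void)
{
	static int polls;

	return ++polls >= 2;	/* pretend the state clears on the second poll */
}

int main(void)
{
	int retries = RETRY_MAX;

	do {
		if (condition_met())
			break;
		usleep(1000);			/* about 1 msec between polls */
	} while (retries--);

	/* Check one last time, as the driver does */
	if (!condition_met())
		fprintf(stderr, "still not done after %d retries\n", RETRY_MAX);

	return 0;
}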
1752
1753static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1754{
1755	u32 channel_id = endpoint->channel_id;
1756	struct ipa *ipa = endpoint->ipa;
1757	bool special;
1758	int ret = 0;
1759
1760	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1761	 * is active, we need to handle things specially to recover.
1762	 * All other cases just need to reset the underlying GSI channel.
1763	 */
1764	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1765			endpoint->config.aggregation;
1766	if (special && ipa_endpoint_aggr_active(endpoint))
1767		ret = ipa_endpoint_reset_rx_aggr(endpoint);
1768	else
1769		gsi_channel_reset(&ipa->gsi, channel_id, true);
1770
1771	if (ret)
1772		dev_err(&ipa->pdev->dev,
1773			"error %d resetting channel %u for endpoint %u\n",
1774			ret, endpoint->channel_id, endpoint->endpoint_id);
1775}
1776
1777static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1778{
1779	if (endpoint->toward_ipa) {
1780		/* Newer versions of IPA use GSI channel flow control
1781		 * instead of endpoint DELAY mode to prevent sending data.
1782		 * Flow control is disabled for newly-allocated channels,
1783		 * and we can assume flow control is not (ever) enabled
1784		 * for AP TX channels.
1785		 */
1786		if (endpoint->ipa->version < IPA_VERSION_4_2)
1787			ipa_endpoint_program_delay(endpoint, false);
1788	} else {
1789		/* Ensure suspend mode is off on all AP RX endpoints */
1790		(void)ipa_endpoint_program_suspend(endpoint, false);
1791	}
1792	ipa_endpoint_init_cfg(endpoint);
1793	ipa_endpoint_init_nat(endpoint);
1794	ipa_endpoint_init_hdr(endpoint);
1795	ipa_endpoint_init_hdr_ext(endpoint);
1796	ipa_endpoint_init_hdr_metadata_mask(endpoint);
1797	ipa_endpoint_init_mode(endpoint);
1798	ipa_endpoint_init_aggr(endpoint);
1799	if (!endpoint->toward_ipa) {
1800		if (endpoint->config.rx.holb_drop)
1801			ipa_endpoint_init_hol_block_enable(endpoint, 0);
1802		else
1803			ipa_endpoint_init_hol_block_disable(endpoint);
1804	}
1805	ipa_endpoint_init_deaggr(endpoint);
1806	ipa_endpoint_init_rsrc_grp(endpoint);
1807	ipa_endpoint_init_seq(endpoint);
1808	ipa_endpoint_status(endpoint);
1809}
1810
1811int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1812{
1813	u32 endpoint_id = endpoint->endpoint_id;
1814	struct ipa *ipa = endpoint->ipa;
1815	struct gsi *gsi = &ipa->gsi;
1816	int ret;
1817
1818	ret = gsi_channel_start(gsi, endpoint->channel_id);
1819	if (ret) {
1820		dev_err(&ipa->pdev->dev,
1821			"error %d starting %cX channel %u for endpoint %u\n",
1822			ret, endpoint->toward_ipa ? 'T' : 'R',
1823			endpoint->channel_id, endpoint_id);
1824		return ret;
1825	}
1826
1827	if (!endpoint->toward_ipa) {
1828		ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
1829		ipa_endpoint_replenish_enable(endpoint);
1830	}
1831
1832	__set_bit(endpoint_id, ipa->enabled);
1833
1834	return 0;
1835}
1836
1837void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1838{
1839	u32 endpoint_id = endpoint->endpoint_id;
1840	struct ipa *ipa = endpoint->ipa;
1841	struct gsi *gsi = &ipa->gsi;
1842	int ret;
1843
1844	if (!test_bit(endpoint_id, ipa->enabled))
1845		return;
1846
1847	__clear_bit(endpoint_id, endpoint->ipa->enabled);
1848
1849	if (!endpoint->toward_ipa) {
1850		ipa_endpoint_replenish_disable(endpoint);
1851		ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
1852	}
1853
1854	/* Note that if stop fails, the channel's state is not well-defined */
1855	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1856	if (ret)
1857		dev_err(&ipa->pdev->dev,
1858			"error %d attempting to stop endpoint %u\n", ret,
1859			endpoint_id);
1860}
1861
1862void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1863{
1864	struct device *dev = &endpoint->ipa->pdev->dev;
1865	struct gsi *gsi = &endpoint->ipa->gsi;
1866	int ret;
1867
1868	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1869		return;
1870
1871	if (!endpoint->toward_ipa) {
1872		ipa_endpoint_replenish_disable(endpoint);
1873		(void)ipa_endpoint_program_suspend(endpoint, true);
1874	}
1875
1876	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1877	if (ret)
1878		dev_err(dev, "error %d suspending channel %u\n", ret,
1879			endpoint->channel_id);
1880}
1881
1882void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1883{
1884	struct device *dev = &endpoint->ipa->pdev->dev;
1885	struct gsi *gsi = &endpoint->ipa->gsi;
1886	int ret;
1887
1888	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1889		return;
1890
1891	if (!endpoint->toward_ipa)
1892		(void)ipa_endpoint_program_suspend(endpoint, false);
1893
1894	ret = gsi_channel_resume(gsi, endpoint->channel_id);
1895	if (ret)
1896		dev_err(dev, "error %d resuming channel %u\n", ret,
1897			endpoint->channel_id);
1898	else if (!endpoint->toward_ipa)
1899		ipa_endpoint_replenish_enable(endpoint);
1900}
1901
1902void ipa_endpoint_suspend(struct ipa *ipa)
1903{
1904	if (!ipa->setup_complete)
1905		return;
1906
1907	if (ipa->modem_netdev)
1908		ipa_modem_suspend(ipa->modem_netdev);
1909
1910	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1911	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1912}
1913
1914void ipa_endpoint_resume(struct ipa *ipa)
1915{
1916	if (!ipa->setup_complete)
1917		return;
1918
1919	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1920	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1921
1922	if (ipa->modem_netdev)
1923		ipa_modem_resume(ipa->modem_netdev);
1924}
1925
1926static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1927{
1928	struct gsi *gsi = &endpoint->ipa->gsi;
1929	u32 channel_id = endpoint->channel_id;
1930
1931	/* Only AP endpoints get set up */
1932	if (endpoint->ee_id != GSI_EE_AP)
1933		return;
1934
1935	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
1936	if (!endpoint->toward_ipa) {
1937		/* RX transactions require a single TRE, so the maximum
1938		 * backlog is the same as the maximum outstanding TREs.
1939		 */
1940		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1941		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1942		INIT_DELAYED_WORK(&endpoint->replenish_work,
1943				  ipa_endpoint_replenish_work);
1944	}
1945
1946	ipa_endpoint_program(endpoint);
1947
1948	__set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1949}
1950
1951static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1952{
1953	__clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1954
1955	if (!endpoint->toward_ipa)
1956		cancel_delayed_work_sync(&endpoint->replenish_work);
1957
1958	ipa_endpoint_reset(endpoint);
1959}
1960
1961void ipa_endpoint_setup(struct ipa *ipa)
1962{
1963	u32 endpoint_id;
1964
1965	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
1966		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1967}
1968
1969void ipa_endpoint_teardown(struct ipa *ipa)
1970{
1971	u32 endpoint_id;
1972
1973	for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
1974		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1975}
1976
1977void ipa_endpoint_deconfig(struct ipa *ipa)
1978{
1979	ipa->available_count = 0;
1980	bitmap_free(ipa->available);
1981	ipa->available = NULL;
1982}
1983
1984int ipa_endpoint_config(struct ipa *ipa)
1985{
1986	struct device *dev = &ipa->pdev->dev;
1987	const struct reg *reg;
1988	u32 endpoint_id;
1989	u32 hw_limit;
1990	u32 tx_count;
1991	u32 rx_count;
1992	u32 rx_base;
1993	u32 limit;
1994	u32 val;
1995
1996	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1997	 * Furthermore, the endpoints were not grouped such that TX
1998	 * endpoint numbers started with 0 and RX endpoints had numbers
1999	 * higher than all TX endpoints, so we can't do the simple
2000	 * direction check used for newer hardware below.
2001	 *
2002	 * For hardware that doesn't support the FLAVOR_0 register,
2003	 * just set the available mask to support any endpoint, and
2004	 * assume the configuration is valid.
2005	 */
2006	if (ipa->version < IPA_VERSION_3_5) {
2007		ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
2008		if (!ipa->available)
2009			return -ENOMEM;
2010		ipa->available_count = IPA_ENDPOINT_MAX;
2011
2012		bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
2013
2014		return 0;
2015	}
2016
2017	/* Find out about the endpoints supplied by the hardware, and ensure
2018	 * the highest one doesn't exceed the number supported by software.
2019	 */
2020	reg = ipa_reg(ipa, FLAVOR_0);
2021	val = ioread32(ipa->reg_virt + reg_offset(reg));
2022
2023	/* Our RX is an IPA producer; our TX is an IPA consumer. */
2024	tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
2025	rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
2026	rx_base = reg_decode(reg, PROD_LOWEST, val);
2027
2028	limit = rx_base + rx_count;
2029	if (limit > IPA_ENDPOINT_MAX) {
2030		dev_err(dev, "too many endpoints, %u > %u\n",
2031			limit, IPA_ENDPOINT_MAX);
2032		return -EINVAL;
2033	}
2034
2035	/* Until IPA v5.0, the max endpoint ID was 32 */
2036	hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
2037	if (limit > hw_limit) {
2038		dev_err(dev, "unexpected endpoint count, %u > %u\n",
2039			limit, hw_limit);
2040		return -EINVAL;
2041	}
2042
2043	/* Allocate and initialize the available endpoint bitmap */
2044	ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
2045	if (!ipa->available)
2046		return -ENOMEM;
2047	ipa->available_count = limit;
2048
2049	/* Mark all supported RX and TX endpoints as available */
2050	bitmap_set(ipa->available, 0, tx_count);
2051	bitmap_set(ipa->available, rx_base, rx_count);
2052
2053	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
2054		struct ipa_endpoint *endpoint;
2055
2056		if (endpoint_id >= limit) {
2057			dev_err(dev, "invalid endpoint id, %u > %u\n",
2058				endpoint_id, limit - 1);
2059			goto err_free_bitmap;
2060		}
2061
2062		if (!test_bit(endpoint_id, ipa->available)) {
2063			dev_err(dev, "unavailable endpoint id %u\n",
2064				endpoint_id);
2065			goto err_free_bitmap;
2066		}
2067
2068		/* Make sure it's pointing in the right direction */
2069		endpoint = &ipa->endpoint[endpoint_id];
2070		if (endpoint->toward_ipa) {
2071			if (endpoint_id < tx_count)
2072				continue;
2073		} else if (endpoint_id >= rx_base) {
2074			continue;
2075		}
2076
2077		dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
2078		goto err_free_bitmap;
2079	}
2080
2081	return 0;
2082
2083err_free_bitmap:
2084	ipa_endpoint_deconfig(ipa);
2085
2086	return -EINVAL;
2087}
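ipa_endpoint_config() above derives the valid endpoint ID ranges from FLAVOR_0: TX endpoints occupy IDs 0..tx_count-1, RX endpoints occupy rx_base..rx_base+rx_count-1, and every defined endpoint must land in the range matching its direction.  Below is a simplified userspace sketch of that bookkeeping using a plain 64-bit word instead of the kernel bitmap API; the counts are arbitrary example values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tx_count = 10, rx_count = 8, rx_base = 12;	/* example FLAVOR_0 values */
	uint64_t available = 0;

	/* Mark TX IDs [0, tx_count) and RX IDs [rx_base, rx_base + rx_count) */
	for (uint32_t id = 0; id < tx_count; id++)
		available |= UINT64_C(1) << id;
	for (uint32_t id = rx_base; id < rx_base + rx_count; id++)
		available |= UINT64_C(1) << id;

	/* Validate one hypothetical endpoint definition */
	uint32_t endpoint_id = 14;
	bool toward_ipa = false;			/* an RX endpoint */
	bool ok = (available >> endpoint_id) & 1;

	if (ok)
		ok = toward_ipa ? endpoint_id < tx_count : endpoint_id >= rx_base;
	printf("endpoint %u: %s\n", endpoint_id, ok ? "valid" : "invalid");

	return 0;
}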
2088
2089static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
2090				  const struct ipa_gsi_endpoint_data *data)
2091{
2092	struct ipa_endpoint *endpoint;
2093
2094	endpoint = &ipa->endpoint[data->endpoint_id];
2095
2096	if (data->ee_id == GSI_EE_AP)
2097		ipa->channel_map[data->channel_id] = endpoint;
2098	ipa->name_map[name] = endpoint;
2099
2100	endpoint->ipa = ipa;
2101	endpoint->ee_id = data->ee_id;
2102	endpoint->channel_id = data->channel_id;
2103	endpoint->endpoint_id = data->endpoint_id;
2104	endpoint->toward_ipa = data->toward_ipa;
2105	endpoint->config = data->endpoint.config;
2106
2107	__set_bit(endpoint->endpoint_id, ipa->defined);
2108}
2109
2110static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
2111{
2112	__clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
2113
2114	memset(endpoint, 0, sizeof(*endpoint));
2115}
2116
2117void ipa_endpoint_exit(struct ipa *ipa)
2118{
2119	u32 endpoint_id;
2120
2121	ipa->filtered = 0;
2122
2123	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
2124		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
2125
2126	bitmap_free(ipa->enabled);
2127	ipa->enabled = NULL;
2128	bitmap_free(ipa->set_up);
2129	ipa->set_up = NULL;
2130	bitmap_free(ipa->defined);
2131	ipa->defined = NULL;
2132
2133	memset(ipa->name_map, 0, sizeof(ipa->name_map));
2134	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
2135}
2136
2137/* Initialize endpoint state; record which endpoints support filtering in ipa->filtered.  Returns 0 if successful, or a negative error code */
2138int ipa_endpoint_init(struct ipa *ipa, u32 count,
2139		      const struct ipa_gsi_endpoint_data *data)
2140{
2141	enum ipa_endpoint_name name;
2142	u32 filtered;
2143
2144	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
2145
2146	/* Number of endpoints is one more than the maximum ID */
2147	ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
2148	if (!ipa->endpoint_count)
2149		return -EINVAL;
2150
2151	/* Initialize endpoint state bitmaps */
2152	ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2153	if (!ipa->defined)
2154		return -ENOMEM;
2155
2156	ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2157	if (!ipa->set_up)
2158		goto err_free_defined;
2159
2160	ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2161	if (!ipa->enabled)
2162		goto err_free_set_up;
2163
2164	filtered = 0;
2165	for (name = 0; name < count; name++, data++) {
2166		if (ipa_gsi_endpoint_data_empty(data))
2167			continue;	/* Skip over empty slots */
2168
2169		ipa_endpoint_init_one(ipa, name, data);
2170
2171		if (data->endpoint.filter_support)
2172			filtered |= BIT(data->endpoint_id);
2173		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
2174			ipa->modem_tx_count++;
2175	}
2176
2177	/* Make sure the set of filtered endpoints is valid */
2178	if (!ipa_filtered_valid(ipa, filtered)) {
2179		ipa_endpoint_exit(ipa);
2180
2181		return -EINVAL;
2182	}
2183
2184	ipa->filtered = filtered;
2185
2186	return 0;
2187
2188err_free_set_up:
2189	bitmap_free(ipa->set_up);
2190	ipa->set_up = NULL;
2191err_free_defined:
2192	bitmap_free(ipa->defined);
2193	ipa->defined = NULL;
2194
2195	return -ENOMEM;
2196}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2
   3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2019-2022 Linaro Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <linux/device.h>
   9#include <linux/slab.h>
  10#include <linux/bitfield.h>
  11#include <linux/if_rmnet.h>
  12#include <linux/dma-direction.h>
  13
  14#include "gsi.h"
  15#include "gsi_trans.h"
  16#include "ipa.h"
  17#include "ipa_data.h"
  18#include "ipa_endpoint.h"
  19#include "ipa_cmd.h"
  20#include "ipa_mem.h"
  21#include "ipa_modem.h"
  22#include "ipa_table.h"
  23#include "ipa_gsi.h"
  24#include "ipa_power.h"
  25
  26/* Hardware is told about receive buffers once a "batch" has been queued */
  27#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */
  28
  29/* The amount of RX buffer space consumed by standard skb overhead */
  30#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
  31
  32/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
  33#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
  34
  35#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
  36
  37/** enum ipa_status_opcode - status element opcode hardware values */
  38enum ipa_status_opcode {
  39	IPA_STATUS_OPCODE_PACKET		= 0x01,
  40	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
  41	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
  42	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
  43};
  44
  45/** enum ipa_status_exception - status element exception type */
  46enum ipa_status_exception {
  47	/* 0 means no exception */
  48	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
  49};
  50
  51/* Status element provided by hardware */
  52struct ipa_status {
  53	u8 opcode;		/* enum ipa_status_opcode */
  54	u8 exception;		/* enum ipa_status_exception */
  55	__le16 mask;
  56	__le16 pkt_len;
  57	u8 endp_src_idx;
  58	u8 endp_dst_idx;
  59	__le32 metadata;
  60	__le32 flags1;
  61	__le64 flags2;
  62	__le32 flags3;
  63	__le32 flags4;
  64};
  65
  66/* Field masks for struct ipa_status structure fields */
  67#define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
  68#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
  69#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
  70#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
  71#define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
  72
  73/* Compute the aggregation size value to use for a given buffer size */
  74static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
  75{
  76	/* A hard aggregation limit will not be crossed; aggregation closes
  77	 * if saving incoming data would cross the hard byte limit boundary.
  78	 *
  79	 * With a soft limit, aggregation closes *after* the size boundary
  80	 * has been crossed.  In that case the limit must leave enough space
  81	 * after that limit to receive a full MTU of data plus overhead.
  82	 */
  83	if (!aggr_hard_limit)
  84		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
  85
  86	/* The byte limit is encoded as a number of kilobytes */
  87
  88	return rx_buffer_size / SZ_1K;
  89}
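The helper above reports the aggregation byte limit in kilobytes; when the limit is soft it first reserves room for one more full MTU plus skb overhead, because aggregation only closes after the boundary is crossed.  A rough userspace illustration follows; EXAMPLE_MTU and EXAMPLE_OVERHEAD are stand-ins rather than the driver's IPA_MTU and IPA_RX_BUFFER_OVERHEAD values.

#include <stdint.h>
#include <stdio.h>

#define SZ_1K			1024u
#define EXAMPLE_MTU		1500u	/* stand-in for IPA_MTU */
#define EXAMPLE_OVERHEAD	320u	/* stand-in for IPA_RX_BUFFER_OVERHEAD */

static uint32_t aggr_size_kb(uint32_t rx_buffer_size, int aggr_hard_limit)
{
	/* A soft limit closes aggregation only after the boundary is
	 * crossed, so leave room for one more MTU of data plus overhead.
	 */
	if (!aggr_hard_limit)
		rx_buffer_size -= EXAMPLE_MTU + EXAMPLE_OVERHEAD;

	return rx_buffer_size / SZ_1K;		/* BYTE_LIMIT is in kilobytes */
}

int main(void)
{
	printf("hard limit: %u KB\n", aggr_size_kb(32768, 1));
	printf("soft limit: %u KB\n", aggr_size_kb(32768, 0));

	return 0;
}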
  90
  91static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
  92			    const struct ipa_gsi_endpoint_data *all_data,
  93			    const struct ipa_gsi_endpoint_data *data)
  94{
  95	const struct ipa_gsi_endpoint_data *other_data;
  96	struct device *dev = &ipa->pdev->dev;
  97	enum ipa_endpoint_name other_name;
  98
  99	if (ipa_gsi_endpoint_data_empty(data))
 100		return true;
 101
 102	if (!data->toward_ipa) {
 103		const struct ipa_endpoint_rx *rx_config;
 104		const struct ipa_reg *reg;
 105		u32 buffer_size;
 106		u32 aggr_size;
 107		u32 limit;
 108
 109		if (data->endpoint.filter_support) {
 110			dev_err(dev, "filtering not supported for "
 111					"RX endpoint %u\n",
 112				data->endpoint_id);
 113			return false;
 114		}
 115
 116		/* Nothing more to check for non-AP RX */
 117		if (data->ee_id != GSI_EE_AP)
 118			return true;
 119
 120		rx_config = &data->endpoint.config.rx;
 121
 122		/* The buffer size must hold an MTU plus overhead */
 123		buffer_size = rx_config->buffer_size;
 124		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
 125		if (buffer_size < limit) {
 126			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
 127				data->endpoint_id, buffer_size, limit);
 128			return false;
 129		}
 130
 131		if (!data->endpoint.config.aggregation) {
 132			bool result = true;
 133
 134			/* No aggregation; check for bogus aggregation data */
 135			if (rx_config->aggr_time_limit) {
 136				dev_err(dev,
 137					"time limit with no aggregation for RX endpoint %u\n",
 138					data->endpoint_id);
 139				result = false;
 140			}
 141
 142			if (rx_config->aggr_hard_limit) {
 143				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
 144					data->endpoint_id);
 145				result = false;
 146			}
 147
 148			if (rx_config->aggr_close_eof) {
 149				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
 150					data->endpoint_id);
 151				result = false;
 152			}
 153
 154			return result;	/* Nothing more to check */
 155		}
 156
 157		/* For an endpoint supporting receive aggregation, the byte
 158		 * limit defines the point at which aggregation closes.  This
 159		 * check ensures the receive buffer size doesn't result in a
 160		 * limit that exceeds what's representable in the aggregation
 161		 * byte limit field.
 162		 */
 163		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
 164					     rx_config->aggr_hard_limit);
 165		reg = ipa_reg(ipa, ENDP_INIT_AGGR);
 166
 167		limit = ipa_reg_field_max(reg, BYTE_LIMIT);
 168		if (aggr_size > limit) {
 169			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
 170				data->endpoint_id, aggr_size, limit);
 171
 172			return false;
 173		}
 174
 175		return true;	/* Nothing more to check for RX */
 176	}
 177
 178	/* Starting with IPA v4.5 sequencer replication is obsolete */
 179	if (ipa->version >= IPA_VERSION_4_5) {
 180		if (data->endpoint.config.tx.seq_rep_type) {
  181			dev_err(dev, "non-zero seq_rep_type for TX endpoint %u\n",
 182				data->endpoint_id);
 183			return false;
 184		}
 185	}
 186
 187	if (data->endpoint.config.status_enable) {
 188		other_name = data->endpoint.config.tx.status_endpoint;
 189		if (other_name >= count) {
 190			dev_err(dev, "status endpoint name %u out of range "
 191					"for endpoint %u\n",
 192				other_name, data->endpoint_id);
 193			return false;
 194		}
 195
 196		/* Status endpoint must be defined... */
 197		other_data = &all_data[other_name];
 198		if (ipa_gsi_endpoint_data_empty(other_data)) {
  199			dev_err(dev, "status endpoint name %u undefined "
 200					"for endpoint %u\n",
 201				other_name, data->endpoint_id);
 202			return false;
 203		}
 204
 205		/* ...and has to be an RX endpoint... */
 206		if (other_data->toward_ipa) {
 207			dev_err(dev,
 208				"status endpoint for endpoint %u not RX\n",
 209				data->endpoint_id);
 210			return false;
 211		}
 212
 213		/* ...and if it's to be an AP endpoint... */
 214		if (other_data->ee_id == GSI_EE_AP) {
 215			/* ...make sure it has status enabled. */
 216			if (!other_data->endpoint.config.status_enable) {
 217				dev_err(dev,
 218					"status not enabled for endpoint %u\n",
 219					other_data->endpoint_id);
 220				return false;
 221			}
 222		}
 223	}
 224
 225	if (data->endpoint.config.dma_mode) {
 226		other_name = data->endpoint.config.dma_endpoint;
 227		if (other_name >= count) {
 228			dev_err(dev, "DMA endpoint name %u out of range "
 229					"for endpoint %u\n",
 230				other_name, data->endpoint_id);
 231			return false;
 232		}
 233
 234		other_data = &all_data[other_name];
 235		if (ipa_gsi_endpoint_data_empty(other_data)) {
 236			dev_err(dev, "DMA endpoint name %u undefined "
 237					"for endpoint %u\n",
 238				other_name, data->endpoint_id);
 239			return false;
 240		}
 241	}
 242
 243	return true;
 244}
 245
 246/* Validate endpoint configuration data.  Return max defined endpoint ID */
 247static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
 248			    const struct ipa_gsi_endpoint_data *data)
 249{
 250	const struct ipa_gsi_endpoint_data *dp = data;
 251	struct device *dev = &ipa->pdev->dev;
 252	enum ipa_endpoint_name name;
 253	u32 max;
 254
 255	if (count > IPA_ENDPOINT_COUNT) {
 256		dev_err(dev, "too many endpoints specified (%u > %u)\n",
 257			count, IPA_ENDPOINT_COUNT);
 258		return 0;
 259	}
 260
 261	/* Make sure needed endpoints have defined data */
 262	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
 263		dev_err(dev, "command TX endpoint not defined\n");
 264		return 0;
 265	}
 266	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
 267		dev_err(dev, "LAN RX endpoint not defined\n");
 268		return 0;
 269	}
 270	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
 271		dev_err(dev, "AP->modem TX endpoint not defined\n");
 272		return 0;
 273	}
 274	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
 275		dev_err(dev, "AP<-modem RX endpoint not defined\n");
 276		return 0;
 277	}
 278
 279	max = 0;
 280	for (name = 0; name < count; name++, dp++) {
 281		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
 282			return 0;
 283		max = max_t(u32, max, dp->endpoint_id);
 284	}
 285
 286	return max;
 287}
 288
 289/* Allocate a transaction to use on a non-command endpoint */
 290static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
 291						  u32 tre_count)
 292{
 293	struct gsi *gsi = &endpoint->ipa->gsi;
 294	u32 channel_id = endpoint->channel_id;
 295	enum dma_data_direction direction;
 296
 297	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 298
 299	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
 300}
 301
 302/* suspend_delay represents suspend for RX, delay for TX endpoints.
 303 * Note that suspend is not supported starting with IPA v4.0, and
 304 * delay mode should not be used starting with IPA v4.2.
 305 */
 306static bool
 307ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
 308{
 309	struct ipa *ipa = endpoint->ipa;
 310	const struct ipa_reg *reg;
 311	u32 field_id;
 312	u32 offset;
 313	bool state;
 314	u32 mask;
 315	u32 val;
 316
 317	if (endpoint->toward_ipa)
 318		WARN_ON(ipa->version >= IPA_VERSION_4_2);
 319	else
 320		WARN_ON(ipa->version >= IPA_VERSION_4_0);
 321
 322	reg = ipa_reg(ipa, ENDP_INIT_CTRL);
 323	offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
 324	val = ioread32(ipa->reg_virt + offset);
 325
 326	field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
 327	mask = ipa_reg_bit(reg, field_id);
 328
 329	state = !!(val & mask);
 330
 331	/* Don't bother if it's already in the requested state */
 332	if (suspend_delay != state) {
 333		val ^= mask;
 334		iowrite32(val, ipa->reg_virt + offset);
 335	}
 336
 337	return state;
 338}
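ipa_endpoint_init_ctrl() above is a read-modify-write: it reads the control register, flips the suspend or delay bit only when the requested state differs from the current one, and returns the previous state.  The toy sketch below mirrors that pattern on an ordinary variable; fake_reg and set_ctrl_bit() are invented names and no real register access is involved.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for the memory-mapped register */

static bool set_ctrl_bit(uint32_t mask, bool want)
{
	uint32_t val = fake_reg;		/* ioread32() in the driver */
	bool state = !!(val & mask);

	/* Don't bother writing if it's already in the requested state */
	if (want != state)
		fake_reg = val ^ mask;		/* iowrite32() in the driver */

	return state;				/* previous state */
}

int main(void)
{
	printf("previous state %d\n", set_ctrl_bit(1u << 4, true));
	printf("previous state %d\n", set_ctrl_bit(1u << 4, true));	/* no write */

	return 0;
}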
 339
 340/* We don't care what the previous state was for delay mode */
 341static void
 342ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
 343{
 344	/* Delay mode should not be used for IPA v4.2+ */
 345	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
 346	WARN_ON(!endpoint->toward_ipa);
 347
 348	(void)ipa_endpoint_init_ctrl(endpoint, enable);
 349}
 350
 351static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
 352{
 353	u32 endpoint_id = endpoint->endpoint_id;
 354	struct ipa *ipa = endpoint->ipa;
 355	u32 unit = endpoint_id / 32;
 356	const struct ipa_reg *reg;
 357	u32 val;
 358
 359	WARN_ON(!test_bit(endpoint_id, ipa->available));
 360
 361	reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
 362	val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
 363
 364	return !!(val & BIT(endpoint_id % 32));
 365}
 366
 367static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
 368{
 369	u32 endpoint_id = endpoint->endpoint_id;
 370	u32 mask = BIT(endpoint_id % 32);
 371	struct ipa *ipa = endpoint->ipa;
 372	u32 unit = endpoint_id / 32;
 373	const struct ipa_reg *reg;
 374
 375	WARN_ON(!test_bit(endpoint_id, ipa->available));
 376
 377	reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
 378	iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
 379}
 380
 381/**
 382 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 383 * @endpoint:	Endpoint on which to emulate a suspend
 384 *
 385 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 386 *  with an open aggregation frame.  This is to work around a hardware
 387 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 388 *  generated when it should be.
 389 */
 390static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
 391{
 392	struct ipa *ipa = endpoint->ipa;
 393
 394	if (!endpoint->config.aggregation)
 395		return;
 396
 397	/* Nothing to do if the endpoint doesn't have aggregation open */
 398	if (!ipa_endpoint_aggr_active(endpoint))
 399		return;
 400
 401	/* Force close aggregation */
 402	ipa_endpoint_force_close(endpoint);
 403
 404	ipa_interrupt_simulate_suspend(ipa->interrupt);
 405}
 406
 407/* Returns previous suspend state (true means suspend was enabled) */
 408static bool
 409ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
 410{
 411	bool suspended;
 412
 413	if (endpoint->ipa->version >= IPA_VERSION_4_0)
 414		return enable;	/* For IPA v4.0+, no change made */
 415
 416	WARN_ON(endpoint->toward_ipa);
 417
 418	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
 419
 420	/* A client suspended with an open aggregation frame will not
 421	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
 422	 * ipa_endpoint_suspend_aggr() handle this.
 423	 */
 424	if (enable && !suspended)
 425		ipa_endpoint_suspend_aggr(endpoint);
 426
 427	return suspended;
 428}
 429
 430/* Put all modem RX endpoints into suspend mode, and stop transmission
 431 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 432 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 433 * control instead.
 434 */
 435void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
 436{
 437	u32 endpoint_id = 0;
 438
 439	while (endpoint_id < ipa->endpoint_count) {
 440		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
 441
 442		if (endpoint->ee_id != GSI_EE_MODEM)
 443			continue;
 444
 445		if (!endpoint->toward_ipa)
 446			(void)ipa_endpoint_program_suspend(endpoint, enable);
 447		else if (ipa->version < IPA_VERSION_4_2)
 448			ipa_endpoint_program_delay(endpoint, enable);
 449		else
 450			gsi_modem_channel_flow_control(&ipa->gsi,
 451						       endpoint->channel_id,
 452						       enable);
 453	}
 454}
 455
 456/* Reset all modem endpoints to use the default exception endpoint */
 457int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
 458{
 459	struct gsi_trans *trans;
 460	u32 endpoint_id;
 461	u32 count;
 462
 463	/* We need one command per modem TX endpoint, plus the commands
 464	 * that clear the pipeline.
 465	 */
 466	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
 467	trans = ipa_cmd_trans_alloc(ipa, count);
 468	if (!trans) {
 469		dev_err(&ipa->pdev->dev,
 470			"no transaction to reset modem exception endpoints\n");
 471		return -EBUSY;
 472	}
 473
 474	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
 475		struct ipa_endpoint *endpoint;
 476		const struct ipa_reg *reg;
 477		u32 offset;
 478
 479		/* We only reset modem TX endpoints */
 480		endpoint = &ipa->endpoint[endpoint_id];
 481		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
 482			continue;
 483
 484		reg = ipa_reg(ipa, ENDP_STATUS);
 485		offset = ipa_reg_n_offset(reg, endpoint_id);
 486
 487		/* Value written is 0, and all bits are updated.  That
 488		 * means status is disabled on the endpoint, and as a
 489		 * result all other fields in the register are ignored.
 490		 */
 491		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
 492	}
 493
 494	ipa_cmd_pipeline_clear_add(trans);
 495
 496	gsi_trans_commit_wait(trans);
 497
 498	ipa_cmd_pipeline_clear_wait(ipa);
 499
 500	return 0;
 501}
 502
 503static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
 504{
 505	u32 endpoint_id = endpoint->endpoint_id;
 506	struct ipa *ipa = endpoint->ipa;
 507	enum ipa_cs_offload_en enabled;
 508	const struct ipa_reg *reg;
 509	u32 val = 0;
 510
 511	reg = ipa_reg(ipa, ENDP_INIT_CFG);
 512	/* FRAG_OFFLOAD_EN is 0 */
 513	if (endpoint->config.checksum) {
 514		enum ipa_version version = ipa->version;
 515
 516		if (endpoint->toward_ipa) {
 517			u32 off;
 518
 519			/* Checksum header offset is in 4-byte units */
 520			off = sizeof(struct rmnet_map_header) / sizeof(u32);
 521			val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
 522
 523			enabled = version < IPA_VERSION_4_5
 524					? IPA_CS_OFFLOAD_UL
 525					: IPA_CS_OFFLOAD_INLINE;
 526		} else {
 527			enabled = version < IPA_VERSION_4_5
 528					? IPA_CS_OFFLOAD_DL
 529					: IPA_CS_OFFLOAD_INLINE;
 530		}
 531	} else {
 532		enabled = IPA_CS_OFFLOAD_NONE;
 533	}
 534	val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
 535	/* CS_GEN_QMB_MASTER_SEL is 0 */
 536
 537	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
 538}
 539
 540static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
 541{
 542	u32 endpoint_id = endpoint->endpoint_id;
 543	struct ipa *ipa = endpoint->ipa;
 544	const struct ipa_reg *reg;
 545	u32 val;
 546
 547	if (!endpoint->toward_ipa)
 548		return;
 549
 550	reg = ipa_reg(ipa, ENDP_INIT_NAT);
 551	val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
 552
 553	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
 554}
 555
 556static u32
 557ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
 558{
 559	u32 header_size = sizeof(struct rmnet_map_header);
 560
 561	/* Without checksum offload, we just have the MAP header */
 562	if (!endpoint->config.checksum)
 563		return header_size;
 564
 565	if (version < IPA_VERSION_4_5) {
 566		/* Checksum header inserted for AP TX endpoints only */
 567		if (endpoint->toward_ipa)
 568			header_size += sizeof(struct rmnet_map_ul_csum_header);
 569	} else {
 570		/* Checksum header is used in both directions */
 571		header_size += sizeof(struct rmnet_map_v5_csum_header);
 572	}
 573
 574	return header_size;
 575}
 576
 577/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
 578static u32 ipa_header_size_encode(enum ipa_version version,
 579				  const struct ipa_reg *reg, u32 header_size)
 580{
 581	u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
 582	u32 val;
 583
 584	/* We know field_max can be used as a mask (2^n - 1) */
 585	val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
 586	if (version < IPA_VERSION_4_5) {
 587		WARN_ON(header_size > field_max);
 588		return val;
 589	}
 590
 591	/* IPA v4.5 adds a few more most-significant bits */
 592	header_size >>= hweight32(field_max);
 593	WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
 594	val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
 595
 596	return val;
 597}
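ipa_header_size_encode() (and ipa_metadata_offset_encode() below it) split one value between a low-order field and an MSB field: the low bits are masked with the field's maximum value, and the remainder is shifted right by the field width, i.e. hweight32() of that mask.  The standalone sketch below assumes a 6-bit low field and a 2-bit MSB field purely for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t field_max = 0x3f;	/* 6-bit low field, 2^6 - 1, usable as a mask */
	uint32_t msb_max = 0x3;		/* 2-bit MSB field */
	uint32_t header_size = 77;	/* wider than the low field alone */

	uint32_t low = header_size & field_max;
	/* __builtin_popcount() plays the role of hweight32() */
	uint32_t high = header_size >> __builtin_popcount(field_max);

	if (high > msb_max)
		fprintf(stderr, "value %u does not fit in the fields\n", header_size);
	printf("low field %u, MSB field %u\n", low, high);

	return 0;
}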
 598
 599/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
 600static u32 ipa_metadata_offset_encode(enum ipa_version version,
 601				      const struct ipa_reg *reg, u32 offset)
 602{
 603	u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
 604	u32 val;
 605
 606	/* We know field_max can be used as a mask (2^n - 1) */
 607	val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
 608	if (version < IPA_VERSION_4_5) {
 609		WARN_ON(offset > field_max);
 610		return val;
 611	}
 612
 613	/* IPA v4.5 adds a few more most-significant bits */
 614	offset >>= hweight32(field_max);
 615	WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
 616	val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
 617
 618	return val;
 619}
 620
 621/**
 622 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 623 * @endpoint:	Endpoint pointer
 624 *
 625 * We program QMAP endpoints so each packet received is preceded by a QMAP
 626 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 627 * packet size field, and we have the IPA hardware populate both for each
 628 * received packet.  The header is configured (in the HDR_EXT register)
 629 * to use big endian format.
 630 *
 631 * The packet size is written into the QMAP header's pkt_len field.  That
 632 * location is defined here using the HDR_OFST_PKT_SIZE field.
 633 *
 634 * The mux_id comes from a 4-byte metadata value supplied with each packet
 635 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 636 * value that we want, in its low-order byte.  A bitmask defined in the
 637 * endpoint's METADATA_MASK register defines which byte within the modem
 638 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 639 * here indicates where the extracted byte should be placed within the QMAP
 640 * header.
 641 */
 642static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
 643{
 644	u32 endpoint_id = endpoint->endpoint_id;
 645	struct ipa *ipa = endpoint->ipa;
 646	const struct ipa_reg *reg;
 647	u32 val = 0;
 648
 649	reg = ipa_reg(ipa, ENDP_INIT_HDR);
 650	if (endpoint->config.qmap) {
 651		enum ipa_version version = ipa->version;
 652		size_t header_size;
 653
 654		header_size = ipa_qmap_header_size(version, endpoint);
 655		val = ipa_header_size_encode(version, reg, header_size);
 656
 657		/* Define how to fill fields in a received QMAP header */
 658		if (!endpoint->toward_ipa) {
 659			u32 off;     /* Field offset within header */
 660
 661			/* Where IPA will write the metadata value */
 662			off = offsetof(struct rmnet_map_header, mux_id);
 663			val |= ipa_metadata_offset_encode(version, reg, off);
 664
 665			/* Where IPA will write the length */
 666			off = offsetof(struct rmnet_map_header, pkt_len);
 667			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
 668			if (version >= IPA_VERSION_4_5)
 669				off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
 670
 671			val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
 672			val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
 673		}
 674		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
 675		val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
 676
 677		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
 678		/* HDR_A5_MUX is 0 */
 679		/* HDR_LEN_INC_DEAGG_HDR is 0 */
 680		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
 681	}
 682
 683	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
 684}
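The comment above ipa_endpoint_init_hdr() describes where the mux_id and packet length live inside the QMAP (rmnet MAP) header.  The struct below is a simplified stand-in used only to show the offsetof() arithmetic; the real struct rmnet_map_header in <linux/if_rmnet.h> packs the pad and command bits as bitfields.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct rmnet_map_header (big endian on the wire) */
struct qmap_hdr {
	uint8_t  pad_cmd;	/* 6-bit pad length plus command/data flag */
	uint8_t  mux_id;	/* filled from the masked modem metadata */
	uint16_t pkt_len;	/* filled by IPA with the packet length */
};

int main(void)
{
	printf("mux_id offset %zu, pkt_len offset %zu\n",
	       offsetof(struct qmap_hdr, mux_id),
	       offsetof(struct qmap_hdr, pkt_len));

	return 0;
}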
 685
 686static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
 687{
 688	u32 pad_align = endpoint->config.rx.pad_align;
 689	u32 endpoint_id = endpoint->endpoint_id;
 690	struct ipa *ipa = endpoint->ipa;
 691	const struct ipa_reg *reg;
 692	u32 val = 0;
 693
 694	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
 695	if (endpoint->config.qmap) {
 696		/* We have a header, so we must specify its endianness */
 697		val |= ipa_reg_bit(reg, HDR_ENDIANNESS);	/* big endian */
 698
 699		/* A QMAP header contains a 6 bit pad field at offset 0.
 700		 * The RMNet driver assumes this field is meaningful in
 701		 * packets it receives, and assumes the header's payload
 702		 * length includes that padding.  The RMNet driver does
 703		 * *not* pad packets it sends, however, so the pad field
 704		 * (although 0) should be ignored.
 705		 */
 706		if (!endpoint->toward_ipa) {
 707			val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
 708			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
 709			val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
 710			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
 711		}
 712	}
 713
 714	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
 715	if (!endpoint->toward_ipa)
 716		val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
 717
 718	/* IPA v4.5 adds some most-significant bits to a few fields,
 719	 * two of which are defined in the HDR (not HDR_EXT) register.
 720	 */
 721	if (ipa->version >= IPA_VERSION_4_5) {
 722		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
 723		if (endpoint->config.qmap && !endpoint->toward_ipa) {
 724			u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
 725			u32 off;     /* Field offset within header */
 726
 727			off = offsetof(struct rmnet_map_header, pkt_len);
 728			/* Low bits are in the ENDP_INIT_HDR register */
 729			off >>= hweight32(mask);
 730			val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
 731			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
 732		}
 733	}
 734
 735	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
 736}
 737
 738static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
 739{
 740	u32 endpoint_id = endpoint->endpoint_id;
 741	struct ipa *ipa = endpoint->ipa;
 742	const struct ipa_reg *reg;
 743	u32 val = 0;
 744	u32 offset;
 745
 746	if (endpoint->toward_ipa)
 747		return;		/* Register not valid for TX endpoints */
 748
 749	reg = ipa_reg(ipa,  ENDP_INIT_HDR_METADATA_MASK);
 750	offset = ipa_reg_n_offset(reg, endpoint_id);
 751
 752	/* Note that HDR_ENDIANNESS indicates big endian header fields */
 753	if (endpoint->config.qmap)
 754		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
 755
 756	iowrite32(val, ipa->reg_virt + offset);
 757}
 758
 759static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
 760{
 761	struct ipa *ipa = endpoint->ipa;
 762	const struct ipa_reg *reg;
 763	u32 offset;
 764	u32 val;
 765
 766	if (!endpoint->toward_ipa)
 767		return;		/* Register not valid for RX endpoints */
 768
 769	reg = ipa_reg(ipa, ENDP_INIT_MODE);
 770	if (endpoint->config.dma_mode) {
 771		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
 772		u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
 773
 774		val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
 775		val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
 776	} else {
 777		val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
 778	}
 779	/* All other bits unspecified (and 0) */
 780
 781	offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
 782	iowrite32(val, ipa->reg_virt + offset);
 783}
 784
 785/* For IPA v4.5+, times are expressed using Qtime.  The AP uses one of two
 786 * pulse generators (0 and 1) to measure elapsed time.  In ipa_qtime_config()
 787 * they're configured to have granularity 100 usec and 1 msec, respectively.
 788 *
 789 * The return value is the positive or negative Qtime value to use to
 790 * express the (microsecond) time provided.  A positive return value
 791 * means pulse generator 0 can be used; otherwise use pulse generator 1.
 792 */
 793static int ipa_qtime_val(u32 microseconds, u32 max)
 794{
 795	u32 val;
 796
 797	/* Use 100 microsecond granularity if possible */
 798	val = DIV_ROUND_CLOSEST(microseconds, 100);
 799	if (val <= max)
 800		return (int)val;
 801
 802	/* Have to use pulse generator 1 (millisecond granularity) */
 803	val = DIV_ROUND_CLOSEST(microseconds, 1000);
 804	WARN_ON(val > max);
 805
 806	return (int)-val;
 807}
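ipa_qtime_val() prefers pulse generator 0 (100 microsecond ticks) and falls back to pulse generator 1 (1 millisecond ticks) when the value would not fit, signalling the choice through the sign of its return value.  Here is a userspace sketch of the same selection; the 5-bit field limit is an arbitrary example, not a real register width.

#include <stdint.h>
#include <stdio.h>

static uint32_t div_round_closest(uint32_t n, uint32_t d)
{
	return (n + d / 2) / d;		/* like the kernel's DIV_ROUND_CLOSEST() */
}

/* Positive result: 100 usec granularity; negative: 1 msec granularity */
static int qtime_val(uint32_t microseconds, uint32_t max)
{
	uint32_t val = div_round_closest(microseconds, 100);

	if (val <= max)
		return (int)val;

	val = div_round_closest(microseconds, 1000);
	return -(int)val;
}

int main(void)
{
	uint32_t max = 31;	/* arbitrary 5-bit field limit for the example */

	printf("%d\n", qtime_val(2500, max));	/* 25: pulse generator 0 */
	printf("%d\n", qtime_val(20000, max));	/* -20: pulse generator 1 */

	return 0;
}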
 808
 809/* Encode the aggregation timer limit (microseconds) based on IPA version */
 810static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
 811				  u32 microseconds)
 812{
 813	u32 max;
 814	u32 val;
 815
 816	if (!microseconds)
 817		return 0;	/* Nothing to compute if time limit is 0 */
 818
 819	max = ipa_reg_field_max(reg, TIME_LIMIT);
 820	if (ipa->version >= IPA_VERSION_4_5) {
 821		u32 gran_sel;
 822		int ret;
 823
 824		/* Compute the Qtime limit value to use */
 825		ret = ipa_qtime_val(microseconds, max);
 826		if (ret < 0) {
 827			val = -ret;
 828			gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
 829		} else {
 830			val = ret;
 831			gran_sel = 0;
 832		}
 833
 834		return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
 835	}
 836
 837	/* We program aggregation granularity in ipa_hardware_config() */
 838	val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
 839	WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
 840	     microseconds, max * IPA_AGGR_GRANULARITY);
 841
 842	return ipa_reg_encode(reg, TIME_LIMIT, val);
 843}
 844
 845static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 846{
 847	u32 endpoint_id = endpoint->endpoint_id;
 848	struct ipa *ipa = endpoint->ipa;
 849	const struct ipa_reg *reg;
 850	u32 val = 0;
 851
 852	reg = ipa_reg(ipa, ENDP_INIT_AGGR);
 853	if (endpoint->config.aggregation) {
 854		if (!endpoint->toward_ipa) {
 855			const struct ipa_endpoint_rx *rx_config;
 856			u32 buffer_size;
 857			u32 limit;
 858
 859			rx_config = &endpoint->config.rx;
 860			val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
 861			val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
 862
 863			buffer_size = rx_config->buffer_size;
 864			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
 865						 rx_config->aggr_hard_limit);
 866			val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
 867
 868			limit = rx_config->aggr_time_limit;
 869			val |= aggr_time_limit_encode(ipa, reg, limit);
 870
 871			/* AGGR_PKT_LIMIT is 0 (unlimited) */
 872
 873			if (rx_config->aggr_close_eof)
 874				val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
 875		} else {
 876			val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
 877			val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
 878			/* other fields ignored */
 879		}
 880		/* AGGR_FORCE_CLOSE is 0 */
 881		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
 882	} else {
 883		val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
 884		/* other fields ignored */
 885	}
 886
 887	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
 888}
 889
 890/* The head-of-line blocking timer is defined as a tick count.  For
 891 * IPA version 4.5 the tick count is based on the Qtimer, which is
 892 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 893 * each tick represents 128 cycles of the IPA core clock.
 894 *
 895 * Return the encoded value representing the timeout period provided
 896 * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
 897 */
 898static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
 899				  u32 microseconds)
 900{
 901	u32 width;
 902	u32 scale;
 903	u64 ticks;
 904	u64 rate;
 905	u32 high;
 906	u32 val;
 907
 908	if (!microseconds)
 909		return 0;	/* Nothing to compute if timer period is 0 */
 910
 911	if (ipa->version >= IPA_VERSION_4_5) {
 912		u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
 913		u32 gran_sel;
 914		int ret;
 915
 916		/* Compute the Qtime limit value to use */
 917		ret = ipa_qtime_val(microseconds, max);
 918		if (ret < 0) {
 919			val = -ret;
 920			gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
 921		} else {
 922			val = ret;
 923			gran_sel = 0;
 924		}
 925
 926		return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
 927	}
 928
 929	/* Use 64 bit arithmetic to avoid overflow */
 930	rate = ipa_core_clock_rate(ipa);
 931	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
 932
 933	/* We still need the result to fit into the field */
 934	WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
 935
 936	/* IPA v3.5.1 through v4.1 just record the tick count */
 937	if (ipa->version < IPA_VERSION_4_2)
 938		return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
 939
 940	/* For IPA v4.2, the tick count is represented by base and
 941	 * scale fields within the 32-bit timer register, where:
 942	 *     ticks = base << scale;
 943	 * The best precision is achieved when the base value is as
 944	 * large as possible.  Find the highest set bit in the tick
 945	 * count, and extract the number of bits in the base field
 946	 * such that high bit is included.
 947	 */
 948	high = fls(ticks);		/* 1..32 (or warning above) */
 949	width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
 950	scale = high > width ? high - width : 0;
 951	if (scale) {
 952		/* If we're scaling, round up to get a closer result */
 953		ticks += 1 << (scale - 1);
 954		/* High bit was set, so rounding might have affected it */
 955		if (fls(ticks) != high)
 956			scale++;
 957	}
 958
 959	val = ipa_reg_encode(reg, TIMER_SCALE, scale);
 960	val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
 961
 962	return val;
 963}
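For IPA v4.2 the function above stores the tick count as base << scale, choosing the smallest scale that lets the base fit its field (largest possible base) and rounding up when scaling loses precision.  The sketch below reproduces just that encode step in userspace, assuming a 5-bit base field for the example; __builtin_clz() stands in for fls().

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base_bits = 5;		/* assumed width of the base field */
	uint32_t ticks = 1000;		/* tick count to encode */

	uint32_t high = 32 - __builtin_clz(ticks);	/* fls(): position of high bit */
	uint32_t scale = high > base_bits ? high - base_bits : 0;

	if (scale) {
		/* Round up so base << scale lands close to the requested count */
		ticks += 1u << (scale - 1);
		/* Rounding may have pushed the high bit up a position */
		if ((uint32_t)(32 - __builtin_clz(ticks)) != high)
			scale++;
	}

	uint32_t base = ticks >> scale;
	printf("encoded %u << %u = %u ticks\n", base, scale, base << scale);

	return 0;
}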
 964
 965/* If microseconds is 0, timeout is immediate */
 966static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
 967					      u32 microseconds)
 968{
 969	u32 endpoint_id = endpoint->endpoint_id;
 970	struct ipa *ipa = endpoint->ipa;
 971	const struct ipa_reg *reg;
 972	u32 val;
 973
 974	/* This should only be changed when HOL_BLOCK_EN is disabled */
 975	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
 976	val = hol_block_timer_encode(ipa, reg, microseconds);
 977
 978	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
 979}
 980
 981static void
 982ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
 983{
 984	u32 endpoint_id = endpoint->endpoint_id;
 985	struct ipa *ipa = endpoint->ipa;
 986	const struct ipa_reg *reg;
 987	u32 offset;
 988	u32 val;
 989
 990	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
 991	offset = ipa_reg_n_offset(reg, endpoint_id);
 992	val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
 993
 994	iowrite32(val, ipa->reg_virt + offset);
 995
 996	/* When enabling, the register must be written twice for IPA v4.5+ */
 997	if (enable && ipa->version >= IPA_VERSION_4_5)
 998		iowrite32(val, ipa->reg_virt + offset);
 999}
1000
1001/* Assumes HOL_BLOCK is in disabled state */
1002static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
1003					       u32 microseconds)
1004{
1005	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
1006	ipa_endpoint_init_hol_block_en(endpoint, true);
1007}
1008
1009static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
1010{
1011	ipa_endpoint_init_hol_block_en(endpoint, false);
1012}
1013
1014void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
1015{
1016	u32 endpoint_id = 0;
1017
1018	while (endpoint_id < ipa->endpoint_count) {
1019		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
1020
1021		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
1022			continue;
1023
1024		ipa_endpoint_init_hol_block_disable(endpoint);
1025		ipa_endpoint_init_hol_block_enable(endpoint, 0);
1026	}
1027}
1028
1029static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
1030{
1031	u32 endpoint_id = endpoint->endpoint_id;
1032	struct ipa *ipa = endpoint->ipa;
1033	const struct ipa_reg *reg;
1034	u32 val = 0;
1035
1036	if (!endpoint->toward_ipa)
1037		return;		/* Register not valid for RX endpoints */
1038
1039	reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
1040	/* DEAGGR_HDR_LEN is 0 */
1041	/* PACKET_OFFSET_VALID is 0 */
1042	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
1043	/* MAX_PACKET_LEN is 0 (not enforced) */
1044
1045	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1046}
1047
1048static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
1049{
1050	u32 resource_group = endpoint->config.resource_group;
1051	u32 endpoint_id = endpoint->endpoint_id;
1052	struct ipa *ipa = endpoint->ipa;
1053	const struct ipa_reg *reg;
1054	u32 val;
1055
1056	reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
1057	val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
1058
1059	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1060}
1061
1062static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
1063{
1064	u32 endpoint_id = endpoint->endpoint_id;
1065	struct ipa *ipa = endpoint->ipa;
1066	const struct ipa_reg *reg;
1067	u32 val;
1068
1069	if (!endpoint->toward_ipa)
1070		return;		/* Register not valid for RX endpoints */
1071
1072	reg = ipa_reg(ipa, ENDP_INIT_SEQ);
1073
1074	/* Low-order byte configures primary packet processing */
1075	val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
1076
1077	/* Second byte (if supported) configures replicated packet processing */
1078	if (ipa->version < IPA_VERSION_4_5)
1079		val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
1080				      endpoint->config.tx.seq_rep_type);
1081
1082	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1083}
1084
1085/**
1086 * ipa_endpoint_skb_tx() - Transmit a socket buffer
1087 * @endpoint:	Endpoint pointer
1088 * @skb:	Socket buffer to send
1089 *
1090 * Returns:	0 if successful, or a negative error code
1091 */
1092int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1093{
1094	struct gsi_trans *trans;
1095	u32 nr_frags;
1096	int ret;
1097
1098	/* Make sure source endpoint's TLV FIFO has enough entries to
1099	 * hold the linear portion of the skb and all its fragments.
1100	 * If not, see if we can linearize it before giving up.
1101	 */
1102	nr_frags = skb_shinfo(skb)->nr_frags;
1103	if (nr_frags > endpoint->skb_frag_max) {
1104		if (skb_linearize(skb))
1105			return -E2BIG;
1106		nr_frags = 0;
1107	}
1108
1109	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
1110	if (!trans)
1111		return -EBUSY;
1112
1113	ret = gsi_trans_skb_add(trans, skb);
1114	if (ret)
1115		goto err_trans_free;
1116	trans->data = skb;	/* transaction owns skb now */
1117
1118	gsi_trans_commit(trans, !netdev_xmit_more());
1119
1120	return 0;
1121
1122err_trans_free:
1123	gsi_trans_free(trans);
1124
1125	return -ENOMEM;
1126}
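ipa_endpoint_skb_tx() above needs one TRE for the skb's linear data plus one per page fragment; if that exceeds the endpoint's skb_frag_max it linearizes the skb and proceeds with a single TRE.  The sketch below shows only that capacity decision, with invented names (fake_buf, coalesce()) in place of the skb and GSI calls.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for an skb with scattered fragments */
struct fake_buf {
	uint32_t nr_frags;
};

static bool coalesce(struct fake_buf *buf)
{
	buf->nr_frags = 0;	/* pretend the copy into one linear buffer worked */
	return true;
}

int main(void)
{
	struct fake_buf buf = { .nr_frags = 20 };
	uint32_t frag_max = 14;			/* like endpoint->skb_frag_max */
	uint32_t nr_frags = buf.nr_frags;

	if (nr_frags > frag_max) {
		if (!coalesce(&buf))
			return 1;		/* the driver returns -E2BIG here */
		nr_frags = 0;
	}
	printf("transaction needs %u TREs\n", 1 + nr_frags);

	return 0;
}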
1127
1128static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1129{
1130	u32 endpoint_id = endpoint->endpoint_id;
1131	struct ipa *ipa = endpoint->ipa;
1132	const struct ipa_reg *reg;
1133	u32 val = 0;
1134
1135	reg = ipa_reg(ipa, ENDP_STATUS);
1136	if (endpoint->config.status_enable) {
1137		val |= ipa_reg_bit(reg, STATUS_EN);
1138		if (endpoint->toward_ipa) {
1139			enum ipa_endpoint_name name;
1140			u32 status_endpoint_id;
1141
1142			name = endpoint->config.tx.status_endpoint;
1143			status_endpoint_id = ipa->name_map[name]->endpoint_id;
1144
1145			val |= ipa_reg_encode(reg, STATUS_ENDP,
1146					      status_endpoint_id);
1147		}
1148		/* STATUS_LOCATION is 0, meaning status element precedes
1149		 * packet (not present for IPA v4.5+)
1150		 */
1151		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
1152	}
1153
1154	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1155}
1156
1157static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
1158				      struct gsi_trans *trans)
1159{
1160	struct page *page;
1161	u32 buffer_size;
1162	u32 offset;
1163	u32 len;
1164	int ret;
1165
1166	buffer_size = endpoint->config.rx.buffer_size;
1167	page = dev_alloc_pages(get_order(buffer_size));
1168	if (!page)
1169		return -ENOMEM;
1170
1171	/* Offset the buffer to make space for skb headroom */
1172	offset = NET_SKB_PAD;
1173	len = buffer_size - offset;
1174
1175	ret = gsi_trans_page_add(trans, page, len, offset);
1176	if (ret)
1177		put_page(page);
1178	else
1179		trans->data = page;	/* transaction owns page now */
1180
1181	return ret;
1182}
1183
1184/**
1185 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1186 * @endpoint:	Endpoint to be replenished
1187 *
1188 * The IPA hardware can hold a fixed number of receive buffers for an RX
1189 * endpoint, based on the number of entries in the underlying channel ring
1190 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1191 * more receive buffers can be supplied to the hardware.  Replenishing for
1192 * an endpoint can be disabled, in which case buffers are not queued to
1193 * the hardware.
1194 */
1195static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1196{
1197	struct gsi_trans *trans;
1198
1199	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1200		return;
1201
1202	/* Skip it if it's already active */
1203	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1204		return;
1205
1206	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1207		bool doorbell;
1208
1209		if (ipa_endpoint_replenish_one(endpoint, trans))
1210			goto try_again_later;
1211
1212
1213		/* Ring the doorbell if we've got a full batch */
1214		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1215		gsi_trans_commit(trans, doorbell);
1216	}
1217
1218	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1219
1220	return;
1221
1222try_again_later:
1223	gsi_trans_free(trans);
1224	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1225
1226	/* Whenever a receive buffer transaction completes we'll try to
1227	 * replenish again.  It's unlikely, but if we fail to supply even
1228	 * one buffer, nothing will trigger another replenish attempt.
1229	 * If the hardware has no receive buffers queued, schedule work to
1230	 * try replenishing again.
1231	 */
1232	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1233		schedule_delayed_work(&endpoint->replenish_work,
1234				      msecs_to_jiffies(1));
1235}
1236
1237static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1238{
1239	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1240
1241	/* Start replenishing if hardware currently has no buffers */
1242	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1243		ipa_endpoint_replenish(endpoint);
1244}
1245
1246static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1247{
1248	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1249}
1250
1251static void ipa_endpoint_replenish_work(struct work_struct *work)
1252{
1253	struct delayed_work *dwork = to_delayed_work(work);
1254	struct ipa_endpoint *endpoint;
1255
1256	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1257
1258	ipa_endpoint_replenish(endpoint);
1259}
1260
1261static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1262				  void *data, u32 len, u32 extra)
1263{
1264	struct sk_buff *skb;
1265
1266	if (!endpoint->netdev)
1267		return;
1268
1269	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1270	if (skb) {
1271		/* Copy the data into the socket buffer and receive it */
1272		skb_put(skb, len);
1273		memcpy(skb->data, data, len);
1274		skb->truesize += extra;
1275	}
1276
1277	ipa_modem_skb_rx(endpoint->netdev, skb);
1278}
1279
1280static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1281				   struct page *page, u32 len)
1282{
1283	u32 buffer_size = endpoint->config.rx.buffer_size;
1284	struct sk_buff *skb;
1285
1286	/* Nothing to do if there's no netdev */
1287	if (!endpoint->netdev)
1288		return false;
1289
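	/* The received length must fit in the buffer after accounting for
	 * the NET_SKB_PAD headroom and the skb_shared_info overhead that
	 * build_skb() places at the end of the buffer.
	 */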
1290	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
1291
1292	skb = build_skb(page_address(page), buffer_size);
1293	if (skb) {
1294		/* Reserve the headroom and account for the data */
1295		skb_reserve(skb, NET_SKB_PAD);
1296		skb_put(skb, len);
1297	}
1298
1299	/* Receive the buffer (or record drop if unable to build it) */
1300	ipa_modem_skb_rx(endpoint->netdev, skb);
1301
1302	return skb != NULL;
1303}
1304
1305/* The format of a packet status element is the same for several status
1306 * types (opcodes).  Other types aren't currently supported.
1307 */
1308static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1309{
1310	switch (opcode) {
1311	case IPA_STATUS_OPCODE_PACKET:
1312	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1313	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1314	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1315		return true;
1316	default:
1317		return false;
1318	}
1319}
1320
1321static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1322				     const struct ipa_status *status)
1323{
1324	u32 endpoint_id;
1325
1326	if (!ipa_status_format_packet(status->opcode))
1327		return true;
1328	if (!status->pkt_len)
1329		return true;
1330	endpoint_id = u8_get_bits(status->endp_dst_idx,
1331				  IPA_STATUS_DST_IDX_FMASK);
1332	if (endpoint_id != endpoint->endpoint_id)
1333		return true;
1334
1335	return false;	/* Don't skip this packet, process it */
1336}
1337
1338static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
1339				    const struct ipa_status *status)
1340{
1341	struct ipa_endpoint *command_endpoint;
1342	struct ipa *ipa = endpoint->ipa;
1343	u32 endpoint_id;
1344
1345	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
1346		return false;	/* No valid tag */
1347
1348	/* The status contains a valid tag.  We know the packet was sent to
1349	 * this endpoint (already verified by ipa_endpoint_status_skip()).
1350	 * If the packet came from the AP->command TX endpoint we know
1351	 * this packet was sent as part of the pipeline clear process.
1352	 */
1353	endpoint_id = u8_get_bits(status->endp_src_idx,
1354				  IPA_STATUS_SRC_IDX_FMASK);
1355	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1356	if (endpoint_id == command_endpoint->endpoint_id) {
1357		complete(&ipa->completion);
1358	} else {
1359		dev_err(&ipa->pdev->dev,
1360			"unexpected tagged packet from endpoint %u\n",
1361			endpoint_id);
1362	}
1363
1364	return true;
1365}
1366
1367/* Return whether the status indicates the packet should be dropped */
1368static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
1369				     const struct ipa_status *status)
1370{
1371	u32 val;
1372
1373	/* If the status indicates a tagged transfer, we'll drop the packet */
1374	if (ipa_endpoint_status_tag(endpoint, status))
1375		return true;
1376
1377	/* Deaggregation exceptions we drop; all other types we consume */
1378	if (status->exception)
1379		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
1380
1381	/* Drop the packet if it fails to match a routing rule; otherwise no */
1382	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1383
1384	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1385}
1386
1387static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1388				      struct page *page, u32 total_len)
1389{
1390	u32 buffer_size = endpoint->config.rx.buffer_size;
1391	void *data = page_address(page) + NET_SKB_PAD;
1392	u32 unused = buffer_size - total_len;
1393	u32 resid = total_len;
1394
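	/* Each entry in the received buffer consists of a status element,
	 * the packet data it describes (possibly padded), and, when
	 * checksum offload is enabled, a checksum trailer.  Walk these
	 * entries until the residual length is consumed.
	 */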
1395	while (resid) {
1396		const struct ipa_status *status = data;
1397		u32 align;
1398		u32 len;
1399
1400		if (resid < sizeof(*status)) {
1401			dev_err(&endpoint->ipa->pdev->dev,
1402				"short message (%u bytes < %zu byte status)\n",
1403				resid, sizeof(*status));
1404			break;
1405		}
1406
1407		/* Skip over status packets that lack packet data */
1408		if (ipa_endpoint_status_skip(endpoint, status)) {
1409			data += sizeof(*status);
1410			resid -= sizeof(*status);
1411			continue;
1412		}
1413
1414		/* Compute the amount of buffer space consumed by the packet,
1415		 * including the status element.  If the hardware is configured
1416		 * to pad packet data to an aligned boundary, account for that.
1417		 * And if checksum offload is enabled a trailer containing
1418		 * computed checksum information will be appended.
1419		 */
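		/* For example (assumed values): a 1400-byte packet with no
		 * padding configured consumes sizeof(*status) + 1400 bytes,
		 * plus the size of the checksum trailer when checksum
		 * offload is enabled.
		 */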
1420		align = endpoint->config.rx.pad_align ? : 1;
1421		len = le16_to_cpu(status->pkt_len);
1422		len = sizeof(*status) + ALIGN(len, align);
1423		if (endpoint->config.checksum)
1424			len += sizeof(struct rmnet_map_dl_csum_trailer);
1425
1426		if (!ipa_endpoint_status_drop(endpoint, status)) {
1427			void *data2;
1428			u32 extra;
1429			u32 len2;
1430
1431			/* Client receives only packet data (no status) */
1432			data2 = data + sizeof(*status);
1433			len2 = le16_to_cpu(status->pkt_len);
1434
1435			/* Have the true size reflect the extra unused space in
1436			 * the original receive buffer.  Distribute the "cost"
1437			 * proportionately across all aggregated packets in the
1438			 * buffer.
1439			 */
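			/* For example, with assumed values of 2048 unused
			 * bytes, a 700-byte packet, and 6144 bytes received
			 * in total, extra is DIV_ROUND_CLOSEST(2048 * 700,
			 * 6144), i.e. 233 bytes added to truesize.
			 */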
1440			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1441			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1442		}
1443
1444		/* Consume status and the full packet it describes */
1445		data += len;
1446		resid -= len;
1447	}
1448}
1449
1450void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1451				 struct gsi_trans *trans)
1452{
1453	struct page *page;
1454
1455	if (endpoint->toward_ipa)
1456		return;
1457
1458	if (trans->cancelled)
1459		goto done;
1460
1461	/* Parse or build a socket buffer using the actual received length */
1462	page = trans->data;
1463	if (endpoint->config.status_enable)
1464		ipa_endpoint_status_parse(endpoint, page, trans->len);
1465	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1466		trans->data = NULL;	/* Pages have been consumed */
1467done:
1468	ipa_endpoint_replenish(endpoint);
1469}
1470
1471void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1472				struct gsi_trans *trans)
1473{
1474	if (endpoint->toward_ipa) {
1475		struct ipa *ipa = endpoint->ipa;
1476
1477		/* Nothing to do for command transactions */
1478		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1479			struct sk_buff *skb = trans->data;
1480
1481			if (skb)
1482				dev_kfree_skb_any(skb);
1483		}
1484	} else {
1485		struct page *page = trans->data;
1486
1487		if (page)
1488			put_page(page);
1489	}
1490}
1491
1492void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1493{
1494	const struct ipa_reg *reg;
1495	u32 val;
1496
1497	reg = ipa_reg(ipa, ROUTE);
1498	/* ROUTE_DIS is 0 */
1499	val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
1500	val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
1501	/* ROUTE_DEF_HDR_OFST is 0 */
1502	val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
1503	val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
1504
1505	iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
1506}
1507
1508void ipa_endpoint_default_route_clear(struct ipa *ipa)
1509{
1510	ipa_endpoint_default_route_set(ipa, 0);
1511}
1512
1513/**
1514 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1515 * @endpoint:	Endpoint to be reset
1516 *
1517 * If aggregation is active on an RX endpoint when a reset is performed
1518 * on its underlying GSI channel, a special sequence of actions must be
1519 * taken to ensure the IPA pipeline is properly cleared.
1520 *
1521 * Return:	0 if successful, or a negative error code
1522 */
1523static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1524{
1525	struct device *dev = &endpoint->ipa->pdev->dev;
1526	struct ipa *ipa = endpoint->ipa;
1527	struct gsi *gsi = &ipa->gsi;
1528	bool suspended = false;
1529	dma_addr_t addr;
1530	u32 retries;
1531	u32 len = 1;
1532	void *virt;
1533	int ret;
1534
1535	virt = kzalloc(len, GFP_KERNEL);
1536	if (!virt)
1537		return -ENOMEM;
1538
1539	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1540	if (dma_mapping_error(dev, addr)) {
1541		ret = -ENOMEM;
1542		goto out_kfree;
1543	}
1544
1545	/* Force close aggregation before issuing the reset */
1546	ipa_endpoint_force_close(endpoint);
1547
1548	/* Reset and reconfigure the channel with the doorbell engine
1549	 * disabled.  Then poll until we know aggregation is no longer
1550	 * active.  We'll re-enable the doorbell (if appropriate) when
1551	 * we reset again below.
1552	 */
1553	gsi_channel_reset(gsi, endpoint->channel_id, false);
1554
1555	/* Make sure the channel isn't suspended */
1556	suspended = ipa_endpoint_program_suspend(endpoint, false);
1557
1558	/* Start channel and do a 1 byte read */
1559	ret = gsi_channel_start(gsi, endpoint->channel_id);
1560	if (ret)
1561		goto out_suspend_again;
1562
1563	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1564	if (ret)
1565		goto err_endpoint_stop;
1566
1567	/* Wait for aggregation to be closed on the channel */
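	/* Poll a few times, sleeping roughly 1-2 milliseconds between
	 * checks, giving aggregation a few milliseconds to close before
	 * we report an error below.
	 */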
1568	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1569	do {
1570		if (!ipa_endpoint_aggr_active(endpoint))
1571			break;
1572		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1573	} while (retries--);
1574
1575	/* Check one last time */
1576	if (ipa_endpoint_aggr_active(endpoint))
1577		dev_err(dev, "endpoint %u still active during reset\n",
1578			endpoint->endpoint_id);
1579
1580	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1581
1582	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1583	if (ret)
1584		goto out_suspend_again;
1585
1586	/* Finally, reset and reconfigure the channel again (re-enabling
1587	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1588	 * complete the channel reset sequence.  Finish by suspending the
1589	 * channel again (if necessary).
1590	 */
1591	gsi_channel_reset(gsi, endpoint->channel_id, true);
1592
1593	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1594
1595	goto out_suspend_again;
1596
1597err_endpoint_stop:
1598	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1599out_suspend_again:
1600	if (suspended)
1601		(void)ipa_endpoint_program_suspend(endpoint, true);
1602	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1603out_kfree:
1604	kfree(virt);
1605
1606	return ret;
1607}
1608
1609static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1610{
1611	u32 channel_id = endpoint->channel_id;
1612	struct ipa *ipa = endpoint->ipa;
1613	bool special;
1614	int ret = 0;
1615
1616	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1617	 * is active, we need to handle things specially to recover.
1618	 * All other cases just need to reset the underlying GSI channel.
1619	 */
1620	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1621			endpoint->config.aggregation;
1622	if (special && ipa_endpoint_aggr_active(endpoint))
1623		ret = ipa_endpoint_reset_rx_aggr(endpoint);
1624	else
1625		gsi_channel_reset(&ipa->gsi, channel_id, true);
1626
1627	if (ret)
1628		dev_err(&ipa->pdev->dev,
1629			"error %d resetting channel %u for endpoint %u\n",
1630			ret, endpoint->channel_id, endpoint->endpoint_id);
1631}
1632
1633static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1634{
1635	if (endpoint->toward_ipa) {
1636		/* Newer versions of IPA use GSI channel flow control
1637		 * instead of endpoint DELAY mode to prevent sending data.
1638		 * Flow control is disabled for newly-allocated channels,
1639		 * and we can assume flow control is not (ever) enabled
1640		 * for AP TX channels.
1641		 */
1642		if (endpoint->ipa->version < IPA_VERSION_4_2)
1643			ipa_endpoint_program_delay(endpoint, false);
1644	} else {
1645		/* Ensure suspend mode is off on all AP RX endpoints */
1646		(void)ipa_endpoint_program_suspend(endpoint, false);
1647	}
1648	ipa_endpoint_init_cfg(endpoint);
1649	ipa_endpoint_init_nat(endpoint);
1650	ipa_endpoint_init_hdr(endpoint);
1651	ipa_endpoint_init_hdr_ext(endpoint);
1652	ipa_endpoint_init_hdr_metadata_mask(endpoint);
1653	ipa_endpoint_init_mode(endpoint);
1654	ipa_endpoint_init_aggr(endpoint);
1655	if (!endpoint->toward_ipa) {
1656		if (endpoint->config.rx.holb_drop)
1657			ipa_endpoint_init_hol_block_enable(endpoint, 0);
1658		else
1659			ipa_endpoint_init_hol_block_disable(endpoint);
1660	}
1661	ipa_endpoint_init_deaggr(endpoint);
1662	ipa_endpoint_init_rsrc_grp(endpoint);
1663	ipa_endpoint_init_seq(endpoint);
1664	ipa_endpoint_status(endpoint);
1665}
1666
1667int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1668{
1669	u32 endpoint_id = endpoint->endpoint_id;
1670	struct ipa *ipa = endpoint->ipa;
1671	struct gsi *gsi = &ipa->gsi;
1672	int ret;
1673
1674	ret = gsi_channel_start(gsi, endpoint->channel_id);
1675	if (ret) {
1676		dev_err(&ipa->pdev->dev,
1677			"error %d starting %cX channel %u for endpoint %u\n",
1678			ret, endpoint->toward_ipa ? 'T' : 'R',
1679			endpoint->channel_id, endpoint_id);
1680		return ret;
1681	}
1682
1683	if (!endpoint->toward_ipa) {
1684		ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
1685		ipa_endpoint_replenish_enable(endpoint);
1686	}
1687
1688	__set_bit(endpoint_id, ipa->enabled);
1689
1690	return 0;
1691}
1692
1693void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1694{
1695	u32 endpoint_id = endpoint->endpoint_id;
1696	struct ipa *ipa = endpoint->ipa;
1697	struct gsi *gsi = &ipa->gsi;
1698	int ret;
1699
1700	if (!test_bit(endpoint_id, ipa->enabled))
1701		return;
1702
1703	__clear_bit(endpoint_id, endpoint->ipa->enabled);
1704
1705	if (!endpoint->toward_ipa) {
1706		ipa_endpoint_replenish_disable(endpoint);
1707		ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
1708	}
1709
1710	/* Note that if stop fails, the channel's state is not well-defined */
1711	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1712	if (ret)
1713		dev_err(&ipa->pdev->dev,
1714			"error %d attempting to stop endpoint %u\n", ret,
1715			endpoint_id);
1716}
1717
1718void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1719{
1720	struct device *dev = &endpoint->ipa->pdev->dev;
1721	struct gsi *gsi = &endpoint->ipa->gsi;
1722	int ret;
1723
1724	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1725		return;
1726
1727	if (!endpoint->toward_ipa) {
1728		ipa_endpoint_replenish_disable(endpoint);
1729		(void)ipa_endpoint_program_suspend(endpoint, true);
1730	}
1731
1732	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1733	if (ret)
1734		dev_err(dev, "error %d suspending channel %u\n", ret,
1735			endpoint->channel_id);
1736}
1737
1738void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1739{
1740	struct device *dev = &endpoint->ipa->pdev->dev;
1741	struct gsi *gsi = &endpoint->ipa->gsi;
1742	int ret;
1743
1744	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1745		return;
1746
1747	if (!endpoint->toward_ipa)
1748		(void)ipa_endpoint_program_suspend(endpoint, false);
1749
1750	ret = gsi_channel_resume(gsi, endpoint->channel_id);
1751	if (ret)
1752		dev_err(dev, "error %d resuming channel %u\n", ret,
1753			endpoint->channel_id);
1754	else if (!endpoint->toward_ipa)
1755		ipa_endpoint_replenish_enable(endpoint);
1756}
1757
1758void ipa_endpoint_suspend(struct ipa *ipa)
1759{
1760	if (!ipa->setup_complete)
1761		return;
1762
1763	if (ipa->modem_netdev)
1764		ipa_modem_suspend(ipa->modem_netdev);
1765
1766	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1767	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1768}
1769
1770void ipa_endpoint_resume(struct ipa *ipa)
1771{
1772	if (!ipa->setup_complete)
1773		return;
1774
1775	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1776	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1777
1778	if (ipa->modem_netdev)
1779		ipa_modem_resume(ipa->modem_netdev);
1780}
1781
1782static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1783{
1784	struct gsi *gsi = &endpoint->ipa->gsi;
1785	u32 channel_id = endpoint->channel_id;
1786
1787	/* Only AP endpoints get set up */
1788	if (endpoint->ee_id != GSI_EE_AP)
1789		return;
1790
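	/* One TRE carries the linear part of a TX skb, so the remaining
	 * TREs in a transaction bound the number of page fragments it can
	 * have; skbs with more fragments than this are linearized before
	 * being transmitted.
	 */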
1791	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
1792	if (!endpoint->toward_ipa) {
1793		/* RX transactions require a single TRE, so the maximum
1794		 * backlog is the same as the maximum outstanding TREs.
1795		 */
1796		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1797		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1798		INIT_DELAYED_WORK(&endpoint->replenish_work,
1799				  ipa_endpoint_replenish_work);
1800	}
1801
1802	ipa_endpoint_program(endpoint);
1803
1804	__set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1805}
1806
1807static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1808{
1809	__clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1810
1811	if (!endpoint->toward_ipa)
1812		cancel_delayed_work_sync(&endpoint->replenish_work);
1813
1814	ipa_endpoint_reset(endpoint);
1815}
1816
1817void ipa_endpoint_setup(struct ipa *ipa)
1818{
1819	u32 endpoint_id;
1820
1821	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
1822		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1823}
1824
1825void ipa_endpoint_teardown(struct ipa *ipa)
1826{
1827	u32 endpoint_id;
1828
1829	for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
1830		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1831}
1832
1833void ipa_endpoint_deconfig(struct ipa *ipa)
1834{
1835	ipa->available_count = 0;
1836	bitmap_free(ipa->available);
1837	ipa->available = NULL;
1838}
1839
1840int ipa_endpoint_config(struct ipa *ipa)
1841{
1842	struct device *dev = &ipa->pdev->dev;
1843	const struct ipa_reg *reg;
1844	u32 endpoint_id;
1845	u32 tx_count;
1846	u32 rx_count;
1847	u32 rx_base;
1848	u32 limit;
1849	u32 val;
1850
1851	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1852	 * Furthermore, the endpoints were not grouped such that TX
1853	 * endpoint numbers started with 0 and RX endpoints had numbers
1854	 * higher than all TX endpoints, so we can't do the simple
1855	 * direction check used for newer hardware below.
1856	 *
1857	 * For hardware that doesn't support the FLAVOR_0 register,
1858	 * just set the available mask to support any endpoint, and
1859	 * assume the configuration is valid.
1860	 */
1861	if (ipa->version < IPA_VERSION_3_5) {
1862		ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
1863		if (!ipa->available)
1864			return -ENOMEM;
1865		ipa->available_count = IPA_ENDPOINT_MAX;
1866
1867		bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
1868
1869		return 0;
1870	}
1871
1872	/* Find out about the endpoints supplied by the hardware, and ensure
1873	 * the highest one doesn't exceed the number supported by software.
1874	 */
1875	reg = ipa_reg(ipa, FLAVOR_0);
1876	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
1877
1878	/* Our RX is an IPA producer; our TX is an IPA consumer. */
1879	tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
1880	rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
1881	rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
1882
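	/* Illustrative values only (the real counts come from FLAVOR_0):
	 * with 8 consumer (TX) pipes and 10 producer (RX) pipes whose
	 * lowest pipe is 10, endpoint IDs 0-7 are valid TX endpoints,
	 * IDs 10-19 are valid RX endpoints, and limit below is 20.
	 */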
1883	limit = rx_base + rx_count;
1884	if (limit > IPA_ENDPOINT_MAX) {
1885		dev_err(dev, "too many endpoints, %u > %u\n",
1886			limit, IPA_ENDPOINT_MAX);
1887		return -EINVAL;
1888	}
1889
1890	/* Allocate and initialize the available endpoint bitmap */
1891	ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
1892	if (!ipa->available)
1893		return -ENOMEM;
1894	ipa->available_count = limit;
1895
1896	/* Mark all supported RX and TX endpoints as available */
1897	bitmap_set(ipa->available, 0, tx_count);
1898	bitmap_set(ipa->available, rx_base, rx_count);
1899
1900	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
1901		struct ipa_endpoint *endpoint;
1902
1903		if (endpoint_id >= limit) {
1904			dev_err(dev, "invalid endpoint id, %u > %u\n",
1905				endpoint_id, limit - 1);
1906			goto err_free_bitmap;
1907		}
1908
1909		if (!test_bit(endpoint_id, ipa->available)) {
1910			dev_err(dev, "unavailable endpoint id %u\n",
1911				endpoint_id);
1912			goto err_free_bitmap;
1913		}
1914
1915		/* Make sure it's pointing in the right direction */
1916		endpoint = &ipa->endpoint[endpoint_id];
1917		if (endpoint->toward_ipa) {
1918			if (endpoint_id < tx_count)
1919				continue;
1920		} else if (endpoint_id >= rx_base) {
1921			continue;
1922		}
1923
1924		dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
1925		goto err_free_bitmap;
1926	}
1927
1928	return 0;
1929
1930err_free_bitmap:
1931	ipa_endpoint_deconfig(ipa);
1932
1933	return -EINVAL;
1934}
1935
1936static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1937				  const struct ipa_gsi_endpoint_data *data)
1938{
1939	struct ipa_endpoint *endpoint;
1940
1941	endpoint = &ipa->endpoint[data->endpoint_id];
1942
1943	if (data->ee_id == GSI_EE_AP)
1944		ipa->channel_map[data->channel_id] = endpoint;
1945	ipa->name_map[name] = endpoint;
1946
1947	endpoint->ipa = ipa;
1948	endpoint->ee_id = data->ee_id;
1949	endpoint->channel_id = data->channel_id;
1950	endpoint->endpoint_id = data->endpoint_id;
1951	endpoint->toward_ipa = data->toward_ipa;
1952	endpoint->config = data->endpoint.config;
1953
1954	__set_bit(endpoint->endpoint_id, ipa->defined);
1955}
1956
1957static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1958{
1959	__clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
1960
1961	memset(endpoint, 0, sizeof(*endpoint));
1962}
1963
1964void ipa_endpoint_exit(struct ipa *ipa)
1965{
1966	u32 endpoint_id;
1967
1968	ipa->filtered = 0;
1969
1970	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
1971		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1972
1973	bitmap_free(ipa->enabled);
1974	ipa->enabled = NULL;
1975	bitmap_free(ipa->set_up);
1976	ipa->set_up = NULL;
1977	bitmap_free(ipa->defined);
1978	ipa->defined = NULL;
1979
1980	memset(ipa->name_map, 0, sizeof(ipa->name_map));
1981	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1982}
1983
1984/* Returns a bitmask of endpoints that support filtering, or 0 on error */
1985int ipa_endpoint_init(struct ipa *ipa, u32 count,
1986		      const struct ipa_gsi_endpoint_data *data)
1987{
1988	enum ipa_endpoint_name name;
1989	u32 filtered;
1990
1991	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
1992
1993	/* Number of endpoints is one more than the maximum ID */
1994	ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
1995	if (!ipa->endpoint_count)
1996		return -EINVAL;
1997
1998	/* Initialize endpoint state bitmaps */
1999	ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2000	if (!ipa->defined)
2001		return -ENOMEM;
2002
2003	ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2004	if (!ipa->set_up)
2005		goto err_free_defined;
2006
2007	ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2008	if (!ipa->enabled)
2009		goto err_free_set_up;
2010
2011	filtered = 0;
2012	for (name = 0; name < count; name++, data++) {
2013		if (ipa_gsi_endpoint_data_empty(data))
2014			continue;	/* Skip over empty slots */
2015
2016		ipa_endpoint_init_one(ipa, name, data);
2017
2018		if (data->endpoint.filter_support)
2019			filtered |= BIT(data->endpoint_id);
2020		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
2021			ipa->modem_tx_count++;
2022	}
2023
2024	/* Make sure the set of filtered endpoints is valid */
2025	if (!ipa_filtered_valid(ipa, filtered)) {
2026		ipa_endpoint_exit(ipa);
2027
2028		return -EINVAL;
2029	}
2030
2031	ipa->filtered = filtered;
2032
2033	return 0;
2034
2035err_free_set_up:
2036	bitmap_free(ipa->set_up);
2037	ipa->set_up = NULL;
2038err_free_defined:
2039	bitmap_free(ipa->defined);
2040	ipa->defined = NULL;
2041
2042	return -ENOMEM;
2043}