   1// SPDX-License-Identifier: GPL-2.0
   2
   3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2019-2023 Linaro Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <linux/device.h>
   9#include <linux/slab.h>
  10#include <linux/bitfield.h>
  11#include <linux/if_rmnet.h>
  12#include <linux/dma-direction.h>
  13
  14#include "gsi.h"
  15#include "gsi_trans.h"
  16#include "ipa.h"
  17#include "ipa_data.h"
  18#include "ipa_endpoint.h"
  19#include "ipa_cmd.h"
  20#include "ipa_mem.h"
  21#include "ipa_modem.h"
  22#include "ipa_table.h"
  23#include "ipa_gsi.h"
  24#include "ipa_power.h"
  25
  26/* Hardware is told about receive buffers once a "batch" has been queued */
  27#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */
  28
  29/* The amount of RX buffer space consumed by standard skb overhead */
  30#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
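/* Given how SKB_MAX_ORDER() is defined in <linux/skbuff.h>, this overhead
 * works out to NET_SKB_PAD plus the aligned size of struct skb_shared_info,
 * i.e. the headroom and shared-info space build_skb() needs around the
 * received payload.
 */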
  31
  32/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
  33#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
  34
  35#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
  36
  37/** enum ipa_status_opcode - IPA status opcode field hardware values */
  38enum ipa_status_opcode {				/* *Not* a bitmask */
  39	IPA_STATUS_OPCODE_PACKET		= 1,
  40	IPA_STATUS_OPCODE_NEW_RULE_PACKET	= 2,
  41	IPA_STATUS_OPCODE_DROPPED_PACKET	= 4,
  42	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 8,
  43	IPA_STATUS_OPCODE_LOG			= 16,
  44	IPA_STATUS_OPCODE_DCMP			= 32,
  45	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 64,
  46};
  47
  48/** enum ipa_status_exception - IPA status exception field hardware values */
  49enum ipa_status_exception {				/* *Not* a bitmask */
  50	/* 0 means no exception */
  51	IPA_STATUS_EXCEPTION_DEAGGR		= 1,
  52	IPA_STATUS_EXCEPTION_IPTYPE		= 4,
  53	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 8,
  54	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 16,
  55	IPA_STATUS_EXCEPTION_SW_FILTER		= 32,
  56	IPA_STATUS_EXCEPTION_NAT		= 64,		/* IPv4 */
  57	IPA_STATUS_EXCEPTION_IPV6_CONN_TRACK	= 64,		/* IPv6 */
  58	IPA_STATUS_EXCEPTION_UC			= 128,
  59	IPA_STATUS_EXCEPTION_INVALID_ENDPOINT	= 129,
  60	IPA_STATUS_EXCEPTION_HEADER_INSERT	= 136,
  61	IPA_STATUS_EXCEPTION_CHEKCSUM		= 229,
  62};
  63
  64/** enum ipa_status_mask - IPA status mask field bitmask hardware values */
  65enum ipa_status_mask {
  66	IPA_STATUS_MASK_FRAG_PROCESS		= BIT(0),
  67	IPA_STATUS_MASK_FILT_PROCESS		= BIT(1),
  68	IPA_STATUS_MASK_NAT_PROCESS		= BIT(2),
  69	IPA_STATUS_MASK_ROUTE_PROCESS		= BIT(3),
  70	IPA_STATUS_MASK_TAG_VALID		= BIT(4),
  71	IPA_STATUS_MASK_FRAGMENT		= BIT(5),
  72	IPA_STATUS_MASK_FIRST_FRAGMENT		= BIT(6),
  73	IPA_STATUS_MASK_V4			= BIT(7),
  74	IPA_STATUS_MASK_CKSUM_PROCESS		= BIT(8),
  75	IPA_STATUS_MASK_AGGR_PROCESS		= BIT(9),
  76	IPA_STATUS_MASK_DEST_EOT		= BIT(10),
  77	IPA_STATUS_MASK_DEAGGR_PROCESS		= BIT(11),
  78	IPA_STATUS_MASK_DEAGG_FIRST		= BIT(12),
  79	IPA_STATUS_MASK_SRC_EOT			= BIT(13),
  80	IPA_STATUS_MASK_PREV_EOT		= BIT(14),
  81	IPA_STATUS_MASK_BYTE_LIMIT		= BIT(15),
  82};
  83
  84/* Special IPA filter/router rule field value indicating "rule miss" */
  85#define IPA_STATUS_RULE_MISS	0x3ff	/* 10-bit filter/router rule fields */
  86
  87/** The IPA status nat_type field uses enum ipa_nat_type hardware values */
  88
  89/* enum ipa_status_field_id - IPA packet status structure field identifiers */
  90enum ipa_status_field_id {
  91	STATUS_OPCODE,			/* enum ipa_status_opcode */
  92	STATUS_EXCEPTION,		/* enum ipa_status_exception */
  93	STATUS_MASK,			/* enum ipa_status_mask (bitmask) */
  94	STATUS_LENGTH,
  95	STATUS_SRC_ENDPOINT,
  96	STATUS_DST_ENDPOINT,
  97	STATUS_METADATA,
  98	STATUS_FILTER_LOCAL,		/* Boolean */
  99	STATUS_FILTER_HASH,		/* Boolean */
 100	STATUS_FILTER_GLOBAL,		/* Boolean */
 101	STATUS_FILTER_RETAIN,		/* Boolean */
 102	STATUS_FILTER_RULE_INDEX,
 103	STATUS_ROUTER_LOCAL,		/* Boolean */
 104	STATUS_ROUTER_HASH,		/* Boolean */
 105	STATUS_UCP,			/* Boolean */
 106	STATUS_ROUTER_TABLE,
 107	STATUS_ROUTER_RULE_INDEX,
 108	STATUS_NAT_HIT,			/* Boolean */
 109	STATUS_NAT_INDEX,
 110	STATUS_NAT_TYPE,		/* enum ipa_nat_type */
 111	STATUS_TAG_LOW32,		/* Low-order 32 bits of 48-bit tag */
 112	STATUS_TAG_HIGH16,		/* High-order 16 bits of 48-bit tag */
 113	STATUS_SEQUENCE,
 114	STATUS_TIME_OF_DAY,
 115	STATUS_HEADER_LOCAL,		/* Boolean */
 116	STATUS_HEADER_OFFSET,
 117	STATUS_FRAG_HIT,		/* Boolean */
 118	STATUS_FRAG_RULE_INDEX,
 119};
 120
 121/* Size in bytes of an IPA packet status structure */
 122#define IPA_STATUS_SIZE			sizeof(__le32[8])
 123
 124/* IPA status structure decoder; looks up field values for a structure */
 125static u32 ipa_status_extract(struct ipa *ipa, const void *data,
 126			      enum ipa_status_field_id field)
 127{
 128	enum ipa_version version = ipa->version;
 129	const __le32 *word = data;
 130
 131	switch (field) {
 132	case STATUS_OPCODE:
 133		return le32_get_bits(word[0], GENMASK(7, 0));
 134	case STATUS_EXCEPTION:
 135		return le32_get_bits(word[0], GENMASK(15, 8));
 136	case STATUS_MASK:
 137		return le32_get_bits(word[0], GENMASK(31, 16));
 138	case STATUS_LENGTH:
 139		return le32_get_bits(word[1], GENMASK(15, 0));
 140	case STATUS_SRC_ENDPOINT:
 141		if (version < IPA_VERSION_5_0)
 142			return le32_get_bits(word[1], GENMASK(20, 16));
 143		return le32_get_bits(word[1], GENMASK(23, 16));
 144	/* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
 145	/* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
 146	case STATUS_DST_ENDPOINT:
 147		if (version < IPA_VERSION_5_0)
 148			return le32_get_bits(word[1], GENMASK(28, 24));
 149		return le32_get_bits(word[7], GENMASK(23, 16));
 150	/* Status word 1, bits 29-31 are reserved */
 151	case STATUS_METADATA:
 152		return le32_to_cpu(word[2]);
 153	case STATUS_FILTER_LOCAL:
 154		return le32_get_bits(word[3], GENMASK(0, 0));
 155	case STATUS_FILTER_HASH:
 156		return le32_get_bits(word[3], GENMASK(1, 1));
 157	case STATUS_FILTER_GLOBAL:
 158		return le32_get_bits(word[3], GENMASK(2, 2));
 159	case STATUS_FILTER_RETAIN:
 160		return le32_get_bits(word[3], GENMASK(3, 3));
 161	case STATUS_FILTER_RULE_INDEX:
 162		return le32_get_bits(word[3], GENMASK(13, 4));
 163	/* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
 164	case STATUS_ROUTER_LOCAL:
 165		if (version < IPA_VERSION_5_0)
 166			return le32_get_bits(word[3], GENMASK(14, 14));
 167		return le32_get_bits(word[1], GENMASK(27, 27));
 168	case STATUS_ROUTER_HASH:
 169		if (version < IPA_VERSION_5_0)
 170			return le32_get_bits(word[3], GENMASK(15, 15));
 171		return le32_get_bits(word[1], GENMASK(28, 28));
 172	case STATUS_UCP:
 173		if (version < IPA_VERSION_5_0)
 174			return le32_get_bits(word[3], GENMASK(16, 16));
 175		return le32_get_bits(word[7], GENMASK(31, 31));
 176	case STATUS_ROUTER_TABLE:
 177		if (version < IPA_VERSION_5_0)
 178			return le32_get_bits(word[3], GENMASK(21, 17));
 179		return le32_get_bits(word[3], GENMASK(21, 14));
 180	case STATUS_ROUTER_RULE_INDEX:
 181		return le32_get_bits(word[3], GENMASK(31, 22));
 182	case STATUS_NAT_HIT:
 183		return le32_get_bits(word[4], GENMASK(0, 0));
 184	case STATUS_NAT_INDEX:
 185		return le32_get_bits(word[4], GENMASK(13, 1));
 186	case STATUS_NAT_TYPE:
 187		return le32_get_bits(word[4], GENMASK(15, 14));
 188	case STATUS_TAG_LOW32:
 189		return le32_get_bits(word[4], GENMASK(31, 16)) |
 190			(le32_get_bits(word[5], GENMASK(15, 0)) << 16);
 191	case STATUS_TAG_HIGH16:
 192		return le32_get_bits(word[5], GENMASK(31, 16));
 193	case STATUS_SEQUENCE:
 194		return le32_get_bits(word[6], GENMASK(7, 0));
 195	case STATUS_TIME_OF_DAY:
 196		return le32_get_bits(word[6], GENMASK(31, 8));
 197	case STATUS_HEADER_LOCAL:
 198		return le32_get_bits(word[7], GENMASK(0, 0));
 199	case STATUS_HEADER_OFFSET:
 200		return le32_get_bits(word[7], GENMASK(10, 1));
 201	case STATUS_FRAG_HIT:
 202		return le32_get_bits(word[7], GENMASK(11, 11));
 203	case STATUS_FRAG_RULE_INDEX:
 204		return le32_get_bits(word[7], GENMASK(15, 12));
 205	/* Status word 7, bits 16-30 are reserved */
 206	/* Status word 7, bit 31 is reserved (not IPA v5.0+) */
 207	default:
 208		WARN(true, "%s: bad field_id %u\n", __func__, field);
 209		return 0;
 210	}
 211}
 212
 213/* Compute the aggregation size value to use for a given buffer size */
 214static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
 215{
 216	/* A hard aggregation limit will not be crossed; aggregation closes
 217	 * if saving incoming data would cross the hard byte limit boundary.
 218	 *
 219	 * With a soft limit, aggregation closes *after* the size boundary
 220	 * has been crossed.  In that case the limit must leave enough space
 221	 * after that limit to receive a full MTU of data plus overhead.
 222	 */
 223	if (!aggr_hard_limit)
 224		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
 225
 226	/* The byte limit is encoded as a number of kilobytes */
 227
 228	return rx_buffer_size / SZ_1K;
 229}
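/* As a worked example of the above (illustrative numbers only): with a
 * 32768-byte receive buffer and a hard aggregation limit, the value
 * programmed is 32768 / SZ_1K = 32.  With a soft limit, space for one
 * more full packet must remain beyond the boundary, so
 * (32768 - IPA_MTU - IPA_RX_BUFFER_OVERHEAD) / SZ_1K is programmed
 * instead, and aggregation may overshoot that limit by up to an MTU.
 */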
 230
 231static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 232			    const struct ipa_gsi_endpoint_data *all_data,
 233			    const struct ipa_gsi_endpoint_data *data)
 234{
 235	const struct ipa_gsi_endpoint_data *other_data;
 236	enum ipa_endpoint_name other_name;
 237	struct device *dev = ipa->dev;
 238
 239	if (ipa_gsi_endpoint_data_empty(data))
 240		return true;
 241
 242	if (!data->toward_ipa) {
 243		const struct ipa_endpoint_rx *rx_config;
 244		const struct reg *reg;
 245		u32 buffer_size;
 246		u32 aggr_size;
 247		u32 limit;
 248
 249		if (data->endpoint.filter_support) {
 250			dev_err(dev, "filtering not supported for "
 251					"RX endpoint %u\n",
 252				data->endpoint_id);
 253			return false;
 254		}
 255
 256		/* Nothing more to check for non-AP RX */
 257		if (data->ee_id != GSI_EE_AP)
 258			return true;
 259
 260		rx_config = &data->endpoint.config.rx;
 261
 262		/* The buffer size must hold an MTU plus overhead */
 263		buffer_size = rx_config->buffer_size;
 264		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
 265		if (buffer_size < limit) {
 266			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
 267				data->endpoint_id, buffer_size, limit);
 268			return false;
 269		}
 270
 271		if (!data->endpoint.config.aggregation) {
 272			bool result = true;
 273
 274			/* No aggregation; check for bogus aggregation data */
 275			if (rx_config->aggr_time_limit) {
 276				dev_err(dev,
 277					"time limit with no aggregation for RX endpoint %u\n",
 278					data->endpoint_id);
 279				result = false;
 280			}
 281
 282			if (rx_config->aggr_hard_limit) {
 283				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
 284					data->endpoint_id);
 285				result = false;
 286			}
 287
 288			if (rx_config->aggr_close_eof) {
 289				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
 290					data->endpoint_id);
 291				result = false;
 292			}
 293
 294			return result;	/* Nothing more to check */
 295		}
 296
 297		/* For an endpoint supporting receive aggregation, the byte
 298		 * limit defines the point at which aggregation closes.  This
 299		 * check ensures the receive buffer size doesn't result in a
 300		 * limit that exceeds what's representable in the aggregation
 301		 * byte limit field.
 302		 */
 303		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
 304					     rx_config->aggr_hard_limit);
 305		reg = ipa_reg(ipa, ENDP_INIT_AGGR);
 306
 307		limit = reg_field_max(reg, BYTE_LIMIT);
 308		if (aggr_size > limit) {
 309			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
 310				data->endpoint_id, aggr_size, limit);
 311
 312			return false;
 313		}
 314
 315		return true;	/* Nothing more to check for RX */
 316	}
 317
 318	/* Starting with IPA v4.5 sequencer replication is obsolete */
 319	if (ipa->version >= IPA_VERSION_4_5) {
 320		if (data->endpoint.config.tx.seq_rep_type) {
  321			dev_err(dev, "non-zero seq_rep_type for TX endpoint %u\n",
 322				data->endpoint_id);
 323			return false;
 324		}
 325	}
 326
 327	if (data->endpoint.config.status_enable) {
 328		other_name = data->endpoint.config.tx.status_endpoint;
 329		if (other_name >= count) {
 330			dev_err(dev, "status endpoint name %u out of range "
 331					"for endpoint %u\n",
 332				other_name, data->endpoint_id);
 333			return false;
 334		}
 335
 336		/* Status endpoint must be defined... */
 337		other_data = &all_data[other_name];
 338		if (ipa_gsi_endpoint_data_empty(other_data)) {
  339			dev_err(dev, "status endpoint name %u undefined "
 340					"for endpoint %u\n",
 341				other_name, data->endpoint_id);
 342			return false;
 343		}
 344
 345		/* ...and has to be an RX endpoint... */
 346		if (other_data->toward_ipa) {
 347			dev_err(dev,
 348				"status endpoint for endpoint %u not RX\n",
 349				data->endpoint_id);
 350			return false;
 351		}
 352
 353		/* ...and if it's to be an AP endpoint... */
 354		if (other_data->ee_id == GSI_EE_AP) {
 355			/* ...make sure it has status enabled. */
 356			if (!other_data->endpoint.config.status_enable) {
 357				dev_err(dev,
 358					"status not enabled for endpoint %u\n",
 359					other_data->endpoint_id);
 360				return false;
 361			}
 362		}
 363	}
 364
 365	if (data->endpoint.config.dma_mode) {
 366		other_name = data->endpoint.config.dma_endpoint;
 367		if (other_name >= count) {
 368			dev_err(dev, "DMA endpoint name %u out of range "
 369					"for endpoint %u\n",
 370				other_name, data->endpoint_id);
 371			return false;
 372		}
 373
 374		other_data = &all_data[other_name];
 375		if (ipa_gsi_endpoint_data_empty(other_data)) {
 376			dev_err(dev, "DMA endpoint name %u undefined "
 377					"for endpoint %u\n",
 378				other_name, data->endpoint_id);
 379			return false;
 380		}
 381	}
 382
 383	return true;
 384}
 385
 386/* Validate endpoint configuration data.  Return max defined endpoint ID */
 387static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
 388			    const struct ipa_gsi_endpoint_data *data)
 389{
 390	const struct ipa_gsi_endpoint_data *dp = data;
 391	struct device *dev = ipa->dev;
 392	enum ipa_endpoint_name name;
 393	u32 max;
 394
 395	if (count > IPA_ENDPOINT_COUNT) {
 396		dev_err(dev, "too many endpoints specified (%u > %u)\n",
 397			count, IPA_ENDPOINT_COUNT);
 398		return 0;
 399	}
 400
 401	/* Make sure needed endpoints have defined data */
 402	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
 403		dev_err(dev, "command TX endpoint not defined\n");
 404		return 0;
 405	}
 406	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
 407		dev_err(dev, "LAN RX endpoint not defined\n");
 408		return 0;
 409	}
 410	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
 411		dev_err(dev, "AP->modem TX endpoint not defined\n");
 412		return 0;
 413	}
 414	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
 415		dev_err(dev, "AP<-modem RX endpoint not defined\n");
 416		return 0;
 417	}
 418
 419	max = 0;
 420	for (name = 0; name < count; name++, dp++) {
 421		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
 422			return 0;
 423		max = max_t(u32, max, dp->endpoint_id);
 424	}
 425
 426	return max;
 427}
 428
 429/* Allocate a transaction to use on a non-command endpoint */
 430static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
 431						  u32 tre_count)
 432{
 433	struct gsi *gsi = &endpoint->ipa->gsi;
 434	u32 channel_id = endpoint->channel_id;
 435	enum dma_data_direction direction;
 436
 437	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 438
 439	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
 440}
 441
 442/* suspend_delay represents suspend for RX, delay for TX endpoints.
 443 * Note that suspend is not supported starting with IPA v4.0, and
 444 * delay mode should not be used starting with IPA v4.2.
 445 */
 446static bool
 447ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
 448{
 449	struct ipa *ipa = endpoint->ipa;
 450	const struct reg *reg;
 451	u32 field_id;
 452	u32 offset;
 453	bool state;
 454	u32 mask;
 455	u32 val;
 456
 457	if (endpoint->toward_ipa)
 458		WARN_ON(ipa->version >= IPA_VERSION_4_2);
 459	else
 460		WARN_ON(ipa->version >= IPA_VERSION_4_0);
 461
 462	reg = ipa_reg(ipa, ENDP_INIT_CTRL);
 463	offset = reg_n_offset(reg, endpoint->endpoint_id);
 464	val = ioread32(ipa->reg_virt + offset);
 465
 466	field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
 467	mask = reg_bit(reg, field_id);
 468
 469	state = !!(val & mask);
 470
 471	/* Don't bother if it's already in the requested state */
 472	if (suspend_delay != state) {
 473		val ^= mask;
 474		iowrite32(val, ipa->reg_virt + offset);
 475	}
 476
 477	return state;
 478}
 479
 480/* We don't care what the previous state was for delay mode */
 481static void
 482ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
 483{
 484	/* Delay mode should not be used for IPA v4.2+ */
 485	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
 486	WARN_ON(!endpoint->toward_ipa);
 487
 488	(void)ipa_endpoint_init_ctrl(endpoint, enable);
 489}
 490
 491static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
 492{
 493	u32 endpoint_id = endpoint->endpoint_id;
 494	struct ipa *ipa = endpoint->ipa;
 495	u32 unit = endpoint_id / 32;
 496	const struct reg *reg;
 497	u32 val;
 498
 499	WARN_ON(!test_bit(endpoint_id, ipa->available));
 500
 501	reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
 502	val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
 503
 504	return !!(val & BIT(endpoint_id % 32));
 505}
 506
 507static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
 508{
 509	u32 endpoint_id = endpoint->endpoint_id;
 510	u32 mask = BIT(endpoint_id % 32);
 511	struct ipa *ipa = endpoint->ipa;
 512	u32 unit = endpoint_id / 32;
 513	const struct reg *reg;
 514
 515	WARN_ON(!test_bit(endpoint_id, ipa->available));
 516
 517	reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
 518	iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
 519}
 520
 521/**
 522 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 523 * @endpoint:	Endpoint on which to emulate a suspend
 524 *
 525 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 526 *  with an open aggregation frame.  This is to work around a hardware
 527 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 528 *  generated when it should be.
 529 */
 530static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
 531{
 532	struct ipa *ipa = endpoint->ipa;
 533
 534	if (!endpoint->config.aggregation)
 535		return;
 536
 537	/* Nothing to do if the endpoint doesn't have aggregation open */
 538	if (!ipa_endpoint_aggr_active(endpoint))
 539		return;
 540
 541	/* Force close aggregation */
 542	ipa_endpoint_force_close(endpoint);
 543
 544	ipa_interrupt_simulate_suspend(ipa->interrupt);
 545}
 546
 547/* Returns previous suspend state (true means suspend was enabled) */
 548static bool
 549ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
 550{
 551	bool suspended;
 552
 553	if (endpoint->ipa->version >= IPA_VERSION_4_0)
 554		return enable;	/* For IPA v4.0+, no change made */
 555
 556	WARN_ON(endpoint->toward_ipa);
 557
 558	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
 559
 560	/* A client suspended with an open aggregation frame will not
 561	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
 562	 * ipa_endpoint_suspend_aggr() handle this.
 563	 */
 564	if (enable && !suspended)
 565		ipa_endpoint_suspend_aggr(endpoint);
 566
 567	return suspended;
 568}
 569
 570/* Put all modem RX endpoints into suspend mode, and stop transmission
 571 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 572 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 573 * control instead.
 574 */
 575void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
 576{
 577	u32 endpoint_id = 0;
 578
 579	while (endpoint_id < ipa->endpoint_count) {
 580		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
 581
 582		if (endpoint->ee_id != GSI_EE_MODEM)
 583			continue;
 584
 585		if (!endpoint->toward_ipa)
 586			(void)ipa_endpoint_program_suspend(endpoint, enable);
 587		else if (ipa->version < IPA_VERSION_4_2)
 588			ipa_endpoint_program_delay(endpoint, enable);
 589		else
 590			gsi_modem_channel_flow_control(&ipa->gsi,
 591						       endpoint->channel_id,
 592						       enable);
 593	}
 594}
 595
 596/* Reset all modem endpoints to use the default exception endpoint */
 597int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
 598{
 599	struct gsi_trans *trans;
 600	u32 endpoint_id;
 601	u32 count;
 602
 603	/* We need one command per modem TX endpoint, plus the commands
 604	 * that clear the pipeline.
 605	 */
 606	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
 607	trans = ipa_cmd_trans_alloc(ipa, count);
 608	if (!trans) {
 609		dev_err(ipa->dev,
 610			"no transaction to reset modem exception endpoints\n");
 611		return -EBUSY;
 612	}
 613
 614	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
 615		struct ipa_endpoint *endpoint;
 616		const struct reg *reg;
 617		u32 offset;
 618
 619		/* We only reset modem TX endpoints */
 620		endpoint = &ipa->endpoint[endpoint_id];
 621		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
 622			continue;
 623
 624		reg = ipa_reg(ipa, ENDP_STATUS);
 625		offset = reg_n_offset(reg, endpoint_id);
 626
 627		/* Value written is 0, and all bits are updated.  That
 628		 * means status is disabled on the endpoint, and as a
 629		 * result all other fields in the register are ignored.
 630		 */
 631		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
 632	}
 633
 634	ipa_cmd_pipeline_clear_add(trans);
 635
  635
 636	gsi_trans_commit_wait(trans);
 637
 638	ipa_cmd_pipeline_clear_wait(ipa);
 639
 640	return 0;
 641}
 642
 643static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
 644{
 645	u32 endpoint_id = endpoint->endpoint_id;
 646	struct ipa *ipa = endpoint->ipa;
 647	enum ipa_cs_offload_en enabled;
 648	const struct reg *reg;
 649	u32 val = 0;
 650
 651	reg = ipa_reg(ipa, ENDP_INIT_CFG);
 652	/* FRAG_OFFLOAD_EN is 0 */
 653	if (endpoint->config.checksum) {
 654		enum ipa_version version = ipa->version;
 655
 656		if (endpoint->toward_ipa) {
 657			u32 off;
 658
 659			/* Checksum header offset is in 4-byte units */
 660			off = sizeof(struct rmnet_map_header) / sizeof(u32);
 661			val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
 662
 663			enabled = version < IPA_VERSION_4_5
 664					? IPA_CS_OFFLOAD_UL
 665					: IPA_CS_OFFLOAD_INLINE;
 666		} else {
 667			enabled = version < IPA_VERSION_4_5
 668					? IPA_CS_OFFLOAD_DL
 669					: IPA_CS_OFFLOAD_INLINE;
 670		}
 671	} else {
 672		enabled = IPA_CS_OFFLOAD_NONE;
 673	}
 674	val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
 675	/* CS_GEN_QMB_MASTER_SEL is 0 */
 676
 677	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
 678}
 679
 680static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
 681{
 682	u32 endpoint_id = endpoint->endpoint_id;
 683	struct ipa *ipa = endpoint->ipa;
 684	const struct reg *reg;
 685	u32 val;
 686
 687	if (!endpoint->toward_ipa)
 688		return;
 689
 690	reg = ipa_reg(ipa, ENDP_INIT_NAT);
 691	val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
 692
 693	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
 694}
 695
 696static u32
 697ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
 698{
 699	u32 header_size = sizeof(struct rmnet_map_header);
 700
 701	/* Without checksum offload, we just have the MAP header */
 702	if (!endpoint->config.checksum)
 703		return header_size;
 704
 705	if (version < IPA_VERSION_4_5) {
 706		/* Checksum header inserted for AP TX endpoints only */
 707		if (endpoint->toward_ipa)
 708			header_size += sizeof(struct rmnet_map_ul_csum_header);
 709	} else {
 710		/* Checksum header is used in both directions */
 711		header_size += sizeof(struct rmnet_map_v5_csum_header);
 712	}
 713
 714	return header_size;
 715}
 716
 717/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
 718static u32 ipa_header_size_encode(enum ipa_version version,
 719				  const struct reg *reg, u32 header_size)
 720{
 721	u32 field_max = reg_field_max(reg, HDR_LEN);
 722	u32 val;
 723
 724	/* We know field_max can be used as a mask (2^n - 1) */
 725	val = reg_encode(reg, HDR_LEN, header_size & field_max);
 726	if (version < IPA_VERSION_4_5) {
 727		WARN_ON(header_size > field_max);
 728		return val;
 729	}
 730
 731	/* IPA v4.5 adds a few more most-significant bits */
 732	header_size >>= hweight32(field_max);
 733	WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
 734	val |= reg_encode(reg, HDR_LEN_MSB, header_size);
 735
 736	return val;
 737}
 738
 739/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
 740static u32 ipa_metadata_offset_encode(enum ipa_version version,
 741				      const struct reg *reg, u32 offset)
 742{
 743	u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
 744	u32 val;
 745
 746	/* We know field_max can be used as a mask (2^n - 1) */
 747	val = reg_encode(reg, HDR_OFST_METADATA, offset);
 748	if (version < IPA_VERSION_4_5) {
 749		WARN_ON(offset > field_max);
 750		return val;
 751	}
 752
 753	/* IPA v4.5 adds a few more most-significant bits */
 754	offset >>= hweight32(field_max);
 755	WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
 756	val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
 757
 758	return val;
 759}
 760
 761/**
 762 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 763 * @endpoint:	Endpoint pointer
 764 *
 765 * We program QMAP endpoints so each packet received is preceded by a QMAP
 766 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 767 * packet size field, and we have the IPA hardware populate both for each
 768 * received packet.  The header is configured (in the HDR_EXT register)
 769 * to use big endian format.
 770 *
 771 * The packet size is written into the QMAP header's pkt_len field.  That
 772 * location is defined here using the HDR_OFST_PKT_SIZE field.
 773 *
 774 * The mux_id comes from a 4-byte metadata value supplied with each packet
 775 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 776 * value that we want, in its low-order byte.  A bitmask defined in the
 777 * endpoint's METADATA_MASK register defines which byte within the modem
 778 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 779 * here indicates where the extracted byte should be placed within the QMAP
 780 * header.
 781 */
 782static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
 783{
 784	u32 endpoint_id = endpoint->endpoint_id;
 785	struct ipa *ipa = endpoint->ipa;
 786	const struct reg *reg;
 787	u32 val = 0;
 788
 789	reg = ipa_reg(ipa, ENDP_INIT_HDR);
 790	if (endpoint->config.qmap) {
 791		enum ipa_version version = ipa->version;
 792		size_t header_size;
 793
 794		header_size = ipa_qmap_header_size(version, endpoint);
 795		val = ipa_header_size_encode(version, reg, header_size);
 796
 797		/* Define how to fill fields in a received QMAP header */
 798		if (!endpoint->toward_ipa) {
 799			u32 off;     /* Field offset within header */
 800
 801			/* Where IPA will write the metadata value */
 802			off = offsetof(struct rmnet_map_header, mux_id);
 803			val |= ipa_metadata_offset_encode(version, reg, off);
 804
 805			/* Where IPA will write the length */
 806			off = offsetof(struct rmnet_map_header, pkt_len);
 807			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
 808			if (version >= IPA_VERSION_4_5)
 809				off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);
 810
 811			val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
 812			val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
 813		}
 814		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
 815		val |= reg_bit(reg, HDR_OFST_METADATA_VALID);
 816
 817		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
 818		/* HDR_A5_MUX is 0 */
 819		/* HDR_LEN_INC_DEAGG_HDR is 0 */
 820		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
 821	}
 822
 823	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
 824}
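/* For reference, the QMAP header written by the hardware has the layout
 * of struct rmnet_map_header from <linux/if_rmnet.h>:
 *
 *	struct rmnet_map_header {
 *		u8	flags;		(pad length, command flag)
 *		u8	mux_id;		(filled from modem metadata)
 *		__be16	pkt_len;	(filled by the IPA, big endian)
 *	};
 *
 * so for RX endpoints OFST_METADATA is programmed with offset 1 and
 * HDR_OFST_PKT_SIZE with offset 2.
 */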
 825
 826static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
 827{
 828	u32 pad_align = endpoint->config.rx.pad_align;
 829	u32 endpoint_id = endpoint->endpoint_id;
 830	struct ipa *ipa = endpoint->ipa;
 831	const struct reg *reg;
 832	u32 val = 0;
 833
 834	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
 835	if (endpoint->config.qmap) {
 836		/* We have a header, so we must specify its endianness */
 837		val |= reg_bit(reg, HDR_ENDIANNESS);	/* big endian */
 838
 839		/* A QMAP header contains a 6 bit pad field at offset 0.
 840		 * The RMNet driver assumes this field is meaningful in
 841		 * packets it receives, and assumes the header's payload
 842		 * length includes that padding.  The RMNet driver does
 843		 * *not* pad packets it sends, however, so the pad field
 844		 * (although 0) should be ignored.
 845		 */
 846		if (!endpoint->toward_ipa) {
 847			val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
 848			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
 849			val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
 850			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
 851		}
 852	}
 853
 854	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
 855	if (!endpoint->toward_ipa)
 856		val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
 857
 858	/* IPA v4.5 adds some most-significant bits to a few fields,
 859	 * two of which are defined in the HDR (not HDR_EXT) register.
 860	 */
 861	if (ipa->version >= IPA_VERSION_4_5) {
 862		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
 863		if (endpoint->config.qmap && !endpoint->toward_ipa) {
 864			u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
 865			u32 off;     /* Field offset within header */
 866
 867			off = offsetof(struct rmnet_map_header, pkt_len);
 868			/* Low bits are in the ENDP_INIT_HDR register */
 869			off >>= hweight32(mask);
 870			val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
 871			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
 872		}
 873	}
 874
 875	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
 876}
 877
 878static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
 879{
 880	u32 endpoint_id = endpoint->endpoint_id;
 881	struct ipa *ipa = endpoint->ipa;
 882	const struct reg *reg;
 883	u32 val = 0;
 884	u32 offset;
 885
 886	if (endpoint->toward_ipa)
 887		return;		/* Register not valid for TX endpoints */
 888
 889	reg = ipa_reg(ipa,  ENDP_INIT_HDR_METADATA_MASK);
 890	offset = reg_n_offset(reg, endpoint_id);
 891
 892	/* Note that HDR_ENDIANNESS indicates big endian header fields */
 893	if (endpoint->config.qmap)
 894		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
 895
 896	iowrite32(val, ipa->reg_virt + offset);
 897}
 898
 899static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
 900{
 901	struct ipa *ipa = endpoint->ipa;
 902	const struct reg *reg;
 903	u32 offset;
 904	u32 val;
 905
 906	if (!endpoint->toward_ipa)
 907		return;		/* Register not valid for RX endpoints */
 908
 909	reg = ipa_reg(ipa, ENDP_INIT_MODE);
 910	if (endpoint->config.dma_mode) {
 911		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
 912		u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
 913
 914		val = reg_encode(reg, ENDP_MODE, IPA_DMA);
 915		val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
 916	} else {
 917		val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
 918	}
 919	/* All other bits unspecified (and 0) */
 920
 921	offset = reg_n_offset(reg, endpoint->endpoint_id);
 922	iowrite32(val, ipa->reg_virt + offset);
 923}
 924
 925/* For IPA v4.5+, times are expressed using Qtime.  A time is represented
 926 * at one of several available granularities, which are configured in
 927 * ipa_qtime_config().  Three (or, starting with IPA v5.0, four) pulse
 928 * generators are set up with different "tick" periods.  A Qtime value
 929 * encodes a tick count along with an indication of a pulse generator
 930 * (which has a fixed tick period).  Two pulse generators are always
 931 * available to the AP; a third is available starting with IPA v5.0.
 932 * This function determines which pulse generator most accurately
 933 * represents the time period provided, and returns the tick count to
 934 * use to represent that time.
 935 */
 936static u32
 937ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
 938{
 939	u32 which = 0;
 940	u32 ticks;
 941
 942	/* Pulse generator 0 has 100 microsecond granularity */
 943	ticks = DIV_ROUND_CLOSEST(microseconds, 100);
 944	if (ticks <= max)
 945		goto out;
 946
 947	/* Pulse generator 1 has millisecond granularity */
 948	which = 1;
 949	ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
 950	if (ticks <= max)
 951		goto out;
 952
 953	if (ipa->version >= IPA_VERSION_5_0) {
 954		/* Pulse generator 2 has 10 millisecond granularity */
 955		which = 2;
 956		ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
 957	}
 958	WARN_ON(ticks > max);
 959out:
 960	*select = which;
 961
 962	return ticks;
 963}
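/* Example: a 10000 microsecond period with a 5-bit field (max = 31)
 * would need 100 ticks from pulse generator 0, which doesn't fit, so
 * pulse generator 1 is selected and the tick count returned is
 * DIV_ROUND_CLOSEST(10000, 1000) = 10.
 */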
 964
 965/* Encode the aggregation timer limit (microseconds) based on IPA version */
 966static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
 967				  u32 microseconds)
 968{
 969	u32 ticks;
 970	u32 max;
 971
 972	if (!microseconds)
 973		return 0;	/* Nothing to compute if time limit is 0 */
 974
 975	max = reg_field_max(reg, TIME_LIMIT);
 976	if (ipa->version >= IPA_VERSION_4_5) {
 977		u32 select;
 978
 979		ticks = ipa_qtime_val(ipa, microseconds, max, &select);
 980
 981		return reg_encode(reg, AGGR_GRAN_SEL, select) |
 982		       reg_encode(reg, TIME_LIMIT, ticks);
 983	}
 984
 985	/* We program aggregation granularity in ipa_hardware_config() */
 986	ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
 987	WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
 988	     microseconds, max * IPA_AGGR_GRANULARITY);
 989
 990	return reg_encode(reg, TIME_LIMIT, ticks);
 991}
 992
 993static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 994{
 995	u32 endpoint_id = endpoint->endpoint_id;
 996	struct ipa *ipa = endpoint->ipa;
 997	const struct reg *reg;
 998	u32 val = 0;
 999
1000	reg = ipa_reg(ipa, ENDP_INIT_AGGR);
1001	if (endpoint->config.aggregation) {
1002		if (!endpoint->toward_ipa) {
1003			const struct ipa_endpoint_rx *rx_config;
1004			u32 buffer_size;
1005			u32 limit;
1006
1007			rx_config = &endpoint->config.rx;
1008			val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
1009			val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
1010
1011			buffer_size = rx_config->buffer_size;
1012			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
1013						 rx_config->aggr_hard_limit);
1014			val |= reg_encode(reg, BYTE_LIMIT, limit);
1015
1016			limit = rx_config->aggr_time_limit;
1017			val |= aggr_time_limit_encode(ipa, reg, limit);
1018
1019			/* AGGR_PKT_LIMIT is 0 (unlimited) */
1020
1021			if (rx_config->aggr_close_eof)
1022				val |= reg_bit(reg, SW_EOF_ACTIVE);
1023		} else {
1024			val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
1025			val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
1026			/* other fields ignored */
1027		}
1028		/* AGGR_FORCE_CLOSE is 0 */
1029		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
1030	} else {
1031		val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
1032		/* other fields ignored */
1033	}
1034
1035	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1036}
1037
1038/* The head-of-line blocking timer is defined as a tick count.  For
1039 * IPA version 4.5 the tick count is based on the Qtimer, which is
1040 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
1041 * each tick represents 128 cycles of the IPA core clock.
1042 *
1043 * Return the encoded value representing the timeout period provided
1044 * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
1045 */
1046static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
1047				  u32 microseconds)
1048{
1049	u32 width;
1050	u32 scale;
1051	u64 ticks;
1052	u64 rate;
1053	u32 high;
1054	u32 val;
1055
1056	if (!microseconds)
1057		return 0;	/* Nothing to compute if timer period is 0 */
1058
1059	if (ipa->version >= IPA_VERSION_4_5) {
1060		u32 max = reg_field_max(reg, TIMER_LIMIT);
1061		u32 select;
1062		u32 ticks;
1063
1064		ticks = ipa_qtime_val(ipa, microseconds, max, &select);
1065
1066		return reg_encode(reg, TIMER_GRAN_SEL, 1) |
1067		       reg_encode(reg, TIMER_LIMIT, ticks);
1068	}
1069
1070	/* Use 64 bit arithmetic to avoid overflow */
1071	rate = ipa_core_clock_rate(ipa);
1072	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
1073
1074	/* We still need the result to fit into the field */
1075	WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
1076
1077	/* IPA v3.5.1 through v4.1 just record the tick count */
1078	if (ipa->version < IPA_VERSION_4_2)
1079		return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
1080
1081	/* For IPA v4.2, the tick count is represented by base and
1082	 * scale fields within the 32-bit timer register, where:
1083	 *     ticks = base << scale;
1084	 * The best precision is achieved when the base value is as
1085	 * large as possible.  Find the highest set bit in the tick
1086	 * count, and extract the number of bits in the base field
1087	 * such that high bit is included.
1088	 */
1089	high = fls(ticks);		/* 1..32 (or warning above) */
1090	width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
1091	scale = high > width ? high - width : 0;
1092	if (scale) {
1093		/* If we're scaling, round up to get a closer result */
1094		ticks += 1 << (scale - 1);
1095		/* High bit was set, so rounding might have affected it */
1096		if (fls(ticks) != high)
1097			scale++;
1098	}
1099
1100	val = reg_encode(reg, TIMER_SCALE, scale);
1101	val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
1102
1103	return val;
1104}
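/* Worked example of the IPA v4.2 base/scale encoding (assuming a 5-bit
 * TIMER_BASE_VALUE field purely for illustration): a tick count of 1000
 * has fls() == 10, so scale = 10 - 5 = 5.  Rounding adds 1 << 4, giving
 * 1016; its high bit is unchanged, so the base written is 1016 >> 5 = 31
 * and the hardware sees 31 << 5 = 992 ticks, close to the 1000 requested.
 */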
1105
1106/* If microseconds is 0, timeout is immediate */
1107static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
1108					      u32 microseconds)
1109{
1110	u32 endpoint_id = endpoint->endpoint_id;
1111	struct ipa *ipa = endpoint->ipa;
1112	const struct reg *reg;
1113	u32 val;
1114
1115	/* This should only be changed when HOL_BLOCK_EN is disabled */
1116	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
1117	val = hol_block_timer_encode(ipa, reg, microseconds);
1118
1119	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1120}
1121
1122static void
1123ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
1124{
1125	u32 endpoint_id = endpoint->endpoint_id;
1126	struct ipa *ipa = endpoint->ipa;
1127	const struct reg *reg;
1128	u32 offset;
1129	u32 val;
1130
1131	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
1132	offset = reg_n_offset(reg, endpoint_id);
1133	val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;
1134
1135	iowrite32(val, ipa->reg_virt + offset);
1136
1137	/* When enabling, the register must be written twice for IPA v4.5+ */
1138	if (enable && ipa->version >= IPA_VERSION_4_5)
1139		iowrite32(val, ipa->reg_virt + offset);
1140}
1141
1142/* Assumes HOL_BLOCK is in disabled state */
1143static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
1144					       u32 microseconds)
1145{
1146	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
1147	ipa_endpoint_init_hol_block_en(endpoint, true);
1148}
1149
1150static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
1151{
1152	ipa_endpoint_init_hol_block_en(endpoint, false);
1153}
1154
1155void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
1156{
1157	u32 endpoint_id = 0;
1158
1159	while (endpoint_id < ipa->endpoint_count) {
1160		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
1161
1162		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
1163			continue;
1164
1165		ipa_endpoint_init_hol_block_disable(endpoint);
1166		ipa_endpoint_init_hol_block_enable(endpoint, 0);
1167	}
1168}
1169
1170static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
1171{
1172	u32 endpoint_id = endpoint->endpoint_id;
1173	struct ipa *ipa = endpoint->ipa;
1174	const struct reg *reg;
1175	u32 val = 0;
1176
1177	if (!endpoint->toward_ipa)
1178		return;		/* Register not valid for RX endpoints */
1179
1180	reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
1181	/* DEAGGR_HDR_LEN is 0 */
1182	/* PACKET_OFFSET_VALID is 0 */
1183	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
1184	/* MAX_PACKET_LEN is 0 (not enforced) */
1185
1186	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1187}
1188
1189static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
1190{
1191	u32 resource_group = endpoint->config.resource_group;
1192	u32 endpoint_id = endpoint->endpoint_id;
1193	struct ipa *ipa = endpoint->ipa;
1194	const struct reg *reg;
1195	u32 val;
1196
1197	reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
1198	val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);
1199
1200	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1201}
1202
1203static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
1204{
1205	u32 endpoint_id = endpoint->endpoint_id;
1206	struct ipa *ipa = endpoint->ipa;
1207	const struct reg *reg;
1208	u32 val;
1209
1210	if (!endpoint->toward_ipa)
1211		return;		/* Register not valid for RX endpoints */
1212
1213	reg = ipa_reg(ipa, ENDP_INIT_SEQ);
1214
1215	/* Low-order byte configures primary packet processing */
1216	val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
1217
1218	/* Second byte (if supported) configures replicated packet processing */
1219	if (ipa->version < IPA_VERSION_4_5)
1220		val |= reg_encode(reg, SEQ_REP_TYPE,
1221				  endpoint->config.tx.seq_rep_type);
1222
1223	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1224}
1225
1226/**
1227 * ipa_endpoint_skb_tx() - Transmit a socket buffer
1228 * @endpoint:	Endpoint pointer
1229 * @skb:	Socket buffer to send
1230 *
1231 * Returns:	0 if successful, or a negative error code
1232 */
1233int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1234{
1235	struct gsi_trans *trans;
1236	u32 nr_frags;
1237	int ret;
1238
1239	/* Make sure source endpoint's TLV FIFO has enough entries to
1240	 * hold the linear portion of the skb and all its fragments.
1241	 * If not, see if we can linearize it before giving up.
1242	 */
1243	nr_frags = skb_shinfo(skb)->nr_frags;
1244	if (nr_frags > endpoint->skb_frag_max) {
1245		if (skb_linearize(skb))
1246			return -E2BIG;
1247		nr_frags = 0;
1248	}
1249
1250	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
1251	if (!trans)
1252		return -EBUSY;
1253
1254	ret = gsi_trans_skb_add(trans, skb);
1255	if (ret)
1256		goto err_trans_free;
1257	trans->data = skb;	/* transaction owns skb now */
1258
1259	gsi_trans_commit(trans, !netdev_xmit_more());
1260
1261	return 0;
1262
1263err_trans_free:
1264	gsi_trans_free(trans);
1265
1266	return -ENOMEM;
1267}
1268
1269static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1270{
1271	u32 endpoint_id = endpoint->endpoint_id;
1272	struct ipa *ipa = endpoint->ipa;
1273	const struct reg *reg;
1274	u32 val = 0;
1275
1276	reg = ipa_reg(ipa, ENDP_STATUS);
1277	if (endpoint->config.status_enable) {
1278		val |= reg_bit(reg, STATUS_EN);
1279		if (endpoint->toward_ipa) {
1280			enum ipa_endpoint_name name;
1281			u32 status_endpoint_id;
1282
1283			name = endpoint->config.tx.status_endpoint;
1284			status_endpoint_id = ipa->name_map[name]->endpoint_id;
1285
1286			val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
1287		}
1288		/* STATUS_LOCATION is 0, meaning IPA packet status
1289		 * precedes the packet (not present for IPA v4.5+)
1290		 */
1291		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
1292	}
1293
1294	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1295}
1296
1297static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
1298				      struct gsi_trans *trans)
1299{
1300	struct page *page;
1301	u32 buffer_size;
1302	u32 offset;
1303	u32 len;
1304	int ret;
1305
1306	buffer_size = endpoint->config.rx.buffer_size;
1307	page = dev_alloc_pages(get_order(buffer_size));
1308	if (!page)
1309		return -ENOMEM;
1310
1311	/* Offset the buffer to make space for skb headroom */
1312	offset = NET_SKB_PAD;
1313	len = buffer_size - offset;
1314
1315	ret = gsi_trans_page_add(trans, page, len, offset);
1316	if (ret)
1317		put_page(page);
1318	else
1319		trans->data = page;	/* transaction owns page now */
1320
1321	return ret;
1322}
1323
1324/**
1325 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1326 * @endpoint:	Endpoint to be replenished
1327 *
1328 * The IPA hardware can hold a fixed number of receive buffers for an RX
1329 * endpoint, based on the number of entries in the underlying channel ring
1330 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1331 * more receive buffers can be supplied to the hardware.  Replenishing for
1332 * an endpoint can be disabled, in which case buffers are not queued to
1333 * the hardware.
1334 */
1335static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1336{
1337	struct gsi_trans *trans;
1338
1339	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1340		return;
1341
1342	/* Skip it if it's already active */
1343	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1344		return;
1345
1346	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1347		bool doorbell;
1348
1349		if (ipa_endpoint_replenish_one(endpoint, trans))
1350			goto try_again_later;
1351
1352
1353		/* Ring the doorbell if we've got a full batch */
1354		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1355		gsi_trans_commit(trans, doorbell);
1356	}
1357
1358	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1359
1360	return;
1361
1362try_again_later:
1363	gsi_trans_free(trans);
1364	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1365
1366	/* Whenever a receive buffer transaction completes we'll try to
1367	 * replenish again.  It's unlikely, but if we fail to supply even
1368	 * one buffer, nothing will trigger another replenish attempt.
1369	 * If the hardware has no receive buffers queued, schedule work to
1370	 * try replenishing again.
1371	 */
1372	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1373		schedule_delayed_work(&endpoint->replenish_work,
1374				      msecs_to_jiffies(1));
1375}
1376
1377static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1378{
1379	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1380
1381	/* Start replenishing if hardware currently has no buffers */
1382	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1383		ipa_endpoint_replenish(endpoint);
1384}
1385
1386static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1387{
1388	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1389}
1390
1391static void ipa_endpoint_replenish_work(struct work_struct *work)
1392{
1393	struct delayed_work *dwork = to_delayed_work(work);
1394	struct ipa_endpoint *endpoint;
1395
1396	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1397
1398	ipa_endpoint_replenish(endpoint);
1399}
1400
1401static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1402				  void *data, u32 len, u32 extra)
1403{
1404	struct sk_buff *skb;
1405
1406	if (!endpoint->netdev)
1407		return;
1408
1409	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1410	if (skb) {
1411		/* Copy the data into the socket buffer and receive it */
1412		skb_put(skb, len);
1413		memcpy(skb->data, data, len);
1414		skb->truesize += extra;
1415	}
1416
1417	ipa_modem_skb_rx(endpoint->netdev, skb);
1418}
1419
1420static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1421				   struct page *page, u32 len)
1422{
1423	u32 buffer_size = endpoint->config.rx.buffer_size;
1424	struct sk_buff *skb;
1425
1426	/* Nothing to do if there's no netdev */
1427	if (!endpoint->netdev)
1428		return false;
1429
1430	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
1431
1432	skb = build_skb(page_address(page), buffer_size);
1433	if (skb) {
1434		/* Reserve the headroom and account for the data */
1435		skb_reserve(skb, NET_SKB_PAD);
1436		skb_put(skb, len);
1437	}
1438
1439	/* Receive the buffer (or record drop if unable to build it) */
1440	ipa_modem_skb_rx(endpoint->netdev, skb);
1441
1442	return skb != NULL;
1443}
1444
1445 /* The format of an IPA packet status structure is the same for several
1446  * status types (opcodes).  Other types aren't currently supported.
1447 */
1448static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1449{
1450	switch (opcode) {
1451	case IPA_STATUS_OPCODE_PACKET:
1452	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1453	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1454	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1455		return true;
1456	default:
1457		return false;
1458	}
1459}
1460
1461static bool
1462ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
1463{
1464	struct ipa *ipa = endpoint->ipa;
1465	enum ipa_status_opcode opcode;
1466	u32 endpoint_id;
1467
1468	opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
1469	if (!ipa_status_format_packet(opcode))
1470		return true;
1471
1472	endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
1473	if (endpoint_id != endpoint->endpoint_id)
1474		return true;
1475
1476	return false;	/* Don't skip this packet, process it */
1477}
1478
1479static bool
1480ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
1481{
1482	struct ipa_endpoint *command_endpoint;
1483	enum ipa_status_mask status_mask;
1484	struct ipa *ipa = endpoint->ipa;
1485	u32 endpoint_id;
1486
1487	status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
1488	if (!status_mask)
1489		return false;	/* No valid tag */
1490
1491	/* The status contains a valid tag.  We know the packet was sent to
1492	 * this endpoint (already verified by ipa_endpoint_status_skip()).
1493	 * If the packet came from the AP->command TX endpoint we know
1494	 * this packet was sent as part of the pipeline clear process.
1495	 */
1496	endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
1497	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1498	if (endpoint_id == command_endpoint->endpoint_id) {
1499		complete(&ipa->completion);
1500	} else {
1501		dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n",
1502			endpoint_id);
1503	}
1504
1505	return true;
1506}
1507
1508/* Return whether the status indicates the packet should be dropped */
1509static bool
1510ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
1511{
1512	enum ipa_status_exception exception;
1513	struct ipa *ipa = endpoint->ipa;
1514	u32 rule;
1515
1516	/* If the status indicates a tagged transfer, we'll drop the packet */
1517	if (ipa_endpoint_status_tag_valid(endpoint, data))
1518		return true;
1519
1520	/* Deaggregation exceptions we drop; all other types we consume */
1521	exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
1522	if (exception)
1523		return exception == IPA_STATUS_EXCEPTION_DEAGGR;
1524
1525	/* Drop the packet if it fails to match a routing rule; otherwise no */
1526	rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
1527
1528	return rule == IPA_STATUS_RULE_MISS;
1529}
1530
1531static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1532				      struct page *page, u32 total_len)
1533{
1534	u32 buffer_size = endpoint->config.rx.buffer_size;
1535	void *data = page_address(page) + NET_SKB_PAD;
1536	u32 unused = buffer_size - total_len;
1537	struct ipa *ipa = endpoint->ipa;
1538	struct device *dev = ipa->dev;
1539	u32 resid = total_len;
1540
1541	while (resid) {
1542		u32 length;
1543		u32 align;
1544		u32 len;
1545
1546		if (resid < IPA_STATUS_SIZE) {
1547			dev_err(dev,
1548				"short message (%u bytes < %zu byte status)\n",
1549				resid, IPA_STATUS_SIZE);
1550			break;
1551		}
1552
1553		/* Skip over status packets that lack packet data */
1554		length = ipa_status_extract(ipa, data, STATUS_LENGTH);
1555		if (!length || ipa_endpoint_status_skip(endpoint, data)) {
1556			data += IPA_STATUS_SIZE;
1557			resid -= IPA_STATUS_SIZE;
1558			continue;
1559		}
1560
1561		/* Compute the amount of buffer space consumed by the packet,
1562		 * including the status.  If the hardware is configured to
1563		 * pad packet data to an aligned boundary, account for that.
1564		 * And if checksum offload is enabled a trailer containing
1565		 * computed checksum information will be appended.
1566		 */
1567		align = endpoint->config.rx.pad_align ? : 1;
1568		len = IPA_STATUS_SIZE + ALIGN(length, align);
1569		if (endpoint->config.checksum)
1570			len += sizeof(struct rmnet_map_dl_csum_trailer);
1571
1572		if (!ipa_endpoint_status_drop(endpoint, data)) {
1573			void *data2;
1574			u32 extra;
1575
1576			/* Client receives only packet data (no status) */
1577			data2 = data + IPA_STATUS_SIZE;
1578
1579			/* Have the true size reflect the extra unused space in
1580			 * the original receive buffer.  Distribute the "cost"
1581			 * proportionately across all aggregated packets in the
1582			 * buffer.
1583			 */
1584			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1585			ipa_endpoint_skb_copy(endpoint, data2, length, extra);
1586		}
1587
1588		/* Consume status and the full packet it describes */
1589		data += len;
1590		resid -= len;
1591	}
1592}
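/* Example of the truesize adjustment above: if a 32768-byte buffer holds
 * 30000 bytes of aggregated status + packet data, 2768 bytes are unused.
 * A packet consuming len = 3000 bytes of the buffer is then charged
 * DIV_ROUND_CLOSEST(2768 * 3000, 30000), i.e. about 277 extra bytes of
 * truesize.
 */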
1593
1594void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1595				 struct gsi_trans *trans)
1596{
1597	struct page *page;
1598
1599	if (endpoint->toward_ipa)
1600		return;
1601
1602	if (trans->cancelled)
1603		goto done;
1604
1605	/* Parse or build a socket buffer using the actual received length */
1606	page = trans->data;
1607	if (endpoint->config.status_enable)
1608		ipa_endpoint_status_parse(endpoint, page, trans->len);
1609	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1610		trans->data = NULL;	/* Pages have been consumed */
1611done:
1612	ipa_endpoint_replenish(endpoint);
1613}
1614
1615void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1616				struct gsi_trans *trans)
1617{
1618	if (endpoint->toward_ipa) {
1619		struct ipa *ipa = endpoint->ipa;
1620
1621		/* Nothing to do for command transactions */
1622		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1623			struct sk_buff *skb = trans->data;
1624
1625			if (skb)
1626				dev_kfree_skb_any(skb);
1627		}
1628	} else {
1629		struct page *page = trans->data;
1630
1631		if (page)
1632			put_page(page);
1633	}
1634}
1635
1636void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1637{
1638	const struct reg *reg;
1639	u32 val;
1640
1641	reg = ipa_reg(ipa, ROUTE);
1642	/* ROUTE_DIS is 0 */
1643	val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
1644	val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
1645	/* ROUTE_DEF_HDR_OFST is 0 */
1646	val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
1647	val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
1648
1649	iowrite32(val, ipa->reg_virt + reg_offset(reg));
1650}
1651
1652void ipa_endpoint_default_route_clear(struct ipa *ipa)
1653{
1654	ipa_endpoint_default_route_set(ipa, 0);
1655}
1656
1657/**
1658 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1659 * @endpoint:	Endpoint to be reset
1660 *
1661 * If aggregation is active on an RX endpoint when a reset is performed
1662 * on its underlying GSI channel, a special sequence of actions must be
1663 * taken to ensure the IPA pipeline is properly cleared.
1664 *
1665 * Return:	0 if successful, or a negative error code
1666 */
1667static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1668{
1669	struct ipa *ipa = endpoint->ipa;
1670	struct device *dev = ipa->dev;
1671	struct gsi *gsi = &ipa->gsi;
1672	bool suspended = false;
1673	dma_addr_t addr;
1674	u32 retries;
1675	u32 len = 1;
1676	void *virt;
1677	int ret;
1678
1679	virt = kzalloc(len, GFP_KERNEL);
1680	if (!virt)
1681		return -ENOMEM;
1682
1683	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1684	if (dma_mapping_error(dev, addr)) {
1685		ret = -ENOMEM;
1686		goto out_kfree;
1687	}
1688
1689	/* Force close aggregation before issuing the reset */
1690	ipa_endpoint_force_close(endpoint);
1691
1692	/* Reset and reconfigure the channel with the doorbell engine
1693	 * disabled.  Then poll until we know aggregation is no longer
1694	 * active.  We'll re-enable the doorbell (if appropriate) when
1695	 * we reset again below.
1696	 */
1697	gsi_channel_reset(gsi, endpoint->channel_id, false);
1698
1699	/* Make sure the channel isn't suspended */
1700	suspended = ipa_endpoint_program_suspend(endpoint, false);
1701
1702	/* Start channel and do a 1 byte read */
1703	ret = gsi_channel_start(gsi, endpoint->channel_id);
1704	if (ret)
1705		goto out_suspend_again;
1706
1707	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1708	if (ret)
1709		goto err_endpoint_stop;
1710
1711	/* Wait for aggregation to be closed on the channel */
1712	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1713	do {
1714		if (!ipa_endpoint_aggr_active(endpoint))
1715			break;
1716		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1717	} while (retries--);
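	/* (With IPA_ENDPOINT_RESET_AGGR_RETRY_MAX of 3, that is up to four
	 * polls, sleeping roughly 1-2 ms between checks.)
	 */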
1718
1719	/* Check one last time */
1720	if (ipa_endpoint_aggr_active(endpoint))
1721		dev_err(dev, "endpoint %u still active during reset\n",
1722			endpoint->endpoint_id);
1723
1724	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1725
1726	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1727	if (ret)
1728		goto out_suspend_again;
1729
1730	/* Finally, reset and reconfigure the channel again (re-enabling
1731	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1732	 * complete the channel reset sequence.  Finish by suspending the
1733	 * channel again (if necessary).
1734	 */
1735	gsi_channel_reset(gsi, endpoint->channel_id, true);
1736
1737	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1738
1739	goto out_suspend_again;
1740
1741err_endpoint_stop:
1742	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1743out_suspend_again:
1744	if (suspended)
1745		(void)ipa_endpoint_program_suspend(endpoint, true);
1746	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1747out_kfree:
1748	kfree(virt);
1749
1750	return ret;
1751}
1752
1753static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1754{
1755	u32 channel_id = endpoint->channel_id;
1756	struct ipa *ipa = endpoint->ipa;
1757	bool special;
1758	int ret = 0;
1759
1760	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1761	 * is active, we need to handle things specially to recover.
1762	 * All other cases just need to reset the underlying GSI channel.
1763	 */
1764	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1765			endpoint->config.aggregation;
1766	if (special && ipa_endpoint_aggr_active(endpoint))
1767		ret = ipa_endpoint_reset_rx_aggr(endpoint);
1768	else
1769		gsi_channel_reset(&ipa->gsi, channel_id, true);
1770
1771	if (ret)
1772		dev_err(ipa->dev,
1773			"error %d resetting channel %u for endpoint %u\n",
1774			ret, endpoint->channel_id, endpoint->endpoint_id);
1775}
1776
1777static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1778{
1779	if (endpoint->toward_ipa) {
1780		/* Newer versions of IPA use GSI channel flow control
1781		 * instead of endpoint DELAY mode to prevent sending data.
1782		 * Flow control is disabled for newly-allocated channels,
1783		 * and we can assume flow control is not (ever) enabled
1784		 * for AP TX channels.
1785		 */
1786		if (endpoint->ipa->version < IPA_VERSION_4_2)
1787			ipa_endpoint_program_delay(endpoint, false);
1788	} else {
1789		/* Ensure suspend mode is off on all AP RX endpoints */
1790		(void)ipa_endpoint_program_suspend(endpoint, false);
1791	}
1792	ipa_endpoint_init_cfg(endpoint);
1793	ipa_endpoint_init_nat(endpoint);
1794	ipa_endpoint_init_hdr(endpoint);
1795	ipa_endpoint_init_hdr_ext(endpoint);
1796	ipa_endpoint_init_hdr_metadata_mask(endpoint);
1797	ipa_endpoint_init_mode(endpoint);
1798	ipa_endpoint_init_aggr(endpoint);
1799	if (!endpoint->toward_ipa) {
1800		if (endpoint->config.rx.holb_drop)
1801			ipa_endpoint_init_hol_block_enable(endpoint, 0);
1802		else
1803			ipa_endpoint_init_hol_block_disable(endpoint);
1804	}
1805	ipa_endpoint_init_deaggr(endpoint);
1806	ipa_endpoint_init_rsrc_grp(endpoint);
1807	ipa_endpoint_init_seq(endpoint);
1808	ipa_endpoint_status(endpoint);
1809}
1810
1811int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1812{
1813	u32 endpoint_id = endpoint->endpoint_id;
1814	struct ipa *ipa = endpoint->ipa;
1815	struct gsi *gsi = &ipa->gsi;
1816	int ret;
1817
1818	ret = gsi_channel_start(gsi, endpoint->channel_id);
1819	if (ret) {
1820		dev_err(ipa->dev,
1821			"error %d starting %cX channel %u for endpoint %u\n",
1822			ret, endpoint->toward_ipa ? 'T' : 'R',
1823			endpoint->channel_id, endpoint_id);
1824		return ret;
1825	}
1826
1827	if (!endpoint->toward_ipa) {
1828		ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
1829		ipa_endpoint_replenish_enable(endpoint);
1830	}
1831
1832	__set_bit(endpoint_id, ipa->enabled);
1833
1834	return 0;
1835}
1836
1837void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1838{
1839	u32 endpoint_id = endpoint->endpoint_id;
1840	struct ipa *ipa = endpoint->ipa;
1841	struct gsi *gsi = &ipa->gsi;
1842	int ret;
1843
1844	if (!test_bit(endpoint_id, ipa->enabled))
1845		return;
1846
1847	__clear_bit(endpoint_id, endpoint->ipa->enabled);
1848
1849	if (!endpoint->toward_ipa) {
1850		ipa_endpoint_replenish_disable(endpoint);
1851		ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
1852	}
1853
1854	/* Note that if stop fails, the channel's state is not well-defined */
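	/* (The endpoint was already cleared from ipa->enabled above, so only
	 * the GSI channel itself is left in question.)
	 */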
1855	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1856	if (ret)
1857		dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n",
1858			ret, endpoint_id);
1859}
1860
1861void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1862{
1863	struct device *dev = endpoint->ipa->dev;
1864	struct gsi *gsi = &endpoint->ipa->gsi;
1865	int ret;
1866
1867	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1868		return;
1869
1870	if (!endpoint->toward_ipa) {
1871		ipa_endpoint_replenish_disable(endpoint);
1872		(void)ipa_endpoint_program_suspend(endpoint, true);
1873	}
1874
1875	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1876	if (ret)
1877		dev_err(dev, "error %d suspending channel %u\n", ret,
1878			endpoint->channel_id);
1879}
1880
1881void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1882{
1883	struct device *dev = endpoint->ipa->dev;
1884	struct gsi *gsi = &endpoint->ipa->gsi;
1885	int ret;
1886
1887	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1888		return;
1889
1890	if (!endpoint->toward_ipa)
1891		(void)ipa_endpoint_program_suspend(endpoint, false);
1892
1893	ret = gsi_channel_resume(gsi, endpoint->channel_id);
1894	if (ret)
1895		dev_err(dev, "error %d resuming channel %u\n", ret,
1896			endpoint->channel_id);
1897	else if (!endpoint->toward_ipa)
1898		ipa_endpoint_replenish_enable(endpoint);
1899}
1900
1901void ipa_endpoint_suspend(struct ipa *ipa)
1902{
1903	if (!ipa->setup_complete)
1904		return;
1905
1906	if (ipa->modem_netdev)
1907		ipa_modem_suspend(ipa->modem_netdev);
1908
1909	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1910	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1911}
1912
1913void ipa_endpoint_resume(struct ipa *ipa)
1914{
1915	if (!ipa->setup_complete)
1916		return;
1917
1918	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1919	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1920
1921	if (ipa->modem_netdev)
1922		ipa_modem_resume(ipa->modem_netdev);
1923}
1924
1925static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1926{
1927	struct gsi *gsi = &endpoint->ipa->gsi;
1928	u32 channel_id = endpoint->channel_id;
1929
1930	/* Only AP endpoints get set up */
1931	if (endpoint->ee_id != GSI_EE_AP)
1932		return;
1933
1934	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
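	/* (One TRE of a TX transaction carries the skb's linear data, so
	 * trans_tre_max - 1 TREs remain for page fragments.)
	 */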
1935	if (!endpoint->toward_ipa) {
1936		/* RX transactions require a single TRE, so the maximum
1937		 * backlog is the same as the maximum outstanding TREs.
1938		 */
1939		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1940		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1941		INIT_DELAYED_WORK(&endpoint->replenish_work,
1942				  ipa_endpoint_replenish_work);
1943	}
1944
1945	ipa_endpoint_program(endpoint);
1946
1947	__set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1948}
1949
1950static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1951{
1952	__clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1953
1954	if (!endpoint->toward_ipa)
1955		cancel_delayed_work_sync(&endpoint->replenish_work);
1956
1957	ipa_endpoint_reset(endpoint);
1958}
1959
1960void ipa_endpoint_setup(struct ipa *ipa)
1961{
1962	u32 endpoint_id;
1963
1964	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
1965		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1966}
1967
1968void ipa_endpoint_teardown(struct ipa *ipa)
1969{
1970	u32 endpoint_id;
1971
1972	for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
1973		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1974}
1975
1976void ipa_endpoint_deconfig(struct ipa *ipa)
1977{
1978	ipa->available_count = 0;
1979	bitmap_free(ipa->available);
1980	ipa->available = NULL;
1981}
1982
1983int ipa_endpoint_config(struct ipa *ipa)
1984{
1985	struct device *dev = ipa->dev;
1986	const struct reg *reg;
1987	u32 endpoint_id;
1988	u32 hw_limit;
1989	u32 tx_count;
1990	u32 rx_count;
1991	u32 rx_base;
1992	u32 limit;
1993	u32 val;
1994
1995	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1996	 * Furthermore, the endpoints were not grouped such that TX
1997	 * endpoint numbers started with 0 and RX endpoints had numbers
1998	 * higher than all TX endpoints, so we can't do the simple
1999	 * direction check used for newer hardware below.
2000	 *
2001	 * For hardware that doesn't support the FLAVOR_0 register,
2002	 * just set the available mask to support any endpoint, and
2003	 * assume the configuration is valid.
2004	 */
2005	if (ipa->version < IPA_VERSION_3_5) {
2006		ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
2007		if (!ipa->available)
2008			return -ENOMEM;
2009		ipa->available_count = IPA_ENDPOINT_MAX;
2010
2011		bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
2012
2013		return 0;
2014	}
2015
2016	/* Find out about the endpoints supplied by the hardware, and ensure
2017	 * the highest one doesn't exceed the number supported by software.
2018	 */
2019	reg = ipa_reg(ipa, FLAVOR_0);
2020	val = ioread32(ipa->reg_virt + reg_offset(reg));
2021
2022	/* Our RX is an IPA producer; our TX is an IPA consumer. */
2023	tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
2024	rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
2025	rx_base = reg_decode(reg, PROD_LOWEST, val);
2026
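	/* Illustrative values (not from any particular SoC): with
	 * tx_count = 11, rx_count = 20 and rx_base = 11, TX endpoints use
	 * IDs 0-10, RX endpoints use IDs 11-30, and limit works out to 31.
	 */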
2027	limit = rx_base + rx_count;
2028	if (limit > IPA_ENDPOINT_MAX) {
2029		dev_err(dev, "too many endpoints, %u > %u\n",
2030			limit, IPA_ENDPOINT_MAX);
2031		return -EINVAL;
2032	}
2033
2034	/* Until IPA v5.0, at most 32 endpoints (IDs 0-31) were supported */
2035	hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
2036	if (limit > hw_limit) {
2037		dev_err(dev, "unexpected endpoint count, %u > %u\n",
2038			limit, hw_limit);
2039		return -EINVAL;
2040	}
2041
2042	/* Allocate and initialize the available endpoint bitmap */
2043	ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
2044	if (!ipa->available)
2045		return -ENOMEM;
2046	ipa->available_count = limit;
2047
2048	/* Mark all supported RX and TX endpoints as available */
2049	bitmap_set(ipa->available, 0, tx_count);
2050	bitmap_set(ipa->available, rx_base, rx_count);
2051
2052	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
2053		struct ipa_endpoint *endpoint;
2054
2055		if (endpoint_id >= limit) {
2056			dev_err(dev, "invalid endpoint id, %u > %u\n",
2057				endpoint_id, limit - 1);
2058			goto err_free_bitmap;
2059		}
2060
2061		if (!test_bit(endpoint_id, ipa->available)) {
2062			dev_err(dev, "unavailable endpoint id %u\n",
2063				endpoint_id);
2064			goto err_free_bitmap;
2065		}
2066
2067		/* Make sure it's pointing in the right direction */
2068		endpoint = &ipa->endpoint[endpoint_id];
2069		if (endpoint->toward_ipa) {
2070			if (endpoint_id < tx_count)
2071				continue;
2072		} else if (endpoint_id >= rx_base) {
2073			continue;
2074		}
2075
2076		dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
2077		goto err_free_bitmap;
2078	}
2079
2080	return 0;
2081
2082err_free_bitmap:
2083	ipa_endpoint_deconfig(ipa);
2084
2085	return -EINVAL;
2086}
2087
2088static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
2089				  const struct ipa_gsi_endpoint_data *data)
2090{
2091	struct ipa_endpoint *endpoint;
2092
2093	endpoint = &ipa->endpoint[data->endpoint_id];
2094
2095	if (data->ee_id == GSI_EE_AP)
2096		ipa->channel_map[data->channel_id] = endpoint;
2097	ipa->name_map[name] = endpoint;
2098
2099	endpoint->ipa = ipa;
2100	endpoint->ee_id = data->ee_id;
2101	endpoint->channel_id = data->channel_id;
2102	endpoint->endpoint_id = data->endpoint_id;
2103	endpoint->toward_ipa = data->toward_ipa;
2104	endpoint->config = data->endpoint.config;
2105
2106	__set_bit(endpoint->endpoint_id, ipa->defined);
2107}
2108
2109static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
2110{
2111	__clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
2112
2113	memset(endpoint, 0, sizeof(*endpoint));
2114}
2115
2116void ipa_endpoint_exit(struct ipa *ipa)
2117{
2118	u32 endpoint_id;
2119
2120	ipa->filtered = 0;
2121
2122	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
2123		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
2124
2125	bitmap_free(ipa->enabled);
2126	ipa->enabled = NULL;
2127	bitmap_free(ipa->set_up);
2128	ipa->set_up = NULL;
2129	bitmap_free(ipa->defined);
2130	ipa->defined = NULL;
2131
2132	memset(ipa->name_map, 0, sizeof(ipa->name_map));
2133	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
2134}
2135
2136/* Initialize endpoint state; returns 0 if successful, or a negative error code */
2137int ipa_endpoint_init(struct ipa *ipa, u32 count,
2138		      const struct ipa_gsi_endpoint_data *data)
2139{
2140	enum ipa_endpoint_name name;
2141	u32 filtered;
2142
2143	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
2144
2145	/* Number of endpoints is one more than the maximum ID */
2146	ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
2147	if (!ipa->endpoint_count)
2148		return -EINVAL;
2149
2150	/* Initialize endpoint state bitmaps */
2151	ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2152	if (!ipa->defined)
2153		return -ENOMEM;
2154
2155	ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2156	if (!ipa->set_up)
2157		goto err_free_defined;
2158
2159	ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2160	if (!ipa->enabled)
2161		goto err_free_set_up;
2162
2163	filtered = 0;
2164	for (name = 0; name < count; name++, data++) {
2165		if (ipa_gsi_endpoint_data_empty(data))
2166			continue;	/* Skip over empty slots */
2167
2168		ipa_endpoint_init_one(ipa, name, data);
2169
2170		if (data->endpoint.filter_support)
2171			filtered |= BIT(data->endpoint_id);
2172		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
2173			ipa->modem_tx_count++;
2174	}
2175
2176	/* Make sure the set of filtered endpoints is valid */
2177	if (!ipa_filtered_valid(ipa, filtered)) {
2178		ipa_endpoint_exit(ipa);
2179
2180		return -EINVAL;
2181	}
2182
2183	ipa->filtered = filtered;
2184
2185	return 0;
2186
2187err_free_set_up:
2188	bitmap_free(ipa->set_up);
2189	ipa->set_up = NULL;
2190err_free_defined:
2191	bitmap_free(ipa->defined);
2192	ipa->defined = NULL;
2193
2194	return -ENOMEM;
2195}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2
   3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2019-2020 Linaro Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <linux/device.h>
   9#include <linux/slab.h>
  10#include <linux/bitfield.h>
  11#include <linux/if_rmnet.h>
  12#include <linux/dma-direction.h>
  13
  14#include "gsi.h"
  15#include "gsi_trans.h"
  16#include "ipa.h"
  17#include "ipa_data.h"
  18#include "ipa_endpoint.h"
  19#include "ipa_cmd.h"
  20#include "ipa_mem.h"
  21#include "ipa_modem.h"
  22#include "ipa_table.h"
  23#include "ipa_gsi.h"
  24#include "ipa_clock.h"
  25
  26#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
  27
  28#define IPA_REPLENISH_BATCH	16
  29
  30/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
  31#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */
  32
  33/* The amount of RX buffer space consumed by standard skb overhead */
  34#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
  35
  36/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
  37#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
  38
  39#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
  40#define IPA_AGGR_TIME_LIMIT_DEFAULT		500	/* microseconds */
  41
  42/** enum ipa_status_opcode - status element opcode hardware values */
  43enum ipa_status_opcode {
  44	IPA_STATUS_OPCODE_PACKET		= 0x01,
  45	IPA_STATUS_OPCODE_NEW_FRAG_RULE		= 0x02,
  46	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
  47	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
  48	IPA_STATUS_OPCODE_LOG			= 0x10,
  49	IPA_STATUS_OPCODE_DCMP			= 0x20,
  50	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
  51};
  52
  53/** enum ipa_status_exception - status element exception type */
  54enum ipa_status_exception {
  55	/* 0 means no exception */
  56	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
  57	IPA_STATUS_EXCEPTION_IPTYPE		= 0x04,
  58	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 0x08,
  59	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 0x10,
  60	IPA_STATUS_EXCEPTION_SW_FILT		= 0x20,
  61	/* The meaning of the next value depends on whether the IP version */
  62	IPA_STATUS_EXCEPTION_NAT		= 0x40,		/* IPv4 */
  63	IPA_STATUS_EXCEPTION_IPV6CT		= IPA_STATUS_EXCEPTION_NAT,
 
 
 
  64};
  65
  66/* Status element provided by hardware */
  67struct ipa_status {
  68	u8 opcode;		/* enum ipa_status_opcode */
  69	u8 exception;		/* enum ipa_status_exception */
  70	__le16 mask;
  71	__le16 pkt_len;
  72	u8 endp_src_idx;
  73	u8 endp_dst_idx;
  74	__le32 metadata;
  75	__le32 flags1;
  76	__le64 flags2;
  77	__le32 flags3;
  78	__le32 flags4;
 
 
 
 
 
  79};
  80
  81/* Field masks for struct ipa_status structure fields */
 
 
 
  82
  83#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  84
  85#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  86
  87#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK	GENMASK(0, 0)
  88#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK	GENMASK(1, 1)
  89#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK	GENMASK(2, 2)
  90#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK	GENMASK(3, 3)
  91#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK	GENMASK(13, 4)
  92#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK	GENMASK(14, 14)
  93#define IPA_STATUS_FLAGS1_RT_HASH_FMASK		GENMASK(15, 15)
  94#define IPA_STATUS_FLAGS1_UCP_FMASK		GENMASK(16, 16)
  95#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK	GENMASK(21, 17)
  96#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
  97
  98#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK		GENMASK_ULL(0, 0)
  99#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK	GENMASK_ULL(13, 1)
 100#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK	GENMASK_ULL(15, 14)
 101#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK	GENMASK_ULL(63, 16)
 102
 103#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK		GENMASK(7, 0)
 104#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK		GENMASK(31, 8)
 105
 106#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK	GENMASK(0, 0)
 107#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK	GENMASK(10, 1)
 108#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK	GENMASK(11, 11)
 109#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK	GENMASK(15, 12)
 110#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK	GENMASK(31, 16)
 111
 112#ifdef IPA_VALIDATE
 113
 114static void ipa_endpoint_validate_build(void)
 115{
 116	/* The aggregation byte limit defines the point at which an
 117	 * aggregation window will close.  It is programmed into the
 118	 * IPA hardware as a number of KB.  We don't use "hard byte
 119	 * limit" aggregation, which means that we need to supply
 120	 * enough space in a receive buffer to hold a complete MTU
 121	 * plus normal skb overhead *after* that aggregation byte
 122	 * limit has been crossed.
 123	 *
 124	 * This check just ensures we don't define a receive buffer
 125	 * size that would exceed what we can represent in the field
 126	 * that is used to program its size.
 127	 */
 128	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
 129		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
 130		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);
 131
 132	/* I honestly don't know where this requirement comes from.  But
 133	 * it holds, and if we someday need to loosen the constraint we
 134	 * can try to track it down.
 135	 */
 136	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
 137}
 138
 139static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 140			    const struct ipa_gsi_endpoint_data *all_data,
 141			    const struct ipa_gsi_endpoint_data *data)
 142{
 143	const struct ipa_gsi_endpoint_data *other_data;
 144	struct device *dev = &ipa->pdev->dev;
 145	enum ipa_endpoint_name other_name;
 
 146
 147	if (ipa_gsi_endpoint_data_empty(data))
 148		return true;
 149
 150	if (!data->toward_ipa) {
 
 
 
 
 
 
 151		if (data->endpoint.filter_support) {
 152			dev_err(dev, "filtering not supported for "
 153					"RX endpoint %u\n",
 154				data->endpoint_id);
 155			return false;
 156		}
 157
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 158		return true;	/* Nothing more to check for RX */
 159	}
 160
 
 
 
 
 
 
 
 
 
 161	if (data->endpoint.config.status_enable) {
 162		other_name = data->endpoint.config.tx.status_endpoint;
 163		if (other_name >= count) {
 164			dev_err(dev, "status endpoint name %u out of range "
 165					"for endpoint %u\n",
 166				other_name, data->endpoint_id);
 167			return false;
 168		}
 169
 170		/* Status endpoint must be defined... */
 171		other_data = &all_data[other_name];
 172		if (ipa_gsi_endpoint_data_empty(other_data)) {
 173			dev_err(dev, "DMA endpoint name %u undefined "
 174					"for endpoint %u\n",
 175				other_name, data->endpoint_id);
 176			return false;
 177		}
 178
 179		/* ...and has to be an RX endpoint... */
 180		if (other_data->toward_ipa) {
 181			dev_err(dev,
 182				"status endpoint for endpoint %u not RX\n",
 183				data->endpoint_id);
 184			return false;
 185		}
 186
 187		/* ...and if it's to be an AP endpoint... */
 188		if (other_data->ee_id == GSI_EE_AP) {
 189			/* ...make sure it has status enabled. */
 190			if (!other_data->endpoint.config.status_enable) {
 191				dev_err(dev,
 192					"status not enabled for endpoint %u\n",
 193					other_data->endpoint_id);
 194				return false;
 195			}
 196		}
 197	}
 198
 199	if (data->endpoint.config.dma_mode) {
 200		other_name = data->endpoint.config.dma_endpoint;
 201		if (other_name >= count) {
 202			dev_err(dev, "DMA endpoint name %u out of range "
 203					"for endpoint %u\n",
 204				other_name, data->endpoint_id);
 205			return false;
 206		}
 207
 208		other_data = &all_data[other_name];
 209		if (ipa_gsi_endpoint_data_empty(other_data)) {
 210			dev_err(dev, "DMA endpoint name %u undefined "
 211					"for endpoint %u\n",
 212				other_name, data->endpoint_id);
 213			return false;
 214		}
 215	}
 216
 217	return true;
 218}
 219
 220static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 221				    const struct ipa_gsi_endpoint_data *data)
 
 222{
 223	const struct ipa_gsi_endpoint_data *dp = data;
 224	struct device *dev = &ipa->pdev->dev;
 225	enum ipa_endpoint_name name;
 226
 227	ipa_endpoint_validate_build();
 228
 229	if (count > IPA_ENDPOINT_COUNT) {
 230		dev_err(dev, "too many endpoints specified (%u > %u)\n",
 231			count, IPA_ENDPOINT_COUNT);
 232		return false;
 233	}
 234
 235	/* Make sure needed endpoints have defined data */
 236	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
 237		dev_err(dev, "command TX endpoint not defined\n");
 238		return false;
 239	}
 240	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
 241		dev_err(dev, "LAN RX endpoint not defined\n");
 242		return false;
 243	}
 244	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
 245		dev_err(dev, "AP->modem TX endpoint not defined\n");
 246		return false;
 247	}
 248	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
 249		dev_err(dev, "AP<-modem RX endpoint not defined\n");
 250		return false;
 251	}
 252
 253	for (name = 0; name < count; name++, dp++)
 
 254		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
 255			return false;
 256
 257	return true;
 258}
 259
 260#else /* !IPA_VALIDATE */
 261
 262static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 263				    const struct ipa_gsi_endpoint_data *data)
 264{
 265	return true;
 266}
 267
 268#endif /* !IPA_VALIDATE */
 269
 270/* Allocate a transaction to use on a non-command endpoint */
 271static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
 272						  u32 tre_count)
 273{
 274	struct gsi *gsi = &endpoint->ipa->gsi;
 275	u32 channel_id = endpoint->channel_id;
 276	enum dma_data_direction direction;
 277
 278	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 279
 280	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
 281}
 282
 283/* suspend_delay represents suspend for RX, delay for TX endpoints.
 284 * Note that suspend is not supported starting with IPA v4.0.
 
 285 */
 286static bool
 287ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
 288{
 289	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
 290	struct ipa *ipa = endpoint->ipa;
 
 
 
 291	bool state;
 292	u32 mask;
 293	u32 val;
 294
 295	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
 296	 * correctly on IPA v4.2.
 297	 *
 298	 * if (endpoint->toward_ipa)
 299	 * 	assert(ipa->version != IPA_VERSION_4.2);
 300	 * else
 301	 * 	assert(ipa->version == IPA_VERSION_3_5_1);
 302	 */
 303	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
 304
 
 
 305	val = ioread32(ipa->reg_virt + offset);
 
 
 
 
 
 
 306	/* Don't bother if it's already in the requested state */
 307	state = !!(val & mask);
 308	if (suspend_delay != state) {
 309		val ^= mask;
 310		iowrite32(val, ipa->reg_virt + offset);
 311	}
 312
 313	return state;
 314}
 315
 316/* We currently don't care what the previous state was for delay mode */
 317static void
 318ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
 319{
 320	/* assert(endpoint->toward_ipa); */
 
 
 321
 322	/* Delay mode doesn't work properly for IPA v4.2 */
 323	if (endpoint->ipa->version != IPA_VERSION_4_2)
 324		(void)ipa_endpoint_init_ctrl(endpoint, enable);
 325}
 326
 327static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
 328{
 329	u32 mask = BIT(endpoint->endpoint_id);
 330	struct ipa *ipa = endpoint->ipa;
 331	u32 offset;
 
 332	u32 val;
 333
 334	/* assert(mask & ipa->available); */
 335	offset = ipa_reg_state_aggr_active_offset(ipa->version);
 336	val = ioread32(ipa->reg_virt + offset);
 
 337
 338	return !!(val & mask);
 339}
 340
 341static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
 342{
 343	u32 mask = BIT(endpoint->endpoint_id);
 
 344	struct ipa *ipa = endpoint->ipa;
 
 
 
 
 345
 346	/* assert(mask & ipa->available); */
 347	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
 348}
 349
 350/**
 351 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 352 * @endpoint:	Endpoint on which to emulate a suspend
 353 *
 354 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 355 *  with an open aggregation frame.  This is to work around a hardware
 356 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 357 *  generated when it should be.
 358 */
 359static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
 360{
 361	struct ipa *ipa = endpoint->ipa;
 362
 363	if (!endpoint->data->aggregation)
 364		return;
 365
 366	/* Nothing to do if the endpoint doesn't have aggregation open */
 367	if (!ipa_endpoint_aggr_active(endpoint))
 368		return;
 369
 370	/* Force close aggregation */
 371	ipa_endpoint_force_close(endpoint);
 372
 373	ipa_interrupt_simulate_suspend(ipa->interrupt);
 374}
 375
 376/* Returns previous suspend state (true means suspend was enabled) */
 377static bool
 378ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
 379{
 380	bool suspended;
 381
 382	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
 383		return enable;	/* For IPA v4.0+, no change made */
 384
 385	/* assert(!endpoint->toward_ipa); */
 386
 387	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
 388
 389	/* A client suspended with an open aggregation frame will not
 390	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
 391	 * ipa_endpoint_suspend_aggr() handle this.
 392	 */
 393	if (enable && !suspended)
 394		ipa_endpoint_suspend_aggr(endpoint);
 395
 396	return suspended;
 397}
 398
 399/* Enable or disable delay or suspend mode on all modem endpoints */
 
 
 
 
 400void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
 401{
 402	u32 endpoint_id;
 403
 404	/* DELAY mode doesn't work correctly on IPA v4.2 */
 405	if (ipa->version == IPA_VERSION_4_2)
 406		return;
 407
 408	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
 409		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
 410
 411		if (endpoint->ee_id != GSI_EE_MODEM)
 412			continue;
 413
 414		/* Set TX delay mode or RX suspend mode */
 415		if (endpoint->toward_ipa)
 
 416			ipa_endpoint_program_delay(endpoint, enable);
 417		else
 418			(void)ipa_endpoint_program_suspend(endpoint, enable);
 
 
 419	}
 420}
 421
 422/* Reset all modem endpoints to use the default exception endpoint */
 423int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
 424{
 425	u32 initialized = ipa->initialized;
 426	struct gsi_trans *trans;
 
 427	u32 count;
 428
 429	/* We need one command per modem TX endpoint.  We can get an upper
 430	 * bound on that by assuming all initialized endpoints are modem->IPA.
 431	 * That won't happen, and we could be more precise, but this is fine
 432	 * for now.  We need to end the transaction with a "tag process."
 433	 */
 434	count = hweight32(initialized) + ipa_cmd_tag_process_count();
 435	trans = ipa_cmd_trans_alloc(ipa, count);
 436	if (!trans) {
 437		dev_err(&ipa->pdev->dev,
 438			"no transaction to reset modem exception endpoints\n");
 439		return -EBUSY;
 440	}
 441
 442	while (initialized) {
 443		u32 endpoint_id = __ffs(initialized);
 444		struct ipa_endpoint *endpoint;
 
 445		u32 offset;
 446
 447		initialized ^= BIT(endpoint_id);
 448
 449		/* We only reset modem TX endpoints */
 450		endpoint = &ipa->endpoint[endpoint_id];
 451		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
 452			continue;
 453
 454		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
 
 455
 456		/* Value written is 0, and all bits are updated.  That
 457		 * means status is disabled on the endpoint, and as a
 458		 * result all other fields in the register are ignored.
 459		 */
 460		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
 461	}
 462
 463	ipa_cmd_tag_process_add(trans);
 464
 465	/* XXX This should have a 1 second timeout */
 466	gsi_trans_commit_wait(trans);
 467
 
 
 468	return 0;
 469}
 470
 471static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
 472{
 473	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
 
 
 
 474	u32 val = 0;
 475
 
 476	/* FRAG_OFFLOAD_EN is 0 */
 477	if (endpoint->data->checksum) {
 
 
 478		if (endpoint->toward_ipa) {
 479			u32 checksum_offset;
 480
 481			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
 482					       CS_OFFLOAD_EN_FMASK);
 483			/* Checksum header offset is in 4-byte units */
 484			checksum_offset = sizeof(struct rmnet_map_header);
 485			checksum_offset /= sizeof(u32);
 486			val |= u32_encode_bits(checksum_offset,
 487					       CS_METADATA_HDR_OFFSET_FMASK);
 
 
 488		} else {
 489			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
 490					       CS_OFFLOAD_EN_FMASK);
 
 491		}
 492	} else {
 493		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
 494				       CS_OFFLOAD_EN_FMASK);
 495	}
 
 496	/* CS_GEN_QMB_MASTER_SEL is 0 */
 497
 498	iowrite32(val, endpoint->ipa->reg_virt + offset);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 499}
 500
 501/**
 502 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 503 * @endpoint:	Endpoint pointer
 504 *
 505 * We program QMAP endpoints so each packet received is preceded by a QMAP
 506 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 507 * packet size field, and we have the IPA hardware populate both for each
 508 * received packet.  The header is configured (in the HDR_EXT register)
 509 * to use big endian format.
 510 *
 511 * The packet size is written into the QMAP header's pkt_len field.  That
 512 * location is defined here using the HDR_OFST_PKT_SIZE field.
 513 *
 514 * The mux_id comes from a 4-byte metadata value supplied with each packet
 515 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 516 * value that we want, in its low-order byte.  A bitmask defined in the
 517 * endpoint's METADATA_MASK register defines which byte within the modem
 518 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 519 * here indicates where the extracted byte should be placed within the QMAP
 520 * header.
 521 */
 522static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
 523{
 524	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
 
 
 525	u32 val = 0;
 526
 527	if (endpoint->data->qmap) {
 528		size_t header_size = sizeof(struct rmnet_map_header);
 
 
 529
 530		/* We might supply a checksum header after the QMAP header */
 531		if (endpoint->toward_ipa && endpoint->data->checksum)
 532			header_size += sizeof(struct rmnet_map_ul_csum_header);
 533		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
 534
 535		/* Define how to fill fields in a received QMAP header */
 536		if (!endpoint->toward_ipa) {
 537			u32 off;	/* Field offset within header */
 538
 539			/* Where IPA will write the metadata value */
 540			off = offsetof(struct rmnet_map_header, mux_id);
 541			val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
 542
 543			/* Where IPA will write the length */
 544			off = offsetof(struct rmnet_map_header, pkt_len);
 545			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
 546			val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
 
 
 
 
 547		}
 548		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
 549		val |= HDR_OFST_METADATA_VALID_FMASK;
 550
 551		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
 552		/* HDR_A5_MUX is 0 */
 553		/* HDR_LEN_INC_DEAGG_HDR is 0 */
 554		/* HDR_METADATA_REG_VALID is 0 (TX only) */
 555	}
 556
 557	iowrite32(val, endpoint->ipa->reg_virt + offset);
 558}
 559
 560static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
 561{
 562	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
 563	u32 pad_align = endpoint->data->rx.pad_align;
 
 
 564	u32 val = 0;
 565
 566	val |= HDR_ENDIANNESS_FMASK;		/* big endian */
 567
 568	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
 569	 * driver assumes this field is meaningful in packets it receives,
 570	 * and assumes the header's payload length includes that padding.
 571	 * The RMNet driver does *not* pad packets it sends, however, so
 572	 * the pad field (although 0) should be ignored.
 573	 */
 574	if (endpoint->data->qmap && !endpoint->toward_ipa) {
 575		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
 576		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
 577		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
 578		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
 
 
 
 
 
 579	}
 580
 581	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
 582	if (!endpoint->toward_ipa)
 583		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 584
 585	iowrite32(val, endpoint->ipa->reg_virt + offset);
 586}
 587
 588
 589static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
 590{
 591	u32 endpoint_id = endpoint->endpoint_id;
 
 
 592	u32 val = 0;
 593	u32 offset;
 594
 595	if (endpoint->toward_ipa)
 596		return;		/* Register not valid for TX endpoints */
 597
 598	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
 
 599
 600	/* Note that HDR_ENDIANNESS indicates big endian header fields */
 601	if (endpoint->data->qmap)
 602		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
 603
 604	iowrite32(val, endpoint->ipa->reg_virt + offset);
 605}
 606
 607static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
 608{
 609	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
 
 
 610	u32 val;
 611
 612	if (!endpoint->toward_ipa)
 613		return;		/* Register not valid for RX endpoints */
 614
 615	if (endpoint->data->dma_mode) {
 616		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
 617		u32 dma_endpoint_id;
 618
 619		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
 620
 621		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
 622		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
 623	} else {
 624		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
 625	}
 626	/* All other bits unspecified (and 0) */
 627
 628	iowrite32(val, endpoint->ipa->reg_virt + offset);
 
 629}
 630
 631/* Compute the aggregation size value to use for a given buffer size */
 632static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 633{
 634	/* We don't use "hard byte limit" aggregation, so we define the
 635	 * aggregation limit such that our buffer has enough space *after*
 636	 * that limit to receive a full MTU of data, plus overhead.
 637	 */
 638	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 639
 640	return rx_buffer_size / SZ_1K;
 641}
 642
 643static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 644{
 645	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
 
 
 646	u32 val = 0;
 647
 648	if (endpoint->data->aggregation) {
 
 649		if (!endpoint->toward_ipa) {
 
 
 650			u32 limit;
 651
 652			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
 653			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
 
 
 
 
 
 
 654
 655			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
 656			val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
 657
 658			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
 659			limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
 660			val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);
 661
 662			/* AGGR_PKT_LIMIT is 0 (unlimited) */
 663
 664			if (endpoint->data->rx.aggr_close_eof)
 665				val |= AGGR_SW_EOF_ACTIVE_FMASK;
 666			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
 667		} else {
 668			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
 669					       AGGR_EN_FMASK);
 670			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
 671			/* other fields ignored */
 672		}
 673		/* AGGR_FORCE_CLOSE is 0 */
 
 674	} else {
 675		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
 676		/* other fields ignored */
 677	}
 678
 679	iowrite32(val, endpoint->ipa->reg_virt + offset);
 680}
 681
 682/* The head-of-line blocking timer is defined as a tick count, where each
 683 * tick represents 128 cycles of the IPA core clock.  Return the value
 684 * that should be written to that register that represents the timeout
 685 * period provided.
 
 
 
 686 */
 687static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
 
 688{
 689	u32 width;
 690	u32 scale;
 691	u64 ticks;
 692	u64 rate;
 693	u32 high;
 694	u32 val;
 695
 696	if (!microseconds)
 697		return 0;	/* Nothing to compute if timer period is 0 */
 698
 699	/* Use 64 bit arithmetic to avoid overflow... */
 700	rate = ipa_clock_rate(ipa);
 
 
 
 
 
 
 
 
 
 
 
 701	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
 702	/* ...but we still need to fit into a 32-bit register */
 703	WARN_ON(ticks > U32_MAX);
 704
 705	/* IPA v3.5.1 just records the tick count */
 706	if (ipa->version == IPA_VERSION_3_5_1)
 707		return (u32)ticks;
 
 
 
 708
 709	/* For IPA v4.2, the tick count is represented by base and
 710	 * scale fields within the 32-bit timer register, where:
 711	 *     ticks = base << scale;
 712	 * The best precision is achieved when the base value is as
 713	 * large as possible.  Find the highest set bit in the tick
 714	 * count, and extract the number of bits in the base field
 715	 * such that that high bit is included.
 716	 */
 717	high = fls(ticks);		/* 1..32 */
 718	width = HWEIGHT32(BASE_VALUE_FMASK);
 719	scale = high > width ? high - width : 0;
 720	if (scale) {
 721		/* If we're scaling, round up to get a closer result */
 722		ticks += 1 << (scale - 1);
 723		/* High bit was set, so rounding might have affected it */
 724		if (fls(ticks) != high)
 725			scale++;
 726	}
 727
 728	val = u32_encode_bits(scale, SCALE_FMASK);
 729	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
 730
 731	return val;
 732}
 733
 734/* If microseconds is 0, timeout is immediate */
 735static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
 736					      u32 microseconds)
 737{
 738	u32 endpoint_id = endpoint->endpoint_id;
 739	struct ipa *ipa = endpoint->ipa;
 740	u32 offset;
 741	u32 val;
 742
 743	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
 744	val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
 745	iowrite32(val, ipa->reg_virt + offset);
 
 
 746}
 747
 748static void
 749ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
 750{
 751	u32 endpoint_id = endpoint->endpoint_id;
 
 
 752	u32 offset;
 753	u32 val;
 754
 755	val = enable ? HOL_BLOCK_EN_FMASK : 0;
 756	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
 757	iowrite32(val, endpoint->ipa->reg_virt + offset);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 758}
 759
 760void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
 761{
 762	u32 i;
 763
 764	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
 765		struct ipa_endpoint *endpoint = &ipa->endpoint[i];
 766
 767		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
 768			continue;
 769
 770		ipa_endpoint_init_hol_block_timer(endpoint, 0);
 771		ipa_endpoint_init_hol_block_enable(endpoint, true);
 772	}
 773}
 774
 775static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
 776{
 777	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
 
 
 778	u32 val = 0;
 779
 780	if (!endpoint->toward_ipa)
 781		return;		/* Register not valid for RX endpoints */
 782
 
 783	/* DEAGGR_HDR_LEN is 0 */
 784	/* PACKET_OFFSET_VALID is 0 */
 785	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
 786	/* MAX_PACKET_LEN is 0 (not enforced) */
 787
 788	iowrite32(val, endpoint->ipa->reg_virt + offset);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 789}
 790
 791static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
 792{
 793	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
 794	u32 seq_type = endpoint->seq_type;
 795	u32 val = 0;
 
 796
 797	if (!endpoint->toward_ipa)
 798		return;		/* Register not valid for RX endpoints */
 799
 800	/* Sequencer type is made up of four nibbles */
 801	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
 802	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
 803	/* The second two apply to replicated packets */
 804	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
 805	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);
 
 
 
 806
 807	iowrite32(val, endpoint->ipa->reg_virt + offset);
 808}
 809
 810/**
 811 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 812 * @endpoint:	Endpoint pointer
 813 * @skb:	Socket buffer to send
 814 *
 815 * Returns:	0 if successful, or a negative error code
 816 */
 817int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
 818{
 819	struct gsi_trans *trans;
 820	u32 nr_frags;
 821	int ret;
 822
 823	/* Make sure source endpoint's TLV FIFO has enough entries to
 824	 * hold the linear portion of the skb and all its fragments.
 825	 * If not, see if we can linearize it before giving up.
 826	 */
 827	nr_frags = skb_shinfo(skb)->nr_frags;
 828	if (1 + nr_frags > endpoint->trans_tre_max) {
 829		if (skb_linearize(skb))
 830			return -E2BIG;
 831		nr_frags = 0;
 832	}
 833
 834	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
 835	if (!trans)
 836		return -EBUSY;
 837
 838	ret = gsi_trans_skb_add(trans, skb);
 839	if (ret)
 840		goto err_trans_free;
 841	trans->data = skb;	/* transaction owns skb now */
 842
 843	gsi_trans_commit(trans, !netdev_xmit_more());
 844
 845	return 0;
 846
 847err_trans_free:
 848	gsi_trans_free(trans);
 849
 850	return -ENOMEM;
 851}
 852
 853static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
 854{
 855	u32 endpoint_id = endpoint->endpoint_id;
 856	struct ipa *ipa = endpoint->ipa;
 
 857	u32 val = 0;
 858	u32 offset;
 859
 860	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
 861
 862	if (endpoint->data->status_enable) {
 863		val |= STATUS_EN_FMASK;
 
 864		if (endpoint->toward_ipa) {
 865			enum ipa_endpoint_name name;
 866			u32 status_endpoint_id;
 867
 868			name = endpoint->data->tx.status_endpoint;
 869			status_endpoint_id = ipa->name_map[name]->endpoint_id;
 870
 871			val |= u32_encode_bits(status_endpoint_id,
 872					       STATUS_ENDP_FMASK);
 873		}
 874		/* STATUS_LOCATION is 0 (status element precedes packet) */
 875		/* The next field is present for IPA v4.0 and above */
 876		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
 
 877	}
 878
 879	iowrite32(val, ipa->reg_virt + offset);
 880}
 881
 882static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
 
 883{
 884	struct gsi_trans *trans;
 885	bool doorbell = false;
 886	struct page *page;
 
 887	u32 offset;
 888	u32 len;
 889	int ret;
 890
 891	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
 
 892	if (!page)
 893		return -ENOMEM;
 894
 895	trans = ipa_endpoint_trans_alloc(endpoint, 1);
 896	if (!trans)
 897		goto err_free_pages;
 898
 899	/* Offset the buffer to make space for skb headroom */
 900	offset = NET_SKB_PAD;
 901	len = IPA_RX_BUFFER_SIZE - offset;
 902
 903	ret = gsi_trans_page_add(trans, page, len, offset);
 904	if (ret)
 905		goto err_trans_free;
 906	trans->data = page;	/* transaction owns page now */
 
 907
 908	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
 909		doorbell = true;
 910		endpoint->replenish_ready = 0;
 911	}
 912
 913	gsi_trans_commit(trans, doorbell);
 914
 915	return 0;
 916
 917err_trans_free:
 918	gsi_trans_free(trans);
 919err_free_pages:
 920	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
 921
 922	return -ENOMEM;
 923}
 924
 925/**
 926 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 927 * @endpoint:	Endpoint to be replenished
 928 * @count:	Number of buffers to send to hardware
 929 *
 930 * Allocate RX packet wrapper structures with maximal socket buffers
 931 * for an endpoint.  These are supplied to the hardware, which fills
 932 * them with incoming data.
 
 
 
 933 */
 934static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
 935{
 936	struct gsi *gsi;
 937	u32 backlog;
 
 
 938
 939	if (!endpoint->replenish_enabled) {
 940		if (count)
 941			atomic_add(count, &endpoint->replenish_saved);
 942		return;
 943	}
 944
 
 
 945
 946	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
 947		if (ipa_endpoint_replenish_one(endpoint))
 948			goto try_again_later;
 949	if (count)
 950		atomic_add(count, &endpoint->replenish_backlog);
 
 
 
 
 
 
 951
 952	return;
 953
 954try_again_later:
 955	/* The last one didn't succeed, so fix the backlog */
 956	backlog = atomic_inc_return(&endpoint->replenish_backlog);
 957
 958	if (count)
 959		atomic_add(count, &endpoint->replenish_backlog);
 960
 961	/* Whenever a receive buffer transaction completes we'll try to
 962	 * replenish again.  It's unlikely, but if we fail to supply even
 963	 * one buffer, nothing will trigger another replenish attempt.
 964	 * Receive buffer transactions use one TRE, so schedule work to
 965	 * try replenishing again if our backlog is *all* available TREs.
 966	 */
 967	gsi = &endpoint->ipa->gsi;
 968	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
 969		schedule_delayed_work(&endpoint->replenish_work,
 970				      msecs_to_jiffies(1));
 971}
 972
 973static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 974{
 975	struct gsi *gsi = &endpoint->ipa->gsi;
 976	u32 max_backlog;
 977	u32 saved;
 978
 979	endpoint->replenish_enabled = true;
 980	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
 981		atomic_add(saved, &endpoint->replenish_backlog);
 982
 983	/* Start replenishing if hardware currently has no buffers */
 984	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
 985	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
 986		ipa_endpoint_replenish(endpoint, 0);
 987}
 988
 989static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
 990{
 991	u32 backlog;
 992
 993	endpoint->replenish_enabled = false;
 994	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
 995		atomic_add(backlog, &endpoint->replenish_saved);
 996}
 997
 998static void ipa_endpoint_replenish_work(struct work_struct *work)
 999{
1000	struct delayed_work *dwork = to_delayed_work(work);
1001	struct ipa_endpoint *endpoint;
1002
1003	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1004
1005	ipa_endpoint_replenish(endpoint, 0);
1006}
1007
1008static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1009				  void *data, u32 len, u32 extra)
1010{
1011	struct sk_buff *skb;
1012
 
 
 
1013	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1014	if (skb) {
 
1015		skb_put(skb, len);
1016		memcpy(skb->data, data, len);
1017		skb->truesize += extra;
1018	}
1019
1020	/* Now receive it, or drop it if there's no netdev */
1021	if (endpoint->netdev)
1022		ipa_modem_skb_rx(endpoint->netdev, skb);
1023	else if (skb)
1024		dev_kfree_skb_any(skb);
1025}
1026
1027static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1028				   struct page *page, u32 len)
1029{
 
1030	struct sk_buff *skb;
1031
1032	/* Nothing to do if there's no netdev */
1033	if (!endpoint->netdev)
1034		return false;
1035
1036	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
1037	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
 
1038	if (skb) {
1039		/* Reserve the headroom and account for the data */
1040		skb_reserve(skb, NET_SKB_PAD);
1041		skb_put(skb, len);
1042	}
1043
1044	/* Receive the buffer (or record drop if unable to build it) */
1045	ipa_modem_skb_rx(endpoint->netdev, skb);
1046
1047	return skb != NULL;
1048}
1049
1050/* The format of a packet status element is the same for several status
1051 * types (opcodes).  The NEW_FRAG_RULE, LOG, DCMP (decompression) types
1052 * aren't currently supported
1053 */
1054static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1055{
1056	switch (opcode) {
1057	case IPA_STATUS_OPCODE_PACKET:
1058	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1059	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1060	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1061		return true;
1062	default:
1063		return false;
1064	}
1065}
1066
1067static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1068				     const struct ipa_status *status)
1069{
 
 
1070	u32 endpoint_id;
1071
1072	if (!ipa_status_format_packet(status->opcode))
 
1073		return true;
1074	if (!status->pkt_len)
1075		return true;
1076	endpoint_id = u32_get_bits(status->endp_dst_idx,
1077				   IPA_STATUS_DST_IDX_FMASK);
1078	if (endpoint_id != endpoint->endpoint_id)
1079		return true;
1080
1081	return false;	/* Don't skip this packet, process it */
1082}
1083
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1084/* Return whether the status indicates the packet should be dropped */
1085static bool ipa_status_drop_packet(const struct ipa_status *status)
 
1086{
1087	u32 val;
 
 
 
 
 
 
1088
1089	/* Deaggregation exceptions we drop; others we consume */
1090	if (status->exception)
1091		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
 
1092
1093	/* Drop the packet if it fails to match a routing rule; otherwise no */
1094	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1095
1096	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1097}
1098
1099static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1100				      struct page *page, u32 total_len)
1101{
 
1102	void *data = page_address(page) + NET_SKB_PAD;
1103	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
 
 
1104	u32 resid = total_len;
1105
1106	while (resid) {
1107		const struct ipa_status *status = data;
1108		u32 align;
1109		u32 len;
1110
1111		if (resid < sizeof(*status)) {
1112			dev_err(&endpoint->ipa->pdev->dev,
1113				"short message (%u bytes < %zu byte status)\n",
1114				resid, sizeof(*status));
1115			break;
1116		}
1117
1118		/* Skip over status packets that lack packet data */
1119		if (ipa_endpoint_status_skip(endpoint, status)) {
1120			data += sizeof(*status);
1121			resid -= sizeof(*status);
 
1122			continue;
1123		}
1124
1125		/* Compute the amount of buffer space consumed by the
1126		 * packet, including the status element.  If the hardware
1127		 * is configured to pad packet data to an aligned boundary,
1128		 * account for that.  And if checksum offload is is enabled
1129		 * a trailer containing computed checksum information will
1130		 * be appended.
1131		 */
1132		align = endpoint->data->rx.pad_align ? : 1;
1133		len = le16_to_cpu(status->pkt_len);
1134		len = sizeof(*status) + ALIGN(len, align);
1135		if (endpoint->data->checksum)
1136			len += sizeof(struct rmnet_map_dl_csum_trailer);
1137
1138		/* Charge the new packet with a proportional fraction of
1139		 * the unused space in the original receive buffer.
1140		 * XXX Charge a proportion of the *whole* receive buffer?
1141		 */
1142		if (!ipa_status_drop_packet(status)) {
1143			u32 extra = unused * len / total_len;
1144			void *data2 = data + sizeof(*status);
1145			u32 len2 = le16_to_cpu(status->pkt_len);
1146
1147			/* Client receives only packet data (no status) */
1148			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
 
 
 
 
 
 
 
 
1149		}
1150
1151		/* Consume status and the full packet it describes */
1152		data += len;
1153		resid -= len;
1154	}
1155}
1156
1157/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
1158static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
1159				     struct gsi_trans *trans)
1160{
1161}
1162
1163/* Complete transaction initiated in ipa_endpoint_replenish_one() */
1164static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1165				     struct gsi_trans *trans)
1166{
1167	struct page *page;
1168
1169	ipa_endpoint_replenish(endpoint, 1);
 
1170
1171	if (trans->cancelled)
1172		return;
1173
1174	/* Parse or build a socket buffer using the actual received length */
1175	page = trans->data;
1176	if (endpoint->data->status_enable)
1177		ipa_endpoint_status_parse(endpoint, page, trans->len);
1178	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1179		trans->data = NULL;	/* Pages have been consumed */
1180}
1181
1182void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1183				 struct gsi_trans *trans)
1184{
1185	if (endpoint->toward_ipa)
1186		ipa_endpoint_tx_complete(endpoint, trans);
1187	else
1188		ipa_endpoint_rx_complete(endpoint, trans);
1189}
1190
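/* Release the resource (socket buffer or receive page) still held by a
 * transaction when it is freed
 */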
1191void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1192				struct gsi_trans *trans)
1193{
1194	if (endpoint->toward_ipa) {
1195		struct ipa *ipa = endpoint->ipa;
1196
1197		/* Nothing to do for command transactions */
1198		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1199			struct sk_buff *skb = trans->data;
1200
1201			if (skb)
1202				dev_kfree_skb_any(skb);
1203		}
1204	} else {
1205		struct page *page = trans->data;
1206
1207		if (page)
1208			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
1209	}
1210}
1211
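/* Set the default route (and default fragment route) to the given endpoint,
 * retaining headers on default-routed packets
 */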
1212void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1213{
1214	u32 val;
1215
1216	/* ROUTE_DIS is 0 */
1217	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1218	val |= ROUTE_DEF_HDR_TABLE_FMASK;
1219	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1220	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1221	val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1222
1223	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1224}
1225
1226void ipa_endpoint_default_route_clear(struct ipa *ipa)
1227{
1228	ipa_endpoint_default_route_set(ipa, 0);
1229}
1230
1231/**
1232 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1233 * @endpoint:	Endpoint to be reset
1234 *
1235 * If aggregation is active on an RX endpoint when a reset is performed
1236 * on its underlying GSI channel, a special sequence of actions must be
1237 * taken to ensure the IPA pipeline is properly cleared.
1238 *
1239 * Return:	0 if successful, or a negative error code
1240 */
1241static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1242{
1243	struct device *dev = &endpoint->ipa->pdev->dev;
1244	struct ipa *ipa = endpoint->ipa;
1245	struct gsi *gsi = &ipa->gsi;
1246	bool suspended = false;
1247	dma_addr_t addr;
1248	bool legacy;
1249	u32 retries;
1250	u32 len = 1;
1251	void *virt;
1252	int ret;
1253
1254	virt = kzalloc(len, GFP_KERNEL);
1255	if (!virt)
1256		return -ENOMEM;
1257
1258	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1259	if (dma_mapping_error(dev, addr)) {
1260		ret = -ENOMEM;
1261		goto out_kfree;
1262	}
1263
1264	/* Force close aggregation before issuing the reset */
1265	ipa_endpoint_force_close(endpoint);
1266
1267	/* Reset and reconfigure the channel with the doorbell engine
1268	 * disabled.  Then poll until we know aggregation is no longer
1269	 * active.  We'll re-enable the doorbell (if appropriate) when
1270	 * we reset again below.
1271	 */
1272	gsi_channel_reset(gsi, endpoint->channel_id, false);
1273
1274	/* Make sure the channel isn't suspended */
1275	suspended = ipa_endpoint_program_suspend(endpoint, false);
1276
1277	/* Start channel and do a 1 byte read */
1278	ret = gsi_channel_start(gsi, endpoint->channel_id);
1279	if (ret)
1280		goto out_suspend_again;
1281
1282	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1283	if (ret)
1284		goto err_endpoint_stop;
1285
1286	/* Wait for aggregation to be closed on the channel */
1287	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1288	do {
1289		if (!ipa_endpoint_aggr_active(endpoint))
1290			break;
1291		msleep(1);
1292	} while (retries--);
1293
1294	/* Check one last time */
1295	if (ipa_endpoint_aggr_active(endpoint))
1296		dev_err(dev, "endpoint %u still active during reset\n",
1297			endpoint->endpoint_id);
1298
1299	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1300
1301	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1302	if (ret)
1303		goto out_suspend_again;
1304
1305	/* Finally, reset and reconfigure the channel again (re-enabling
1306	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1307	 * complete the channel reset sequence.  Finish by suspending the
1308	 * channel again (if necessary).
1309	 */
1310	legacy = ipa->version == IPA_VERSION_3_5_1;
1311	gsi_channel_reset(gsi, endpoint->channel_id, legacy);
1312
1313	msleep(1);
1314
1315	goto out_suspend_again;
1316
1317err_endpoint_stop:
1318	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1319out_suspend_again:
1320	if (suspended)
1321		(void)ipa_endpoint_program_suspend(endpoint, true);
1322	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1323out_kfree:
1324	kfree(virt);
1325
1326	return ret;
1327}
1328
1329static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1330{
1331	u32 channel_id = endpoint->channel_id;
1332	struct ipa *ipa = endpoint->ipa;
1333	bool special;
1334	bool legacy;
1335	int ret = 0;
1336
1337	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1338	 * is active, we need to handle things specially to recover.
1339	 * All other cases just need to reset the underlying GSI channel.
1340	 *
1341	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
1342	 */
1343	legacy = ipa->version == IPA_VERSION_3_5_1;
1344	special = !endpoint->toward_ipa && endpoint->data->aggregation;
1345	if (special && ipa_endpoint_aggr_active(endpoint))
1346		ret = ipa_endpoint_reset_rx_aggr(endpoint);
1347	else
1348		gsi_channel_reset(&ipa->gsi, channel_id, legacy);
1349
1350	if (ret)
1351		dev_err(&ipa->pdev->dev,
1352			"error %d resetting channel %u for endpoint %u\n",
1353			ret, endpoint->channel_id, endpoint->endpoint_id);
1354}
1355
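/* Program all of an endpoint's configuration registers */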
1356static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1357{
1358	if (endpoint->toward_ipa)
1359		ipa_endpoint_program_delay(endpoint, false);
1360	else
1361		(void)ipa_endpoint_program_suspend(endpoint, false);
1362	ipa_endpoint_init_cfg(endpoint);
1363	ipa_endpoint_init_hdr(endpoint);
1364	ipa_endpoint_init_hdr_ext(endpoint);
1365	ipa_endpoint_init_hdr_metadata_mask(endpoint);
1366	ipa_endpoint_init_mode(endpoint);
1367	ipa_endpoint_init_aggr(endpoint);
1368	ipa_endpoint_init_deaggr(endpoint);
1369	ipa_endpoint_init_seq(endpoint);
1370	ipa_endpoint_status(endpoint);
1371}
1372
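/* Start an endpoint's channel; for RX endpoints also enable its suspend
 * interrupt and buffer replenishing.  Records the endpoint as enabled.
 */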
1373int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1374{
1375	struct ipa *ipa = endpoint->ipa;
1376	struct gsi *gsi = &ipa->gsi;
1377	int ret;
1378
1379	ret = gsi_channel_start(gsi, endpoint->channel_id);
1380	if (ret) {
1381		dev_err(&ipa->pdev->dev,
1382			"error %d starting %cX channel %u for endpoint %u\n",
1383			ret, endpoint->toward_ipa ? 'T' : 'R',
1384			endpoint->channel_id, endpoint->endpoint_id);
1385		return ret;
1386	}
1387
1388	if (!endpoint->toward_ipa) {
1389		ipa_interrupt_suspend_enable(ipa->interrupt,
1390					     endpoint->endpoint_id);
1391		ipa_endpoint_replenish_enable(endpoint);
1392	}
1393
1394	ipa->enabled |= BIT(endpoint->endpoint_id);
1395
1396	return 0;
1397}
1398
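/* Stop an enabled endpoint: disable replenishing and the suspend interrupt
 * for RX endpoints, then stop the underlying channel
 */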
1399void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1400{
1401	u32 mask = BIT(endpoint->endpoint_id);
1402	struct ipa *ipa = endpoint->ipa;
1403	struct gsi *gsi = &ipa->gsi;
1404	int ret;
1405
1406	if (!(ipa->enabled & mask))
1407		return;
1408
1409	ipa->enabled ^= mask;
1410
1411	if (!endpoint->toward_ipa) {
1412		ipa_endpoint_replenish_disable(endpoint);
1413		ipa_interrupt_suspend_disable(ipa->interrupt,
1414					      endpoint->endpoint_id);
1415	}
1416
1417	/* Note that if stop fails, the channel's state is not well-defined */
1418	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1419	if (ret)
1420		dev_err(&ipa->pdev->dev,
1421			"error %d attempting to stop endpoint %u\n", ret,
1422			endpoint->endpoint_id);
1423}
1424
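/* Suspend a single enabled endpoint: quiesce RX activity, then suspend its
 * underlying GSI channel
 */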
1425void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1426{
1427	struct device *dev = &endpoint->ipa->pdev->dev;
1428	struct gsi *gsi = &endpoint->ipa->gsi;
1429	bool stop_channel;
1430	int ret;
1431
1432	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1433		return;
1434
1435	if (!endpoint->toward_ipa)
1436		ipa_endpoint_replenish_disable(endpoint);
1437
1438	if (!endpoint->toward_ipa)
1439		(void)ipa_endpoint_program_suspend(endpoint, true);
1440
1441	/* IPA v3.5.1 doesn't use channel stop for suspend */
1442	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
1443	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
1444	if (ret)
1445		dev_err(dev, "error %d suspending channel %u\n", ret,
1446			endpoint->channel_id);
1447}
1448
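/* Resume an endpoint previously suspended by ipa_endpoint_suspend_one() */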
1449void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1450{
1451	struct device *dev = &endpoint->ipa->pdev->dev;
1452	struct gsi *gsi = &endpoint->ipa->gsi;
1453	bool start_channel;
1454	int ret;
1455
1456	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1457		return;
1458
1459	if (!endpoint->toward_ipa)
1460		(void)ipa_endpoint_program_suspend(endpoint, false);
1461
1462	/* IPA v3.5.1 doesn't use channel start for resume */
1463	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
1464	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
1465	if (ret)
1466		dev_err(dev, "error %d resuming channel %u\n", ret,
1467			endpoint->channel_id);
1468	else if (!endpoint->toward_ipa)
1469		ipa_endpoint_replenish_enable(endpoint);
1470}
1471
1472void ipa_endpoint_suspend(struct ipa *ipa)
1473{
1474	if (ipa->modem_netdev)
1475		ipa_modem_suspend(ipa->modem_netdev);
1476
1477	ipa_cmd_tag_process(ipa);
1478
1479	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1480	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1481}
1482
1483void ipa_endpoint_resume(struct ipa *ipa)
1484{
1485	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1486	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1487
1488	if (ipa->modem_netdev)
1489		ipa_modem_resume(ipa->modem_netdev);
1490}
1491
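/* Set up an AP endpoint: record its transaction limit, initialize RX
 * replenish state, and program its configuration registers
 */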
1492static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1493{
1494	struct gsi *gsi = &endpoint->ipa->gsi;
1495	u32 channel_id = endpoint->channel_id;
1496
1497	/* Only AP endpoints get set up */
1498	if (endpoint->ee_id != GSI_EE_AP)
1499		return;
1500
1501	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
1502	if (!endpoint->toward_ipa) {
1503		/* RX transactions require a single TRE, so the maximum
1504		 * backlog is the same as the maximum outstanding TREs.
1505		 */
1506		endpoint->replenish_enabled = false;
1507		atomic_set(&endpoint->replenish_saved,
1508			   gsi_channel_tre_max(gsi, endpoint->channel_id));
1509		atomic_set(&endpoint->replenish_backlog, 0);
1510		INIT_DELAYED_WORK(&endpoint->replenish_work,
1511				  ipa_endpoint_replenish_work);
1512	}
1513
1514	ipa_endpoint_program(endpoint);
1515
1516	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1517}
1518
1519static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1520{
1521	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1522
1523	if (!endpoint->toward_ipa)
1524		cancel_delayed_work_sync(&endpoint->replenish_work);
1525
1526	ipa_endpoint_reset(endpoint);
1527}
1528
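/* Set up every endpoint that has been initialized */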
1529void ipa_endpoint_setup(struct ipa *ipa)
1530{
1531	u32 initialized = ipa->initialized;
1532
1533	ipa->set_up = 0;
1534	while (initialized) {
1535		u32 endpoint_id = __ffs(initialized);
1536
1537		initialized ^= BIT(endpoint_id);
1538
1539		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1540	}
1541}
1542
1543void ipa_endpoint_teardown(struct ipa *ipa)
1544{
1545	u32 set_up = ipa->set_up;
1546
1547	while (set_up) {
1548		u32 endpoint_id = __fls(set_up);
1549
1550		set_up ^= BIT(endpoint_id);
1551
1552		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1553	}
1554	ipa->set_up = 0;
1555}
1556
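/* Verify that the initialized endpoints are consistent with what the
 * hardware reports it supports (endpoint count and direction)
 */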
1557int ipa_endpoint_config(struct ipa *ipa)
1558{
1559	struct device *dev = &ipa->pdev->dev;
1560	u32 initialized;
1561	u32 rx_base;
1562	u32 rx_mask;
1563	u32 tx_mask;
1564	int ret = 0;
1565	u32 max;
1566	u32 val;
1567
1568	/* Find out about the endpoints supplied by the hardware, and ensure
1569	 * the highest one doesn't exceed the number we support.
1570	 */
1571	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
1572
1573	/* Our RX is an IPA producer */
1574	rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
1575	max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
1576	if (max > IPA_ENDPOINT_MAX) {
1577		dev_err(dev, "too many endpoints (%u > %u)\n",
1578			max, IPA_ENDPOINT_MAX);
1579		return -EINVAL;
1580	}
1581	rx_mask = GENMASK(max - 1, rx_base);
1582
1583	/* Our TX is an IPA consumer */
1584	max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
1585	tx_mask = GENMASK(max - 1, 0);
1586
1587	ipa->available = rx_mask | tx_mask;
1588
1589	/* Check for initialized endpoints not supported by the hardware */
1590	if (ipa->initialized & ~ipa->available) {
1591		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1592			ipa->initialized & ~ipa->available);
1593		ret = -EINVAL;		/* Report other errors too */
1594	}
1595
1596	initialized = ipa->initialized;
1597	while (initialized) {
1598		u32 endpoint_id = __ffs(initialized);
1599		struct ipa_endpoint *endpoint;
1600
1601		initialized ^= BIT(endpoint_id);
1602
1603		/* Make sure it's pointing in the right direction */
1604		endpoint = &ipa->endpoint[endpoint_id];
1605		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
1606			dev_err(dev, "endpoint id %u wrong direction\n",
1607				endpoint_id);
1608			ret = -EINVAL;
1609		}
1610	}
1611
1612	return ret;
1613}
1614
1615void ipa_endpoint_deconfig(struct ipa *ipa)
1616{
1617	ipa->available = 0;	/* Nothing more to do */
1618}
1619
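/* Record configuration data for a single endpoint, and add it to the
 * channel and name maps
 */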
1620static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1621				  const struct ipa_gsi_endpoint_data *data)
1622{
1623	struct ipa_endpoint *endpoint;
1624
1625	endpoint = &ipa->endpoint[data->endpoint_id];
1626
1627	if (data->ee_id == GSI_EE_AP)
1628		ipa->channel_map[data->channel_id] = endpoint;
1629	ipa->name_map[name] = endpoint;
1630
1631	endpoint->ipa = ipa;
1632	endpoint->ee_id = data->ee_id;
1633	endpoint->seq_type = data->endpoint.seq_type;
1634	endpoint->channel_id = data->channel_id;
1635	endpoint->endpoint_id = data->endpoint_id;
1636	endpoint->toward_ipa = data->toward_ipa;
1637	endpoint->data = &data->endpoint.config;
1638
1639	ipa->initialized |= BIT(endpoint->endpoint_id);
1640}
1641
1642void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1643{
1644	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1645
1646	memset(endpoint, 0, sizeof(*endpoint));
1647}
1648
1649void ipa_endpoint_exit(struct ipa *ipa)
1650{
1651	u32 initialized = ipa->initialized;
1652
1653	while (initialized) {
1654		u32 endpoint_id = __fls(initialized);
1655
1656		initialized ^= BIT(endpoint_id);
1657
1658		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1659	}
1660	memset(ipa->name_map, 0, sizeof(ipa->name_map));
1661	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1662}
1663
1664/* Returns a bitmask of endpoints that support filtering, or 0 on error */
1665u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
1666		      const struct ipa_gsi_endpoint_data *data)
1667{
1668	enum ipa_endpoint_name name;
1669	u32 filter_map;
1670
1671	if (!ipa_endpoint_data_valid(ipa, count, data))
1672		return 0;	/* Error */
1673
1674	ipa->initialized = 0;
1675
1676	filter_map = 0;
1677	for (name = 0; name < count; name++, data++) {
1678		if (ipa_gsi_endpoint_data_empty(data))
1679			continue;	/* Skip over empty slots */
1680
1681		ipa_endpoint_init_one(ipa, name, data);
1682
1683		if (data->endpoint.filter_support)
1684			filter_map |= BIT(data->endpoint_id);
1685	}
1686
1687	if (!ipa_filter_map_valid(ipa, filter_map))
1688		goto err_endpoint_exit;
1689
1690	return filter_map;	/* Non-zero bitmask */
1691
1692err_endpoint_exit:
1693	ipa_endpoint_exit(ipa);
1694
1695	return 0;	/* Error */
1696}