   1// SPDX-License-Identifier: GPL-2.0
   2
   3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2019-2021 Linaro Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <linux/device.h>
   9#include <linux/slab.h>
  10#include <linux/bitfield.h>
  11#include <linux/if_rmnet.h>
  12#include <linux/dma-direction.h>
  13
  14#include "gsi.h"
  15#include "gsi_trans.h"
  16#include "ipa.h"
  17#include "ipa_data.h"
  18#include "ipa_endpoint.h"
  19#include "ipa_cmd.h"
  20#include "ipa_mem.h"
  21#include "ipa_modem.h"
  22#include "ipa_table.h"
  23#include "ipa_gsi.h"
  24#include "ipa_clock.h"
  25
  26#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
  27
  28#define IPA_REPLENISH_BATCH	16
  29
   30/* RX buffer is 1 page (or a power-of-2 number of contiguous pages) */
  31#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */
  32
  33/* The amount of RX buffer space consumed by standard skb overhead */
  34#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
  35
  36/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
  37#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
  38
  39#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
  40#define IPA_AGGR_TIME_LIMIT			500	/* microseconds */
  41
  42/** enum ipa_status_opcode - status element opcode hardware values */
  43enum ipa_status_opcode {
  44	IPA_STATUS_OPCODE_PACKET		= 0x01,
  45	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
  46	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
  47	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
  48};
  49
  50/** enum ipa_status_exception - status element exception type */
  51enum ipa_status_exception {
  52	/* 0 means no exception */
  53	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
  54};
  55
  56/* Status element provided by hardware */
  57struct ipa_status {
  58	u8 opcode;		/* enum ipa_status_opcode */
  59	u8 exception;		/* enum ipa_status_exception */
  60	__le16 mask;
  61	__le16 pkt_len;
  62	u8 endp_src_idx;
  63	u8 endp_dst_idx;
  64	__le32 metadata;
  65	__le32 flags1;
  66	__le64 flags2;
  67	__le32 flags3;
  68	__le32 flags4;
  69};
  70
   71/* Field masks for struct ipa_status fields */
  72#define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
  73#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
  74#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
  75#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
  76#define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
  77
  78static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
  79			    const struct ipa_gsi_endpoint_data *all_data,
  80			    const struct ipa_gsi_endpoint_data *data)
  81{
  82	const struct ipa_gsi_endpoint_data *other_data;
  83	struct device *dev = &ipa->pdev->dev;
  84	enum ipa_endpoint_name other_name;
  85
  86	if (ipa_gsi_endpoint_data_empty(data))
  87		return true;
  88
  89	if (!data->toward_ipa) {
  90		if (data->endpoint.filter_support) {
  91			dev_err(dev, "filtering not supported for "
  92					"RX endpoint %u\n",
  93				data->endpoint_id);
  94			return false;
  95		}
  96
  97		return true;	/* Nothing more to check for RX */
  98	}
  99
 100	if (data->endpoint.config.status_enable) {
 101		other_name = data->endpoint.config.tx.status_endpoint;
 102		if (other_name >= count) {
 103			dev_err(dev, "status endpoint name %u out of range "
 104					"for endpoint %u\n",
 105				other_name, data->endpoint_id);
 106			return false;
 107		}
 108
 109		/* Status endpoint must be defined... */
 110		other_data = &all_data[other_name];
 111		if (ipa_gsi_endpoint_data_empty(other_data)) {
  112			dev_err(dev, "status endpoint name %u undefined "
 113					"for endpoint %u\n",
 114				other_name, data->endpoint_id);
 115			return false;
 116		}
 117
 118		/* ...and has to be an RX endpoint... */
 119		if (other_data->toward_ipa) {
 120			dev_err(dev,
 121				"status endpoint for endpoint %u not RX\n",
 122				data->endpoint_id);
 123			return false;
 124		}
 125
 126		/* ...and if it's to be an AP endpoint... */
 127		if (other_data->ee_id == GSI_EE_AP) {
 128			/* ...make sure it has status enabled. */
 129			if (!other_data->endpoint.config.status_enable) {
 130				dev_err(dev,
 131					"status not enabled for endpoint %u\n",
 132					other_data->endpoint_id);
 133				return false;
 134			}
 135		}
 136	}
 137
 138	if (data->endpoint.config.dma_mode) {
 139		other_name = data->endpoint.config.dma_endpoint;
 140		if (other_name >= count) {
 141			dev_err(dev, "DMA endpoint name %u out of range "
 142					"for endpoint %u\n",
 143				other_name, data->endpoint_id);
 144			return false;
 145		}
 146
 147		other_data = &all_data[other_name];
 148		if (ipa_gsi_endpoint_data_empty(other_data)) {
 149			dev_err(dev, "DMA endpoint name %u undefined "
 150					"for endpoint %u\n",
 151				other_name, data->endpoint_id);
 152			return false;
 153		}
 154	}
 155
 156	return true;
 157}
 158
 159static u32 aggr_byte_limit_max(enum ipa_version version)
 160{
 161	if (version < IPA_VERSION_4_5)
 162		return field_max(aggr_byte_limit_fmask(true));
 163
 164	return field_max(aggr_byte_limit_fmask(false));
 165}
 166
 167static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 168				    const struct ipa_gsi_endpoint_data *data)
 169{
 170	const struct ipa_gsi_endpoint_data *dp = data;
 171	struct device *dev = &ipa->pdev->dev;
 172	enum ipa_endpoint_name name;
 173	u32 limit;
 174
 175	if (count > IPA_ENDPOINT_COUNT) {
 176		dev_err(dev, "too many endpoints specified (%u > %u)\n",
 177			count, IPA_ENDPOINT_COUNT);
 178		return false;
 179	}
 180
 181	/* The aggregation byte limit defines the point at which an
 182	 * aggregation window will close.  It is programmed into the
 183	 * IPA hardware as a number of KB.  We don't use "hard byte
 184	 * limit" aggregation, which means that we need to supply
 185	 * enough space in a receive buffer to hold a complete MTU
 186	 * plus normal skb overhead *after* that aggregation byte
 187	 * limit has been crossed.
 188	 *
 189	 * This check ensures we don't define a receive buffer size
 190	 * that would exceed what we can represent in the field that
 191	 * is used to program its size.
 192	 */
 193	limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
 194	limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
 195	if (limit < IPA_RX_BUFFER_SIZE) {
 196		dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
 197			IPA_RX_BUFFER_SIZE, limit);
 198		return false;
 199	}
 200
 201	/* Make sure needed endpoints have defined data */
 202	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
 203		dev_err(dev, "command TX endpoint not defined\n");
 204		return false;
 205	}
 206	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
 207		dev_err(dev, "LAN RX endpoint not defined\n");
 208		return false;
 209	}
 210	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
 211		dev_err(dev, "AP->modem TX endpoint not defined\n");
 212		return false;
 213	}
 214	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
 215		dev_err(dev, "AP<-modem RX endpoint not defined\n");
 216		return false;
 217	}
 218
 219	for (name = 0; name < count; name++, dp++)
 220		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
 221			return false;
 222
 223	return true;
 224}
 225
 226/* Allocate a transaction to use on a non-command endpoint */
 227static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
 228						  u32 tre_count)
 229{
 230	struct gsi *gsi = &endpoint->ipa->gsi;
 231	u32 channel_id = endpoint->channel_id;
 232	enum dma_data_direction direction;
 233
 234	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 235
 236	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
 237}
 238
 239/* suspend_delay represents suspend for RX, delay for TX endpoints.
 240 * Note that suspend is not supported starting with IPA v4.0.
 241 */
 242static bool
 243ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
 244{
 245	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
 246	struct ipa *ipa = endpoint->ipa;
 247	bool state;
 248	u32 mask;
 249	u32 val;
 250
 251	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
 252	 * correctly on IPA v4.2.
 253	 *
 254	 * if (endpoint->toward_ipa)
  255 * 	assert(ipa->version != IPA_VERSION_4_2);
 256	 * else
 257	 *	assert(ipa->version < IPA_VERSION_4_0);
 258	 */
 259	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
 260
 261	val = ioread32(ipa->reg_virt + offset);
 262	/* Don't bother if it's already in the requested state */
 263	state = !!(val & mask);
 264	if (suspend_delay != state) {
 265		val ^= mask;
 266		iowrite32(val, ipa->reg_virt + offset);
 267	}
 268
 269	return state;
 270}
 271
 272/* We currently don't care what the previous state was for delay mode */
 273static void
 274ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
 275{
 276	/* assert(endpoint->toward_ipa); */
 277
 278	/* Delay mode doesn't work properly for IPA v4.2 */
 279	if (endpoint->ipa->version != IPA_VERSION_4_2)
 280		(void)ipa_endpoint_init_ctrl(endpoint, enable);
 281}
 282
 283static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
 284{
 285	u32 mask = BIT(endpoint->endpoint_id);
 286	struct ipa *ipa = endpoint->ipa;
 287	u32 offset;
 288	u32 val;
 289
 290	/* assert(mask & ipa->available); */
 291	offset = ipa_reg_state_aggr_active_offset(ipa->version);
 292	val = ioread32(ipa->reg_virt + offset);
 293
 294	return !!(val & mask);
 295}
 296
 297static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
 298{
 299	u32 mask = BIT(endpoint->endpoint_id);
 300	struct ipa *ipa = endpoint->ipa;
 301
 302	/* assert(mask & ipa->available); */
 303	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
 304}
 305
 306/**
 307 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 308 * @endpoint:	Endpoint on which to emulate a suspend
 309 *
 310 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 311 *  with an open aggregation frame.  This is to work around a hardware
 312 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 313 *  generated when it should be.
 314 */
 315static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
 316{
 317	struct ipa *ipa = endpoint->ipa;
 318
 319	if (!endpoint->data->aggregation)
 320		return;
 321
 322	/* Nothing to do if the endpoint doesn't have aggregation open */
 323	if (!ipa_endpoint_aggr_active(endpoint))
 324		return;
 325
 326	/* Force close aggregation */
 327	ipa_endpoint_force_close(endpoint);
 328
 329	ipa_interrupt_simulate_suspend(ipa->interrupt);
 330}
 331
 332/* Returns previous suspend state (true means suspend was enabled) */
 333static bool
 334ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
 335{
 336	bool suspended;
 337
 338	if (endpoint->ipa->version >= IPA_VERSION_4_0)
 339		return enable;	/* For IPA v4.0+, no change made */
 340
 341	/* assert(!endpoint->toward_ipa); */
 342
 343	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
 344
 345	/* A client suspended with an open aggregation frame will not
 346	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
 347	 * ipa_endpoint_suspend_aggr() handle this.
 348	 */
 349	if (enable && !suspended)
 350		ipa_endpoint_suspend_aggr(endpoint);
 351
 352	return suspended;
 353}
 354
 355/* Enable or disable delay or suspend mode on all modem endpoints */
 356void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
 357{
 358	u32 endpoint_id;
 359
 360	/* DELAY mode doesn't work correctly on IPA v4.2 */
 361	if (ipa->version == IPA_VERSION_4_2)
 362		return;
 363
 364	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
 365		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
 366
 367		if (endpoint->ee_id != GSI_EE_MODEM)
 368			continue;
 369
 370		/* Set TX delay mode or RX suspend mode */
 371		if (endpoint->toward_ipa)
 372			ipa_endpoint_program_delay(endpoint, enable);
 373		else
 374			(void)ipa_endpoint_program_suspend(endpoint, enable);
 375	}
 376}
 377
 378/* Reset all modem endpoints to use the default exception endpoint */
 379int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
 380{
 381	u32 initialized = ipa->initialized;
 382	struct gsi_trans *trans;
 383	u32 count;
 384
 385	/* We need one command per modem TX endpoint.  We can get an upper
 386	 * bound on that by assuming all initialized endpoints are modem->IPA.
 387	 * That won't happen, and we could be more precise, but this is fine
 388	 * for now.  End the transaction with commands to clear the pipeline.
 389	 */
 390	count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
 391	trans = ipa_cmd_trans_alloc(ipa, count);
 392	if (!trans) {
 393		dev_err(&ipa->pdev->dev,
 394			"no transaction to reset modem exception endpoints\n");
 395		return -EBUSY;
 396	}
 397
 398	while (initialized) {
 399		u32 endpoint_id = __ffs(initialized);
 400		struct ipa_endpoint *endpoint;
 401		u32 offset;
 402
 403		initialized ^= BIT(endpoint_id);
 404
 405		/* We only reset modem TX endpoints */
 406		endpoint = &ipa->endpoint[endpoint_id];
 407		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
 408			continue;
 409
 410		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
 411
 412		/* Value written is 0, and all bits are updated.  That
 413		 * means status is disabled on the endpoint, and as a
 414		 * result all other fields in the register are ignored.
 415		 */
 416		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
 417	}
 418
 419	ipa_cmd_pipeline_clear_add(trans);
 420
 421	/* XXX This should have a 1 second timeout */
 422	gsi_trans_commit_wait(trans);
 423
 424	ipa_cmd_pipeline_clear_wait(ipa);
 425
 426	return 0;
 427}
 428
 429static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
 430{
 431	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
 432	enum ipa_cs_offload_en enabled;
 433	u32 val = 0;
 434
 435	/* FRAG_OFFLOAD_EN is 0 */
 436	if (endpoint->data->checksum) {
 437		enum ipa_version version = endpoint->ipa->version;
 438
 439		if (endpoint->toward_ipa) {
 440			u32 checksum_offset;
 441
 442			/* Checksum header offset is in 4-byte units */
 443			checksum_offset = sizeof(struct rmnet_map_header);
 444			checksum_offset /= sizeof(u32);
 445			val |= u32_encode_bits(checksum_offset,
 446					       CS_METADATA_HDR_OFFSET_FMASK);
 447
 448			enabled = version < IPA_VERSION_4_5
 449					? IPA_CS_OFFLOAD_UL
 450					: IPA_CS_OFFLOAD_INLINE;
 451		} else {
 452			enabled = version < IPA_VERSION_4_5
 453					? IPA_CS_OFFLOAD_DL
 454					: IPA_CS_OFFLOAD_INLINE;
 455		}
 456	} else {
 457		enabled = IPA_CS_OFFLOAD_NONE;
 458	}
 459	val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
 460	/* CS_GEN_QMB_MASTER_SEL is 0 */
 461
 462	iowrite32(val, endpoint->ipa->reg_virt + offset);
 463}
 464
 465static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
 466{
 467	u32 offset;
 468	u32 val;
 469
 470	if (!endpoint->toward_ipa)
 471		return;
 472
 473	offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
 474	val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
 475
 476	iowrite32(val, endpoint->ipa->reg_virt + offset);
 477}
 478
 479static u32
 480ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
 481{
 482	u32 header_size = sizeof(struct rmnet_map_header);
 483
 484	/* Without checksum offload, we just have the MAP header */
 485	if (!endpoint->data->checksum)
 486		return header_size;
 487
 488	if (version < IPA_VERSION_4_5) {
 489		/* Checksum header inserted for AP TX endpoints only */
 490		if (endpoint->toward_ipa)
 491			header_size += sizeof(struct rmnet_map_ul_csum_header);
 492	} else {
 493		/* Checksum header is used in both directions */
 494		header_size += sizeof(struct rmnet_map_v5_csum_header);
 495	}
 496
 497	return header_size;
 498}
 499
 500/**
 501 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 502 * @endpoint:	Endpoint pointer
 503 *
 504 * We program QMAP endpoints so each packet received is preceded by a QMAP
 505 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 506 * packet size field, and we have the IPA hardware populate both for each
 507 * received packet.  The header is configured (in the HDR_EXT register)
 508 * to use big endian format.
 509 *
 510 * The packet size is written into the QMAP header's pkt_len field.  That
 511 * location is defined here using the HDR_OFST_PKT_SIZE field.
 512 *
 513 * The mux_id comes from a 4-byte metadata value supplied with each packet
 514 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 515 * value that we want, in its low-order byte.  A bitmask defined in the
 516 * endpoint's METADATA_MASK register defines which byte within the modem
 517 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 518 * here indicates where the extracted byte should be placed within the QMAP
 519 * header.
 520 */
 521static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
 522{
 523	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
 524	struct ipa *ipa = endpoint->ipa;
 525	u32 val = 0;
 526
 527	if (endpoint->data->qmap) {
 528		enum ipa_version version = ipa->version;
 529		size_t header_size;
 530
 531		header_size = ipa_qmap_header_size(version, endpoint);
 532		val = ipa_header_size_encoded(version, header_size);
 533
 534		/* Define how to fill fields in a received QMAP header */
 535		if (!endpoint->toward_ipa) {
 536			u32 offset;	/* Field offset within header */
 537
 538			/* Where IPA will write the metadata value */
 539			offset = offsetof(struct rmnet_map_header, mux_id);
 540			val |= ipa_metadata_offset_encoded(version, offset);
 541
 542			/* Where IPA will write the length */
 543			offset = offsetof(struct rmnet_map_header, pkt_len);
 544			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
 545			if (version >= IPA_VERSION_4_5)
 546				offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
 547
 548			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
 549			val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
 550		}
 551		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
 552		val |= HDR_OFST_METADATA_VALID_FMASK;
 553
 554		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
 555		/* HDR_A5_MUX is 0 */
 556		/* HDR_LEN_INC_DEAGG_HDR is 0 */
 557		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
 558	}
 559
 560	iowrite32(val, ipa->reg_virt + offset);
 561}
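
     /* For reference, the 4-byte QMAP header described above (struct
      * rmnet_map_header in <linux/if_rmnet.h>) lays out roughly as follows:
      * byte 0 carries the pad length and command/data flag, byte 1 is the
      * mux_id, and bytes 2-3 hold the big-endian pkt_len.  That is why the
      * offsetof() values programmed above for mux_id and pkt_len work out
      * to 1 and 2 respectively.
      */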
 562
 563static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
 564{
 565	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
 566	u32 pad_align = endpoint->data->rx.pad_align;
 567	struct ipa *ipa = endpoint->ipa;
 568	u32 val = 0;
 569
 570	val |= HDR_ENDIANNESS_FMASK;		/* big endian */
 571
 572	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
 573	 * driver assumes this field is meaningful in packets it receives,
 574	 * and assumes the header's payload length includes that padding.
 575	 * The RMNet driver does *not* pad packets it sends, however, so
 576	 * the pad field (although 0) should be ignored.
 577	 */
 578	if (endpoint->data->qmap && !endpoint->toward_ipa) {
 579		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
 580		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
 581		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
 582		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
 583	}
 584
 585	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
 586	if (!endpoint->toward_ipa)
 587		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
 588
 589	/* IPA v4.5 adds some most-significant bits to a few fields,
 590	 * two of which are defined in the HDR (not HDR_EXT) register.
 591	 */
 592	if (ipa->version >= IPA_VERSION_4_5) {
 593		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
 594		if (endpoint->data->qmap && !endpoint->toward_ipa) {
 595			u32 offset;
 596
 597			offset = offsetof(struct rmnet_map_header, pkt_len);
 598			offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
 599			val |= u32_encode_bits(offset,
 600					       HDR_OFST_PKT_SIZE_MSB_FMASK);
 601			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
 602		}
 603	}
 604	iowrite32(val, ipa->reg_virt + offset);
 605}
 606
 607static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
 608{
 609	u32 endpoint_id = endpoint->endpoint_id;
 610	u32 val = 0;
 611	u32 offset;
 612
 613	if (endpoint->toward_ipa)
 614		return;		/* Register not valid for TX endpoints */
 615
 616	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
 617
 618	/* Note that HDR_ENDIANNESS indicates big endian header fields */
 619	if (endpoint->data->qmap)
 620		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
 621
 622	iowrite32(val, endpoint->ipa->reg_virt + offset);
 623}
 624
 625static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
 626{
 627	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
 628	u32 val;
 629
 630	if (!endpoint->toward_ipa)
 631		return;		/* Register not valid for RX endpoints */
 632
 633	if (endpoint->data->dma_mode) {
 634		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
 635		u32 dma_endpoint_id;
 636
 637		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
 638
 639		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
 640		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
 641	} else {
 642		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
 643	}
 644	/* All other bits unspecified (and 0) */
 645
 646	iowrite32(val, endpoint->ipa->reg_virt + offset);
 647}
 648
 649/* Compute the aggregation size value to use for a given buffer size */
 650static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
 651{
 652	/* We don't use "hard byte limit" aggregation, so we define the
 653	 * aggregation limit such that our buffer has enough space *after*
 654	 * that limit to receive a full MTU of data, plus overhead.
 655	 */
 656	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
 657
 658	return rx_buffer_size / SZ_1K;
 659}
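
     /* For illustration, assuming a 1500-byte IPA_MTU and 4 KB pages (so
      * IPA_RX_BUFFER_OVERHEAD amounts to a few hundred bytes of headroom
      * plus skb_shared_info space), an 8192-byte receive buffer yields
      * (8192 - 1500 - overhead) / 1024 = 6 KB.  Aggregation then closes
      * once 6 KB has arrived, leaving room for one more full-MTU packet
      * plus its overhead.
      */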
 660
 661/* Encoded values for AGGR endpoint register fields */
 662static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
 663{
 664	if (version < IPA_VERSION_4_5)
 665		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
 666
 667	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
 668}
 669
 670/* Encode the aggregation timer limit (microseconds) based on IPA version */
 671static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
 672{
 673	u32 gran_sel;
 674	u32 fmask;
 675	u32 val;
 676
 677	if (version < IPA_VERSION_4_5) {
 678		/* We set aggregation granularity in ipa_hardware_config() */
 679		limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
 680
 681		return u32_encode_bits(limit, aggr_time_limit_fmask(true));
 682	}
 683
 684	/* IPA v4.5 expresses the time limit using Qtime.  The AP has
 685	 * pulse generators 0 and 1 available, which were configured
 686	 * in ipa_qtime_config() to have granularity 100 usec and
 687	 * 1 msec, respectively.  Use pulse generator 0 if possible,
 688	 * otherwise fall back to pulse generator 1.
 689	 */
 690	fmask = aggr_time_limit_fmask(false);
 691	val = DIV_ROUND_CLOSEST(limit, 100);
 692	if (val > field_max(fmask)) {
 693		/* Have to use pulse generator 1 (millisecond granularity) */
 694		gran_sel = AGGR_GRAN_SEL_FMASK;
 695		val = DIV_ROUND_CLOSEST(limit, 1000);
 696	} else {
 697		/* We can use pulse generator 0 (100 usec granularity) */
 698		gran_sel = 0;
 699	}
 700
 701	return gran_sel | u32_encode_bits(val, fmask);
 702}
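
     /* For illustration, take the 500 microsecond IPA_AGGR_TIME_LIMIT used
      * below.  Before IPA v4.5, assuming a 500 usec aggregation granularity
      * set up in ipa_hardware_config(), the encoded limit is 500 / 500 = 1
      * tick.  For IPA v4.5+, 500 / 100 = 5 fits in the time limit field, so
      * pulse generator 0 (100 usec granularity) is used and gran_sel stays
      * 0; a period too large for that field falls back to pulse generator 1
      * (1 msec units) with AGGR_GRAN_SEL set instead.
      */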
 703
 704static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
 705{
 706	u32 val = enabled ? 1 : 0;
 707
 708	if (version < IPA_VERSION_4_5)
 709		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
 710
 711	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
 712}
 713
 714static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 715{
 716	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
 717	enum ipa_version version = endpoint->ipa->version;
 718	u32 val = 0;
 719
 720	if (endpoint->data->aggregation) {
 721		if (!endpoint->toward_ipa) {
 722			bool close_eof;
 723			u32 limit;
 724
 725			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
 726			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
 727
 728			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
 729			val |= aggr_byte_limit_encoded(version, limit);
 730
 731			limit = IPA_AGGR_TIME_LIMIT;
 732			val |= aggr_time_limit_encoded(version, limit);
 733
 734			/* AGGR_PKT_LIMIT is 0 (unlimited) */
 735
 736			close_eof = endpoint->data->rx.aggr_close_eof;
 737			val |= aggr_sw_eof_active_encoded(version, close_eof);
 738
 739			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
 740		} else {
 741			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
 742					       AGGR_EN_FMASK);
 743			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
 744			/* other fields ignored */
 745		}
 746		/* AGGR_FORCE_CLOSE is 0 */
 747		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
 748	} else {
 749		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
 750		/* other fields ignored */
 751	}
 752
 753	iowrite32(val, endpoint->ipa->reg_virt + offset);
 754}
 755
 756/* Return the Qtime-based head-of-line blocking timer value that
 757 * represents the given number of microseconds.  The result
 758 * includes both the timer value and the selected timer granularity.
 759 */
 760static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
 761{
 762	u32 gran_sel;
 763	u32 val;
 764
 765	/* IPA v4.5 expresses time limits using Qtime.  The AP has
 766	 * pulse generators 0 and 1 available, which were configured
 767	 * in ipa_qtime_config() to have granularity 100 usec and
 768	 * 1 msec, respectively.  Use pulse generator 0 if possible,
 769	 * otherwise fall back to pulse generator 1.
 770	 */
 771	val = DIV_ROUND_CLOSEST(microseconds, 100);
 772	if (val > field_max(TIME_LIMIT_FMASK)) {
 773		/* Have to use pulse generator 1 (millisecond granularity) */
 774		gran_sel = GRAN_SEL_FMASK;
 775		val = DIV_ROUND_CLOSEST(microseconds, 1000);
 776	} else {
 777		/* We can use pulse generator 0 (100 usec granularity) */
 778		gran_sel = 0;
 779	}
 780
 781	return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
 782}
 783
 784/* The head-of-line blocking timer is defined as a tick count.  For
 785 * IPA version 4.5 the tick count is based on the Qtimer, which is
 786 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 787 * each tick represents 128 cycles of the IPA core clock.
 788 *
 789 * Return the encoded value that should be written to that register
 790 * that represents the timeout period provided.  For IPA v4.2 this
 791 * encodes a base and scale value, while for earlier versions the
 792 * value is a simple tick count.
 793 */
 794static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
 795{
 796	u32 width;
 797	u32 scale;
 798	u64 ticks;
 799	u64 rate;
 800	u32 high;
 801	u32 val;
 802
 803	if (!microseconds)
 804		return 0;	/* Nothing to compute if timer period is 0 */
 805
 806	if (ipa->version >= IPA_VERSION_4_5)
 807		return hol_block_timer_qtime_val(ipa, microseconds);
 808
 809	/* Use 64 bit arithmetic to avoid overflow... */
 810	rate = ipa_clock_rate(ipa);
 811	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
 812	/* ...but we still need to fit into a 32-bit register */
 813	WARN_ON(ticks > U32_MAX);
 814
 815	/* IPA v3.5.1 through v4.1 just record the tick count */
 816	if (ipa->version < IPA_VERSION_4_2)
 817		return (u32)ticks;
 818
 819	/* For IPA v4.2, the tick count is represented by base and
 820	 * scale fields within the 32-bit timer register, where:
 821	 *     ticks = base << scale;
 822	 * The best precision is achieved when the base value is as
 823	 * large as possible.  Find the highest set bit in the tick
 824	 * count, and extract the number of bits in the base field
 825	 * such that high bit is included.
  826	 * such that the high bit is included.
 827	high = fls(ticks);		/* 1..32 */
 828	width = HWEIGHT32(BASE_VALUE_FMASK);
 829	scale = high > width ? high - width : 0;
 830	if (scale) {
 831		/* If we're scaling, round up to get a closer result */
 832		ticks += 1 << (scale - 1);
 833		/* High bit was set, so rounding might have affected it */
 834		if (fls(ticks) != high)
 835			scale++;
 836	}
 837
 838	val = u32_encode_bits(scale, SCALE_FMASK);
 839	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
 840
 841	return val;
 842}
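
     /* For illustration, assume IPA v4.2 with a 5-bit base field.  A tick
      * count of 0x1234 has 13 significant bits, so scale = 13 - 5 = 8.
      * Rounding adds 1 << 7 = 128 to give 0x12b4 (high bit unchanged), and
      * base = 0x12b4 >> 8 = 0x12.  The hardware reconstructs the period as
      * 0x12 << 8 = 0x1200 ticks, a close approximation of the 0x1234
      * requested.
      */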
 843
 844/* If microseconds is 0, timeout is immediate */
 845static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
 846					      u32 microseconds)
 847{
 848	u32 endpoint_id = endpoint->endpoint_id;
 849	struct ipa *ipa = endpoint->ipa;
 850	u32 offset;
 851	u32 val;
 852
 853	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
 854	val = hol_block_timer_val(ipa, microseconds);
 855	iowrite32(val, ipa->reg_virt + offset);
 856}
 857
 858static void
 859ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
 860{
 861	u32 endpoint_id = endpoint->endpoint_id;
 862	u32 offset;
 863	u32 val;
 864
 865	val = enable ? HOL_BLOCK_EN_FMASK : 0;
 866	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
 867	iowrite32(val, endpoint->ipa->reg_virt + offset);
 868}
 869
 870void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
 871{
 872	u32 i;
 873
 874	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
 875		struct ipa_endpoint *endpoint = &ipa->endpoint[i];
 876
 877		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
 878			continue;
 879
 880		ipa_endpoint_init_hol_block_timer(endpoint, 0);
 881		ipa_endpoint_init_hol_block_enable(endpoint, true);
 882	}
 883}
 884
 885static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
 886{
 887	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
 888	u32 val = 0;
 889
 890	if (!endpoint->toward_ipa)
 891		return;		/* Register not valid for RX endpoints */
 892
 893	/* DEAGGR_HDR_LEN is 0 */
 894	/* PACKET_OFFSET_VALID is 0 */
 895	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
 896	/* MAX_PACKET_LEN is 0 (not enforced) */
 897
 898	iowrite32(val, endpoint->ipa->reg_virt + offset);
 899}
 900
 901static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
 902{
 903	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
 904	struct ipa *ipa = endpoint->ipa;
 905	u32 val;
 906
 907	val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
 908	iowrite32(val, ipa->reg_virt + offset);
 909}
 910
 911static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
 912{
 913	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
 914	u32 val = 0;
 915
 916	if (!endpoint->toward_ipa)
 917		return;		/* Register not valid for RX endpoints */
 918
 919	/* Low-order byte configures primary packet processing */
 920	val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);
 921
 922	/* Second byte configures replicated packet processing */
 923	val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
 924			       SEQ_REP_TYPE_FMASK);
 925
 926	iowrite32(val, endpoint->ipa->reg_virt + offset);
 927}
 928
 929/**
 930 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 931 * @endpoint:	Endpoint pointer
 932 * @skb:	Socket buffer to send
 933 *
 934 * Returns:	0 if successful, or a negative error code
 935 */
 936int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
 937{
 938	struct gsi_trans *trans;
 939	u32 nr_frags;
 940	int ret;
 941
 942	/* Make sure source endpoint's TLV FIFO has enough entries to
 943	 * hold the linear portion of the skb and all its fragments.
 944	 * If not, see if we can linearize it before giving up.
 945	 */
 946	nr_frags = skb_shinfo(skb)->nr_frags;
 947	if (1 + nr_frags > endpoint->trans_tre_max) {
 948		if (skb_linearize(skb))
 949			return -E2BIG;
 950		nr_frags = 0;
 951	}
 952
 953	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
 954	if (!trans)
 955		return -EBUSY;
 956
 957	ret = gsi_trans_skb_add(trans, skb);
 958	if (ret)
 959		goto err_trans_free;
 960	trans->data = skb;	/* transaction owns skb now */
 961
 962	gsi_trans_commit(trans, !netdev_xmit_more());
 963
 964	return 0;
 965
 966err_trans_free:
 967	gsi_trans_free(trans);
 968
 969	return -ENOMEM;
 970}
 971
 972static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
 973{
 974	u32 endpoint_id = endpoint->endpoint_id;
 975	struct ipa *ipa = endpoint->ipa;
 976	u32 val = 0;
 977	u32 offset;
 978
 979	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
 980
 981	if (endpoint->data->status_enable) {
 982		val |= STATUS_EN_FMASK;
 983		if (endpoint->toward_ipa) {
 984			enum ipa_endpoint_name name;
 985			u32 status_endpoint_id;
 986
 987			name = endpoint->data->tx.status_endpoint;
 988			status_endpoint_id = ipa->name_map[name]->endpoint_id;
 989
 990			val |= u32_encode_bits(status_endpoint_id,
 991					       STATUS_ENDP_FMASK);
 992		}
 993		/* STATUS_LOCATION is 0, meaning status element precedes
 994		 * packet (not present for IPA v4.5)
 995		 */
 996		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
 997	}
 998
 999	iowrite32(val, ipa->reg_virt + offset);
1000}
1001
1002static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
1003{
1004	struct gsi_trans *trans;
1005	bool doorbell = false;
1006	struct page *page;
1007	u32 offset;
1008	u32 len;
1009	int ret;
1010
1011	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
1012	if (!page)
1013		return -ENOMEM;
1014
1015	trans = ipa_endpoint_trans_alloc(endpoint, 1);
1016	if (!trans)
1017		goto err_free_pages;
1018
1019	/* Offset the buffer to make space for skb headroom */
1020	offset = NET_SKB_PAD;
1021	len = IPA_RX_BUFFER_SIZE - offset;
1022
1023	ret = gsi_trans_page_add(trans, page, len, offset);
1024	if (ret)
1025		goto err_trans_free;
1026	trans->data = page;	/* transaction owns page now */
1027
1028	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
1029		doorbell = true;
1030		endpoint->replenish_ready = 0;
1031	}
1032
1033	gsi_trans_commit(trans, doorbell);
1034
1035	return 0;
1036
1037err_trans_free:
1038	gsi_trans_free(trans);
1039err_free_pages:
1040	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
1041
1042	return -ENOMEM;
1043}
1044
1045/**
1046 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1047 * @endpoint:	Endpoint to be replenished
1048 * @add_one:	Whether this is replacing a just-consumed buffer
1049 *
1050 * The IPA hardware can hold a fixed number of receive buffers for an RX
1051 * endpoint, based on the number of entries in the underlying channel ring
1052 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1053 * more receive buffers can be supplied to the hardware.  Replenishing for
1054 * an endpoint can be disabled, in which case requests to replenish a
1055 * buffer are "saved", and transferred to the backlog once it is re-enabled
1056 * again.
1057 */
1058static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
1059{
1060	struct gsi *gsi;
1061	u32 backlog;
1062
1063	if (!endpoint->replenish_enabled) {
1064		if (add_one)
1065			atomic_inc(&endpoint->replenish_saved);
1066		return;
1067	}
1068
1069	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
1070		if (ipa_endpoint_replenish_one(endpoint))
1071			goto try_again_later;
1072	if (add_one)
1073		atomic_inc(&endpoint->replenish_backlog);
1074
1075	return;
1076
1077try_again_later:
1078	/* The last one didn't succeed, so fix the backlog */
1079	backlog = atomic_inc_return(&endpoint->replenish_backlog);
1080
1081	if (add_one)
1082		atomic_inc(&endpoint->replenish_backlog);
1083
1084	/* Whenever a receive buffer transaction completes we'll try to
1085	 * replenish again.  It's unlikely, but if we fail to supply even
1086	 * one buffer, nothing will trigger another replenish attempt.
1087	 * Receive buffer transactions use one TRE, so schedule work to
1088	 * try replenishing again if our backlog is *all* available TREs.
1089	 */
1090	gsi = &endpoint->ipa->gsi;
1091	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
1092		schedule_delayed_work(&endpoint->replenish_work,
1093				      msecs_to_jiffies(1));
1094}
1095
1096static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1097{
1098	struct gsi *gsi = &endpoint->ipa->gsi;
1099	u32 max_backlog;
1100	u32 saved;
1101
1102	endpoint->replenish_enabled = true;
1103	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
1104		atomic_add(saved, &endpoint->replenish_backlog);
1105
1106	/* Start replenishing if hardware currently has no buffers */
1107	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
1108	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
1109		ipa_endpoint_replenish(endpoint, false);
1110}
1111
1112static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1113{
1114	u32 backlog;
1115
1116	endpoint->replenish_enabled = false;
1117	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
1118		atomic_add(backlog, &endpoint->replenish_saved);
1119}
1120
1121static void ipa_endpoint_replenish_work(struct work_struct *work)
1122{
1123	struct delayed_work *dwork = to_delayed_work(work);
1124	struct ipa_endpoint *endpoint;
1125
1126	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1127
1128	ipa_endpoint_replenish(endpoint, false);
1129}
1130
1131static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1132				  void *data, u32 len, u32 extra)
1133{
1134	struct sk_buff *skb;
1135
1136	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1137	if (skb) {
1138		skb_put(skb, len);
1139		memcpy(skb->data, data, len);
1140		skb->truesize += extra;
1141	}
1142
1143	/* Now receive it, or drop it if there's no netdev */
1144	if (endpoint->netdev)
1145		ipa_modem_skb_rx(endpoint->netdev, skb);
1146	else if (skb)
1147		dev_kfree_skb_any(skb);
1148}
1149
1150static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1151				   struct page *page, u32 len)
1152{
1153	struct sk_buff *skb;
1154
1155	/* Nothing to do if there's no netdev */
1156	if (!endpoint->netdev)
1157		return false;
1158
1159	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
1160	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
1161	if (skb) {
1162		/* Reserve the headroom and account for the data */
1163		skb_reserve(skb, NET_SKB_PAD);
1164		skb_put(skb, len);
1165	}
1166
1167	/* Receive the buffer (or record drop if unable to build it) */
1168	ipa_modem_skb_rx(endpoint->netdev, skb);
1169
1170	return skb != NULL;
1171}
1172
1173/* The format of a packet status element is the same for several status
1174 * types (opcodes).  Other types aren't currently supported.
1175 */
1176static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1177{
1178	switch (opcode) {
1179	case IPA_STATUS_OPCODE_PACKET:
1180	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1181	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1182	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1183		return true;
1184	default:
1185		return false;
1186	}
1187}
1188
1189static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1190				     const struct ipa_status *status)
1191{
1192	u32 endpoint_id;
1193
1194	if (!ipa_status_format_packet(status->opcode))
1195		return true;
1196	if (!status->pkt_len)
1197		return true;
1198	endpoint_id = u8_get_bits(status->endp_dst_idx,
1199				  IPA_STATUS_DST_IDX_FMASK);
1200	if (endpoint_id != endpoint->endpoint_id)
1201		return true;
1202
1203	return false;	/* Don't skip this packet, process it */
1204}
1205
1206static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
1207				    const struct ipa_status *status)
1208{
1209	struct ipa_endpoint *command_endpoint;
1210	struct ipa *ipa = endpoint->ipa;
1211	u32 endpoint_id;
1212
1213	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
1214		return false;	/* No valid tag */
1215
1216	/* The status contains a valid tag.  We know the packet was sent to
1217	 * this endpoint (already verified by ipa_endpoint_status_skip()).
1218	 * If the packet came from the AP->command TX endpoint we know
1219	 * this packet was sent as part of the pipeline clear process.
1220	 */
1221	endpoint_id = u8_get_bits(status->endp_src_idx,
1222				  IPA_STATUS_SRC_IDX_FMASK);
1223	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1224	if (endpoint_id == command_endpoint->endpoint_id) {
1225		complete(&ipa->completion);
1226	} else {
1227		dev_err(&ipa->pdev->dev,
1228			"unexpected tagged packet from endpoint %u\n",
1229			endpoint_id);
1230	}
1231
1232	return true;
1233}
1234
1235/* Return whether the status indicates the packet should be dropped */
1236static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
1237				     const struct ipa_status *status)
1238{
1239	u32 val;
1240
1241	/* If the status indicates a tagged transfer, we'll drop the packet */
1242	if (ipa_endpoint_status_tag(endpoint, status))
1243		return true;
1244
1245	/* Deaggregation exceptions we drop; all other types we consume */
1246	if (status->exception)
1247		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
1248
1249	/* Drop the packet if it fails to match a routing rule; otherwise no */
1250	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1251
1252	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1253}
1254
1255static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1256				      struct page *page, u32 total_len)
1257{
1258	void *data = page_address(page) + NET_SKB_PAD;
1259	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
1260	u32 resid = total_len;
1261
1262	while (resid) {
1263		const struct ipa_status *status = data;
1264		u32 align;
1265		u32 len;
1266
1267		if (resid < sizeof(*status)) {
1268			dev_err(&endpoint->ipa->pdev->dev,
1269				"short message (%u bytes < %zu byte status)\n",
1270				resid, sizeof(*status));
1271			break;
1272		}
1273
1274		/* Skip over status packets that lack packet data */
1275		if (ipa_endpoint_status_skip(endpoint, status)) {
1276			data += sizeof(*status);
1277			resid -= sizeof(*status);
1278			continue;
1279		}
1280
1281		/* Compute the amount of buffer space consumed by the packet,
1282		 * including the status element.  If the hardware is configured
1283		 * to pad packet data to an aligned boundary, account for that.
1284		 * And if checksum offload is enabled a trailer containing
1285		 * computed checksum information will be appended.
1286		 */
1287		align = endpoint->data->rx.pad_align ? : 1;
1288		len = le16_to_cpu(status->pkt_len);
1289		len = sizeof(*status) + ALIGN(len, align);
1290		if (endpoint->data->checksum)
1291			len += sizeof(struct rmnet_map_dl_csum_trailer);
1292
1293		if (!ipa_endpoint_status_drop(endpoint, status)) {
1294			void *data2;
1295			u32 extra;
1296			u32 len2;
1297
1298			/* Client receives only packet data (no status) */
1299			data2 = data + sizeof(*status);
1300			len2 = le16_to_cpu(status->pkt_len);
1301
1302			/* Have the true size reflect the extra unused space in
1303			 * the original receive buffer.  Distribute the "cost"
1304			 * proportionately across all aggregated packets in the
1305			 * buffer.
1306			 */
1307			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1308			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1309		}
1310
1311		/* Consume status and the full packet it describes */
1312		data += len;
1313		resid -= len;
1314	}
1315}
1316
1317/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
1318static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
1319				     struct gsi_trans *trans)
1320{
1321}
1322
1323/* Complete transaction initiated in ipa_endpoint_replenish_one() */
1324static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1325				     struct gsi_trans *trans)
1326{
1327	struct page *page;
1328
1329	ipa_endpoint_replenish(endpoint, true);
1330
1331	if (trans->cancelled)
1332		return;
1333
1334	/* Parse or build a socket buffer using the actual received length */
1335	page = trans->data;
1336	if (endpoint->data->status_enable)
1337		ipa_endpoint_status_parse(endpoint, page, trans->len);
1338	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1339		trans->data = NULL;	/* Pages have been consumed */
1340}
1341
1342void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1343				 struct gsi_trans *trans)
1344{
1345	if (endpoint->toward_ipa)
1346		ipa_endpoint_tx_complete(endpoint, trans);
1347	else
1348		ipa_endpoint_rx_complete(endpoint, trans);
1349}
1350
1351void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1352				struct gsi_trans *trans)
1353{
1354	if (endpoint->toward_ipa) {
1355		struct ipa *ipa = endpoint->ipa;
1356
1357		/* Nothing to do for command transactions */
1358		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1359			struct sk_buff *skb = trans->data;
1360
1361			if (skb)
1362				dev_kfree_skb_any(skb);
1363		}
1364	} else {
1365		struct page *page = trans->data;
1366
1367		if (page)
1368			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
1369	}
1370}
1371
1372void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1373{
1374	u32 val;
1375
1376	/* ROUTE_DIS is 0 */
1377	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1378	val |= ROUTE_DEF_HDR_TABLE_FMASK;
1379	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1380	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1381	val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1382
1383	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1384}
1385
1386void ipa_endpoint_default_route_clear(struct ipa *ipa)
1387{
1388	ipa_endpoint_default_route_set(ipa, 0);
1389}
1390
1391/**
1392 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1393 * @endpoint:	Endpoint to be reset
1394 *
1395 * If aggregation is active on an RX endpoint when a reset is performed
1396 * on its underlying GSI channel, a special sequence of actions must be
1397 * taken to ensure the IPA pipeline is properly cleared.
1398 *
1399 * Return:	0 if successful, or a negative error code
1400 */
1401static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1402{
1403	struct device *dev = &endpoint->ipa->pdev->dev;
1404	struct ipa *ipa = endpoint->ipa;
1405	struct gsi *gsi = &ipa->gsi;
1406	bool suspended = false;
1407	dma_addr_t addr;
1408	u32 retries;
1409	u32 len = 1;
1410	void *virt;
1411	int ret;
1412
1413	virt = kzalloc(len, GFP_KERNEL);
1414	if (!virt)
1415		return -ENOMEM;
1416
1417	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1418	if (dma_mapping_error(dev, addr)) {
1419		ret = -ENOMEM;
1420		goto out_kfree;
1421	}
1422
1423	/* Force close aggregation before issuing the reset */
1424	ipa_endpoint_force_close(endpoint);
1425
1426	/* Reset and reconfigure the channel with the doorbell engine
1427	 * disabled.  Then poll until we know aggregation is no longer
1428	 * active.  We'll re-enable the doorbell (if appropriate) when
1429	 * we reset again below.
1430	 */
1431	gsi_channel_reset(gsi, endpoint->channel_id, false);
1432
1433	/* Make sure the channel isn't suspended */
1434	suspended = ipa_endpoint_program_suspend(endpoint, false);
1435
1436	/* Start channel and do a 1 byte read */
1437	ret = gsi_channel_start(gsi, endpoint->channel_id);
1438	if (ret)
1439		goto out_suspend_again;
1440
1441	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1442	if (ret)
1443		goto err_endpoint_stop;
1444
1445	/* Wait for aggregation to be closed on the channel */
1446	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1447	do {
1448		if (!ipa_endpoint_aggr_active(endpoint))
1449			break;
1450		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1451	} while (retries--);
1452
1453	/* Check one last time */
1454	if (ipa_endpoint_aggr_active(endpoint))
1455		dev_err(dev, "endpoint %u still active during reset\n",
1456			endpoint->endpoint_id);
1457
1458	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1459
1460	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1461	if (ret)
1462		goto out_suspend_again;
1463
1464	/* Finally, reset and reconfigure the channel again (re-enabling
1465	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1466	 * complete the channel reset sequence.  Finish by suspending the
1467	 * channel again (if necessary).
1468	 */
1469	gsi_channel_reset(gsi, endpoint->channel_id, true);
1470
1471	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1472
1473	goto out_suspend_again;
1474
1475err_endpoint_stop:
1476	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1477out_suspend_again:
1478	if (suspended)
1479		(void)ipa_endpoint_program_suspend(endpoint, true);
1480	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1481out_kfree:
1482	kfree(virt);
1483
1484	return ret;
1485}
1486
1487static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1488{
1489	u32 channel_id = endpoint->channel_id;
1490	struct ipa *ipa = endpoint->ipa;
1491	bool special;
1492	int ret = 0;
1493
1494	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1495	 * is active, we need to handle things specially to recover.
1496	 * All other cases just need to reset the underlying GSI channel.
1497	 */
1498	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1499			endpoint->data->aggregation;
1500	if (special && ipa_endpoint_aggr_active(endpoint))
1501		ret = ipa_endpoint_reset_rx_aggr(endpoint);
1502	else
1503		gsi_channel_reset(&ipa->gsi, channel_id, true);
1504
1505	if (ret)
1506		dev_err(&ipa->pdev->dev,
1507			"error %d resetting channel %u for endpoint %u\n",
1508			ret, endpoint->channel_id, endpoint->endpoint_id);
1509}
1510
1511static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1512{
1513	if (endpoint->toward_ipa)
1514		ipa_endpoint_program_delay(endpoint, false);
1515	else
1516		(void)ipa_endpoint_program_suspend(endpoint, false);
1517	ipa_endpoint_init_cfg(endpoint);
1518	ipa_endpoint_init_nat(endpoint);
1519	ipa_endpoint_init_hdr(endpoint);
1520	ipa_endpoint_init_hdr_ext(endpoint);
1521	ipa_endpoint_init_hdr_metadata_mask(endpoint);
1522	ipa_endpoint_init_mode(endpoint);
1523	ipa_endpoint_init_aggr(endpoint);
1524	ipa_endpoint_init_deaggr(endpoint);
1525	ipa_endpoint_init_rsrc_grp(endpoint);
1526	ipa_endpoint_init_seq(endpoint);
1527	ipa_endpoint_status(endpoint);
1528}
1529
1530int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1531{
1532	struct ipa *ipa = endpoint->ipa;
1533	struct gsi *gsi = &ipa->gsi;
1534	int ret;
1535
1536	ret = gsi_channel_start(gsi, endpoint->channel_id);
1537	if (ret) {
1538		dev_err(&ipa->pdev->dev,
1539			"error %d starting %cX channel %u for endpoint %u\n",
1540			ret, endpoint->toward_ipa ? 'T' : 'R',
1541			endpoint->channel_id, endpoint->endpoint_id);
1542		return ret;
1543	}
1544
1545	if (!endpoint->toward_ipa) {
1546		ipa_interrupt_suspend_enable(ipa->interrupt,
1547					     endpoint->endpoint_id);
1548		ipa_endpoint_replenish_enable(endpoint);
1549	}
1550
1551	ipa->enabled |= BIT(endpoint->endpoint_id);
1552
1553	return 0;
1554}
1555
1556void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1557{
1558	u32 mask = BIT(endpoint->endpoint_id);
1559	struct ipa *ipa = endpoint->ipa;
1560	struct gsi *gsi = &ipa->gsi;
1561	int ret;
1562
1563	if (!(ipa->enabled & mask))
1564		return;
1565
1566	ipa->enabled ^= mask;
1567
1568	if (!endpoint->toward_ipa) {
1569		ipa_endpoint_replenish_disable(endpoint);
1570		ipa_interrupt_suspend_disable(ipa->interrupt,
1571					      endpoint->endpoint_id);
1572	}
1573
1574	/* Note that if stop fails, the channel's state is not well-defined */
1575	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1576	if (ret)
1577		dev_err(&ipa->pdev->dev,
1578			"error %d attempting to stop endpoint %u\n", ret,
1579			endpoint->endpoint_id);
1580}
1581
1582void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1583{
1584	struct device *dev = &endpoint->ipa->pdev->dev;
1585	struct gsi *gsi = &endpoint->ipa->gsi;
1586	bool stop_channel;
1587	int ret;
1588
1589	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1590		return;
1591
1592	if (!endpoint->toward_ipa) {
1593		ipa_endpoint_replenish_disable(endpoint);
1594		(void)ipa_endpoint_program_suspend(endpoint, true);
1595	}
1596
1597	/* Starting with IPA v4.0, endpoints are suspended by stopping the
1598	 * underlying GSI channel rather than using endpoint suspend mode.
1599	 */
1600	stop_channel = endpoint->ipa->version >= IPA_VERSION_4_0;
1601	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
1602	if (ret)
1603		dev_err(dev, "error %d suspending channel %u\n", ret,
1604			endpoint->channel_id);
1605}
1606
1607void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1608{
1609	struct device *dev = &endpoint->ipa->pdev->dev;
1610	struct gsi *gsi = &endpoint->ipa->gsi;
1611	bool start_channel;
1612	int ret;
1613
1614	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1615		return;
1616
1617	if (!endpoint->toward_ipa)
1618		(void)ipa_endpoint_program_suspend(endpoint, false);
1619
1620	/* Starting with IPA v4.0, the underlying GSI channel must be
1621	 * restarted for resume.
1622	 */
1623	start_channel = endpoint->ipa->version >= IPA_VERSION_4_0;
1624	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
1625	if (ret)
1626		dev_err(dev, "error %d resuming channel %u\n", ret,
1627			endpoint->channel_id);
1628	else if (!endpoint->toward_ipa)
1629		ipa_endpoint_replenish_enable(endpoint);
1630}
1631
1632void ipa_endpoint_suspend(struct ipa *ipa)
1633{
1634	if (!ipa->setup_complete)
1635		return;
1636
1637	if (ipa->modem_netdev)
1638		ipa_modem_suspend(ipa->modem_netdev);
1639
1640	ipa_cmd_pipeline_clear(ipa);
1641
1642	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1643	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1644}
1645
1646void ipa_endpoint_resume(struct ipa *ipa)
1647{
1648	if (!ipa->setup_complete)
1649		return;
1650
1651	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1652	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1653
1654	if (ipa->modem_netdev)
1655		ipa_modem_resume(ipa->modem_netdev);
1656}
1657
1658static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1659{
1660	struct gsi *gsi = &endpoint->ipa->gsi;
1661	u32 channel_id = endpoint->channel_id;
1662
1663	/* Only AP endpoints get set up */
1664	if (endpoint->ee_id != GSI_EE_AP)
1665		return;
1666
1667	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
1668	if (!endpoint->toward_ipa) {
1669		/* RX transactions require a single TRE, so the maximum
1670		 * backlog is the same as the maximum outstanding TREs.
1671		 */
1672		endpoint->replenish_enabled = false;
1673		atomic_set(&endpoint->replenish_saved,
1674			   gsi_channel_tre_max(gsi, endpoint->channel_id));
1675		atomic_set(&endpoint->replenish_backlog, 0);
1676		INIT_DELAYED_WORK(&endpoint->replenish_work,
1677				  ipa_endpoint_replenish_work);
1678	}
1679
1680	ipa_endpoint_program(endpoint);
1681
1682	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1683}
1684
1685static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1686{
1687	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1688
1689	if (!endpoint->toward_ipa)
1690		cancel_delayed_work_sync(&endpoint->replenish_work);
1691
1692	ipa_endpoint_reset(endpoint);
1693}
1694
1695void ipa_endpoint_setup(struct ipa *ipa)
1696{
1697	u32 initialized = ipa->initialized;
1698
1699	ipa->set_up = 0;
1700	while (initialized) {
1701		u32 endpoint_id = __ffs(initialized);
1702
1703		initialized ^= BIT(endpoint_id);
1704
1705		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1706	}
1707}
1708
1709void ipa_endpoint_teardown(struct ipa *ipa)
1710{
1711	u32 set_up = ipa->set_up;
1712
1713	while (set_up) {
1714		u32 endpoint_id = __fls(set_up);
1715
1716		set_up ^= BIT(endpoint_id);
1717
1718		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1719	}
1720	ipa->set_up = 0;
1721}
1722
1723int ipa_endpoint_config(struct ipa *ipa)
1724{
1725	struct device *dev = &ipa->pdev->dev;
1726	u32 initialized;
1727	u32 rx_base;
1728	u32 rx_mask;
1729	u32 tx_mask;
1730	int ret = 0;
1731	u32 max;
1732	u32 val;
1733
 1734	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1735	 * Furthermore, the endpoints were not grouped such that TX
1736	 * endpoint numbers started with 0 and RX endpoints had numbers
1737	 * higher than all TX endpoints, so we can't do the simple
1738	 * direction check used for newer hardware below.
1739	 *
1740	 * For hardware that doesn't support the FLAVOR_0 register,
1741	 * just set the available mask to support any endpoint, and
1742	 * assume the configuration is valid.
1743	 */
1744	if (ipa->version < IPA_VERSION_3_5) {
1745		ipa->available = ~0;
1746		return 0;
1747	}
1748
1749	/* Find out about the endpoints supplied by the hardware, and ensure
1750	 * the highest one doesn't exceed the number we support.
1751	 */
1752	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
1753
1754	/* Our RX is an IPA producer */
1755	rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
1756	max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
1757	if (max > IPA_ENDPOINT_MAX) {
1758		dev_err(dev, "too many endpoints (%u > %u)\n",
1759			max, IPA_ENDPOINT_MAX);
1760		return -EINVAL;
1761	}
1762	rx_mask = GENMASK(max - 1, rx_base);
1763
1764	/* Our TX is an IPA consumer */
1765	max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
1766	tx_mask = GENMASK(max - 1, 0);
1767
1768	ipa->available = rx_mask | tx_mask;
1769
1770	/* Check for initialized endpoints not supported by the hardware */
1771	if (ipa->initialized & ~ipa->available) {
1772		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1773			ipa->initialized & ~ipa->available);
1774		ret = -EINVAL;		/* Report other errors too */
1775	}
1776
1777	initialized = ipa->initialized;
1778	while (initialized) {
1779		u32 endpoint_id = __ffs(initialized);
1780		struct ipa_endpoint *endpoint;
1781
1782		initialized ^= BIT(endpoint_id);
1783
1784		/* Make sure it's pointing in the right direction */
1785		endpoint = &ipa->endpoint[endpoint_id];
1786		if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
1787			dev_err(dev, "endpoint id %u wrong direction\n",
1788				endpoint_id);
1789			ret = -EINVAL;
1790		}
1791	}
1792
1793	return ret;
1794}
1795
1796void ipa_endpoint_deconfig(struct ipa *ipa)
1797{
1798	ipa->available = 0;	/* Nothing more to do */
1799}
1800
1801static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1802				  const struct ipa_gsi_endpoint_data *data)
1803{
1804	struct ipa_endpoint *endpoint;
1805
1806	endpoint = &ipa->endpoint[data->endpoint_id];
1807
1808	if (data->ee_id == GSI_EE_AP)
1809		ipa->channel_map[data->channel_id] = endpoint;
1810	ipa->name_map[name] = endpoint;
1811
1812	endpoint->ipa = ipa;
1813	endpoint->ee_id = data->ee_id;
1814	endpoint->channel_id = data->channel_id;
1815	endpoint->endpoint_id = data->endpoint_id;
1816	endpoint->toward_ipa = data->toward_ipa;
1817	endpoint->data = &data->endpoint.config;
1818
1819	ipa->initialized |= BIT(endpoint->endpoint_id);
1820}
1821
1822static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1823{
1824	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1825
1826	memset(endpoint, 0, sizeof(*endpoint));
1827}
1828
1829void ipa_endpoint_exit(struct ipa *ipa)
1830{
1831	u32 initialized = ipa->initialized;
1832
1833	while (initialized) {
1834		u32 endpoint_id = __fls(initialized);
1835
1836		initialized ^= BIT(endpoint_id);
1837
1838		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1839	}
1840	memset(ipa->name_map, 0, sizeof(ipa->name_map));
1841	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1842}
1843
1844/* Returns a bitmask of endpoints that support filtering, or 0 on error */
1845u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
1846		      const struct ipa_gsi_endpoint_data *data)
1847{
1848	enum ipa_endpoint_name name;
1849	u32 filter_map;
1850
1851	if (!ipa_endpoint_data_valid(ipa, count, data))
1852		return 0;	/* Error */
1853
1854	ipa->initialized = 0;
1855
1856	filter_map = 0;
1857	for (name = 0; name < count; name++, data++) {
1858		if (ipa_gsi_endpoint_data_empty(data))
1859			continue;	/* Skip over empty slots */
1860
1861		ipa_endpoint_init_one(ipa, name, data);
1862
1863		if (data->endpoint.filter_support)
1864			filter_map |= BIT(data->endpoint_id);
1865	}
1866
1867	if (!ipa_filter_map_valid(ipa, filter_map))
1868		goto err_endpoint_exit;
1869
1870	return filter_map;	/* Non-zero bitmask */
1871
1872err_endpoint_exit:
1873	ipa_endpoint_exit(ipa);
1874
1875	return 0;	/* Error */
1876}