// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2022 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *	--------	     ---------
 *	|      |	     |	     |
 *	|  AP  +<---.	.----+ Modem |
 *	|      +--. |	| .->+	     |
 *	|      |  | |	| |  |	     |
 *	--------  | |	| |  ---------
 *		  v |	v |
 *		--+-+---+-+--
 *		|    GSI    |
 *		|-----------|
 *		|	    |
 *		|    IPA    |
 *		|	    |
 *		-------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM.  After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */

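/* To make the ring/doorbell flow described above concrete, here is a
 * sketch (illustrative slot numbers, not code from this driver) of one
 * AP-to-IPA transfer:
 *
 *	1. The AP fills TREs in slots 5, 6 and 7 of a channel ring,
 *	   chaining them into a single transaction and requesting an
 *	   interrupt on completion of the last one.
 *	2. The AP writes the channel doorbell register with the address
 *	   of the first unfilled slot (8), handing slots 5-7 to the IPA.
 *	3. When the transfer completes, the IPA appends one entry to the
 *	   channel's event ring, pointing at the TRE in slot 7, rings
 *	   the event ring doorbell, and the AP takes an interrupt.
 *	4. Because elements complete strictly in order, that one event
 *	   also signals the completion of slots 5 and 6.
 */
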
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			50	/* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10
#define GSI_CHANNEL_MODEM_FLOW_RETRIES	5	/* disable flow control only */

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

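/* How the driver consumes these fields (see gsi_event_trans() and
 * gsi_evt_ring_update() below): xfer_ptr holds the bus address of the
 * channel TRE the event completes; len is the number of bytes actually
 * transferred, used for RX transactions; and chid identifies the
 * channel.  Multi-byte fields are little-endian, per the DOC comment
 * above.
 */
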
/** struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum amount of outstanding TRE data (in bytes)
 *	allowed in a single transaction on a channel.  This determines
 *	the amount of prefetch performed by the hardware.  We configure
 *	this to equal the size of the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/** union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
	return !!channel->gsi;
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Event ring commands are performed one at a time.  Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command.  Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val = BIT(evt_ring_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}

/* Channel commands are performed one at a time.  Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command.  Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
	u32 val = BIT(channel_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~event_mask;

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports.  Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset).  A breakpoint interrupt
	 * also exists, but we don't support that.  We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}

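/* A worked example of the ring arithmetic above, using illustrative
 * values (a 16-byte GSI_RING_ELEMENT_SIZE is assumed): for a ring with
 * count == 8 whose DMA address is 0x1000, index 9 wraps to slot 1, so
 *
 *	gsi_ring_virt(ring, 9)		== ring->virt + 16
 *	gsi_ring_addr(ring, 1)		== 0x1010
 *	gsi_ring_index(ring, 0x1010)	== 1
 *
 * Note that gsi_ring_addr() does not reduce its index argument; callers
 * apply the modulus first wherever wrapping is possible.
 */
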
/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
	struct completion *completion = &gsi->completion;

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}

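/* This is the command pattern used throughout this file: the caller
 * first enables the single completion interrupt that can signal its
 * command (e.g. gsi_irq_ev_ctrl_enable() or gsi_irq_ch_ctrl_enable()),
 * then calls gsi_command(); the interrupt handler wakes it by calling
 * complete(&gsi->completion).  One shared completion and a flat
 * GSI_CMD_TIMEOUT (50 milliseconds) suffice because commands are
 * issued one at a time, serialized by gsi->mutex in the issuing paths.
 */
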
/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
				 enum gsi_evt_cmd_opcode opcode)
{
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);

	gsi_irq_ev_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	/* Get initial event ring state */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, state);
		return -EINVAL;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, state);

	return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, state);
}

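/* The event ring state machine implemented by the commands above
 * (states are from enum gsi_evt_ring_state):
 *
 *	NOT_ALLOCATED --GSI_EVT_ALLOCATE--> ALLOCATED
 *	ALLOCATED/ERROR --GSI_EVT_RESET--> ALLOCATED
 *	ALLOCATED --GSI_EVT_DE_ALLOC--> NOT_ALLOCATED
 *
 * Each command is bracketed by two state reads: one to validate the
 * starting state, and one to confirm the expected transition occurred.
 */
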
/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void __iomem *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ch_ctrl_enable(gsi, channel_id);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);

	gsi_irq_ch_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in STOPPED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	/* A short delay is required before a RESET command */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

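/* The channel state machine implemented by the commands above (states
 * are from enum gsi_channel_state):
 *
 *	NOT_ALLOCATED --GSI_CH_ALLOCATE--> ALLOCATED
 *	ALLOCATED/STOPPED --GSI_CH_START--> STARTED
 *	STARTED --GSI_CH_STOP--> STOPPED (or STOP_IN_PROC, in which
 *		case the stop is retried after -EAGAIN)
 *	STOPPED/ERROR --GSI_CH_RESET--> ALLOCATED
 *	ALLOCATED --GSI_CH_DE_ALLOC--> NOT_ALLOCATED
 */
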
/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct gsi_ring *ring = &evt_ring->ring;
	size_t size;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	size = ring->count * GSI_RING_ELEMENT_SIZE;
	val = ev_r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = lower_32_bits(ring->addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
	val = upper_32_bits(ring->addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and the MSI high and low addresses are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware our "last processed" event (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
}

/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	u32 pending_id = trans_info->pending_id;
	struct gsi_trans *trans;
	u16 trans_id;

	if (channel->toward_ipa && pending_id != trans_info->free_id) {
		/* There is a small chance a TX transaction got allocated
		 * just before we disabled transmits, so check for that.
		 * The last allocated, committed, or pending transaction
		 * precedes the first free transaction.
		 */
		trans_id = trans_info->free_id - 1;
	} else if (trans_info->polled_id != pending_id) {
		/* Otherwise (TX or RX) we want to wait for anything that
		 * has completed, or has been polled but not released yet.
		 *
		 * The last completed or polled transaction precedes the
		 * first pending transaction.
		 */
		trans_id = pending_id - 1;
	} else {
		return NULL;
	}

	/* Caller will wait for this, so take a reference */
	trans = &trans_info->trans[trans_id % channel->tre_count];
	refcount_inc(&trans->refcount);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* We program all channels as GPI type/protocol */
	val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = lower_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
	val = upper_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* No need to use the doorbell engine starting at IPA v4.0 */
	if (gsi->version < IPA_VERSION_4_0 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch.  We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}
	/* All channels set DB_IN_BYTES */
	if (gsi->version >= IPA_VERSION_4_9)
		val |= DB_IN_BYTES;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = channel->trans_tre_max *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the low-order 16 bits of the last scratch
	 * register.  The next sequence assumes those bits remain unchanged
	 * between the read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

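/* A note on the GPI scratch values written above, using an assumed
 * 16-byte GSI_RING_ELEMENT_SIZE and a channel with trans_tre_max == 8:
 *
 *	gpi->max_outstanding_tre   = 8 * 16 == 128 bytes
 *	gpi->outstanding_threshold = 2 * 16 ==  32 bytes
 *
 * Given the layout of struct gsi_channel_scratch_gpi, those two u16
 * fields occupy the upper halves of scr.data.word3 and scr.data.word4
 * (on a little-endian CPU), which is why the words of the union are
 * simply written out to the four scratch registers.
 */
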
static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
	if (resume && gsi->version < IPA_VERSION_4_0)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	/* Enable NAPI and the completion interrupt */
	napi_enable(&channel->napi);
	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

	ret = __gsi_channel_start(channel, false);
	if (ret) {
		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
		napi_disable(&channel->napi);
	}

	return ret;
}

static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	return ret;
}

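/* Worst-case stop latency implied by the loop above: the STOP command
 * is attempted up to 1 + GSI_CHANNEL_STOP_RETRIES times, sleeping
 * 3-5 ms after each -EAGAIN (STOP_IN_PROC) result, and each attempt
 * may itself wait up to GSI_CMD_TIMEOUT for the command to complete.
 */
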
static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Wait for any underway transactions to complete before stopping. */
	gsi_channel_trans_quiesce(channel);

	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
	if (suspend && gsi->version < IPA_VERSION_4_0)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_stop_retry(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, false);
	if (ret)
		return ret;

	/* Disable the completion interrupt and NAPI if successful */
	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
	napi_disable(&channel->napi);

	return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	/* Hardware assumes this is 0 following reset */
	channel->tre_ring.index = 0;
	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a started channel for suspend */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, true);
	if (ret)
		return ret;

	/* Ensure NAPI polling has finished. */
	napi_synchronize(&channel->napi);

	return 0;
}

/* Resume a suspended channel (starting if stopped) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return __gsi_channel_start(channel, true);
}

/* Prevent all GSI interrupts while suspended */
void gsi_suspend(struct gsi *gsi)
{
	disable_irq(gsi->irq);
}

/* Allow all GSI interrupts again when resuming */
void gsi_resume(struct gsi *gsi)
{
	enable_irq(gsi->irq);
}

void gsi_trans_tx_committed(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];

	channel->trans_count++;
	channel->byte_count += trans->len;

	trans->trans_count = channel->trans_count;
	trans->byte_count = channel->byte_count;
}

void gsi_trans_tx_queued(struct gsi_trans *trans)
{
	u32 channel_id = trans->channel_id;
	struct gsi *gsi = trans->gsi;
	struct gsi_channel *channel;
	u32 trans_count;
	u32 byte_count;

	channel = &gsi->channel[channel_id];

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
}

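/* The counters above implement a delta scheme: the committed totals
 * only ever grow, and each report passes along the difference since
 * the previous report.  For example, if channel->byte_count is 9000
 * and queued_byte_count was 4000 at the last report, then
 * gsi_trans_tx_queued() reports 5000 newly-queued bytes and records
 * 9000 as the new baseline.  The same scheme is applied against the
 * compl_* counters for completions below.
 */
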
/**
 * gsi_trans_tx_completed() - Report completed TX transactions
 * @trans:	TX channel transaction that has completed
 *
 * Report that a transaction on a TX channel has completed.  At the time a
 * transaction is committed, we record *in the transaction* its channel's
 * committed transaction and byte counts.  Transactions are completed in
 * order, and the difference between the channel's byte/transaction count
 * when the transaction was committed and when it completes tells us
 * exactly how much data has been transferred while the transaction was
 * pending.
 *
 * We report this information to the network stack, which uses it to manage
 * the rate at which data is sent to hardware.
 */
static void gsi_trans_tx_completed(struct gsi_trans *trans)
{
	u32 channel_id = trans->channel_id;
	struct gsi *gsi = trans->gsi;
	struct gsi_channel *channel;
	u32 trans_count;
	u32 byte_count;

	channel = &gsi->channel[channel_id];
	trans_count = trans->trans_count - channel->compl_trans_count;
	byte_count = trans->byte_count - channel->compl_byte_count;

	channel->compl_trans_count += trans_count;
	channel->compl_byte_count += byte_count;

	ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);

		channel_mask ^= BIT(channel_id);

		complete(&gsi->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		complete(&gsi->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&gsi->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of GENERIC GSI
	 * commands.  We use these to allocate and halt channels on the
	 * modem's behalf due to a hardware quirk on IPA v4.2.  The modem
	 * "owns" channels even when the AP allocates them, and we have no
	 * way of knowing whether a modem channel's state has been changed.
	 *
	 * We also use GENERIC commands to enable/disable channel flow
	 * control for IPA v4.2+.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command.  We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 * Similarly, we could get an error back when updating flow control
	 * on a channel because it's not in the proper state.
	 *
	 * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
	 * error if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_INCORRECT_CHANNEL_STATE:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	gsi_irq_ieob_disable(gsi, event_mask);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0)
		return ret ? : -EINVAL;

	gsi->irq = ret;

	return 0;
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *
gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
{
	u32 channel_id = event->chid;
	struct gsi_channel *channel;
	struct gsi_trans *trans;
	u32 tre_offset;
	u32 tre_index;

	channel = &gsi->channel[channel_id];
	if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
		return NULL;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	trans = gsi_channel_trans_mapped(channel, tre_index);

	if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
		return NULL;

	return trans;
}

/**
 * gsi_evt_ring_update() - Update transaction state from hardware
 * @gsi:		GSI pointer
 * @evt_ring_id:	Event ring ID
 * @index:		Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * When an event for a TX channel arrives we use information in the
 * transaction to report the number of requests and bytes that have
 * been transferred.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field tells
 * us the first entry in need of processing.  The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction array.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_event *event_done;
	struct gsi_event *event;
	u32 event_avail;
	u32 old_index;

	/* Starting with the oldest un-processed event, determine which
	 * transaction (and which channel) is associated with the event.
	 * For RX channels, update each completed transaction with the
	 * number of bytes that were actually received.  For TX channels
	 * associated with a network device, report to the network stack
	 * the number of transfers and bytes this completion represents.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		struct gsi_trans *trans;

		trans = gsi_event_trans(gsi, event);
		if (!trans)
			return;

		if (trans->direction == DMA_FROM_DEVICE)
			trans->len = __le16_to_cpu(event->len);
		else
			gsi_trans_tx_completed(trans);

		gsi_trans_move_complete(trans);

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
	} while (event != event_done);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}

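/* Wrap handling in the loop above, with illustrative numbers: for a
 * ring with count == 8, old_index == 6 and index == 2, event_avail
 * starts at 2, so slots 6 and 7 are processed by advancing the event
 * pointer, which is then reset to slot 0; slots 0 and 1 follow, and
 * the loop stops when event reaches event_done (slot 2).
 */
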
/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	u32 size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size.
	 * The DMA address returned by dma_alloc_coherent() is guaranteed to
	 * be aligned to a power-of-2 number of pages, which satisfies the
	 * requirement.
	 */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!ring->virt)
		return -ENOMEM;

	ring->addr = addr;
	ring->count = count;
	ring->index = 0;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move newly completed transactions to completed state */
void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event. */
	trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
	if (!trans)
		return;

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	gsi_evt_ring_update(gsi, evt_ring_id, index);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first of a channel's completed transactions.
 * If no transactions are in completed state, the hardware is consulted to
 * determine whether any new transactions have completed.  If so, they're
 * moved to completed state and the first such transaction is returned.
 * If there are no more completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first completed transaction */
	trans = gsi_channel_trans_complete(channel);
	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count;

	channel = container_of(napi, struct gsi_channel, napi);
	for (count = 0; count < budget; count++) {
		struct gsi_trans *trans;

		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget && napi_complete(napi))
		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

	return count;
}

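/* The IEOB interrupt and NAPI hand off to one another: gsi_isr_ieob()
 * masks an event ring's IEOB interrupt and schedules NAPI, and
 * gsi_channel_poll() unmasks it only after polling fewer transactions
 * than the budget and successfully completing NAPI.  The interrupt
 * therefore stays disabled for as long as the channel is being
 * serviced in softirq context.
 */
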
/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}

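/* For example, on hardware supporting 20 event rings the bitmap starts
 * out as GENMASK(BITS_PER_LONG - 1, 20), marking ids 20 and above as
 * unavailable (on a 64-bit kernel the assignment truncates the mask to
 * the low 32 bits); reserving ids 10-16 for MHI then leaves ids 0-9
 * and 17-19 for gsi_evt_ring_id_alloc() to hand out.
 */
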
1583/* Setup function for a single channel */
1584static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
1585{
1586	struct gsi_channel *channel = &gsi->channel[channel_id];
1587	u32 evt_ring_id = channel->evt_ring_id;
1588	int ret;
1589
1590	if (!gsi_channel_initialized(channel))
1591		return 0;
1592
1593	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1594	if (ret)
1595		return ret;
1596
1597	gsi_evt_ring_program(gsi, evt_ring_id);
1598
1599	ret = gsi_channel_alloc_command(gsi, channel_id);
1600	if (ret)
1601		goto err_evt_ring_de_alloc;
1602
1603	gsi_channel_program(channel, true);
1604
1605	if (channel->toward_ipa)
1606		netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
1607				  gsi_channel_poll);
1608	else
1609		netif_napi_add(&gsi->dummy_dev, &channel->napi,
1610			       gsi_channel_poll);
1611
1612	return 0;
1613
1614err_evt_ring_de_alloc:
1615	/* We've done nothing with the event ring yet so don't reset */
1616	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1617
1618	return ret;
1619}
1620
1621/* Inverse of gsi_channel_setup_one() */
1622static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1623{
1624	struct gsi_channel *channel = &gsi->channel[channel_id];
1625	u32 evt_ring_id = channel->evt_ring_id;
1626
1627	if (!gsi_channel_initialized(channel))
1628		return;
1629
1630	netif_napi_del(&channel->napi);
1631
1632	gsi_channel_de_alloc_command(gsi, channel_id);
1633	gsi_evt_ring_reset_command(gsi, evt_ring_id);
1634	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1635}
1636
1637/* We use generic commands only to operate on modem channels.  We don't have
1638 * the ability to determine channel state for a modem channel, so we simply
1639 * issue the command and wait for it to complete.
1640 */
1641static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1642			       enum gsi_generic_cmd_opcode opcode,
1643			       u8 params)
1644{
1645	bool timeout;
1646	u32 val;
1647
1648	/* The error global interrupt type is always enabled (until we tear
1649	 * down), so we will keep it enabled.
1650	 *
1651	 * A generic EE command completes with a GSI global interrupt of
1652	 * type GP_INT1.  We only perform one generic command at a time
1653	 * (to allocate, halt, or enable/disable flow control on a modem
1654	 * channel), and only from this function.  So we enable the GP_INT1
1655	 * IRQ type here, and disable it again after the command completes.
1656	 */
1657	val = BIT(ERROR_INT) | BIT(GP_INT1);
1658	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1659
1660	/* First zero the result code field */
1661	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1662	val &= ~GENERIC_EE_RESULT_FMASK;
1663	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1664
1665	/* Now issue the command */
1666	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1667	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1668	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1669	val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
1670
1671	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
1672
1673	/* Disable the GP_INT1 IRQ type again */
1674	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1675
1676	if (!timeout)
1677		return gsi->result;
1678
1679	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1680		opcode, channel_id);
1681
1682	return -ETIMEDOUT;
1683}
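
/* The result consumed above is produced by the GP_INT1 interrupt
 * handler, which decodes the scratch register and records an errno in
 * gsi->result before signaling the completion gsi_command() waits on.
 * A minimal sketch of that decoding, assuming result codes along the
 * lines of those declared in gsi_reg.h (the exact set and mapping live
 * in the handler itself):
 *
 *	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
 *	switch (u32_get_bits(val, GENERIC_EE_RESULT_FMASK)) {
 *	case GENERIC_EE_SUCCESS:
 *		gsi->result = 0;
 *		break;
 *	case GENERIC_EE_INCORRECT_CHANNEL_STATE:
 *		gsi->result = -EAGAIN;
 *		break;
 *	default:
 *		gsi->result = -EIO;
 *		break;
 *	}
 *	complete(&gsi->completion);
 *
 * The -EAGAIN mapping is what makes the retry loops in the halt and
 * flow control helpers below meaningful.
 */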
1684
1685static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1686{
1687	return gsi_generic_command(gsi, channel_id,
1688				   GSI_GENERIC_ALLOCATE_CHANNEL, 0);
1689}
1690
1691static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1692{
1693	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
1694	int ret;
1695
1696	do
1697		ret = gsi_generic_command(gsi, channel_id,
1698					  GSI_GENERIC_HALT_CHANNEL, 0);
1699	while (ret == -EAGAIN && retries--);
1700
1701	if (ret)
1702		dev_err(gsi->dev, "error %d halting modem channel %u\n",
1703			ret, channel_id);
1704}
1705
1706/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
1707void
1708gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
1709{
1710	u32 retries = 0;
1711	u32 command;
1712	int ret;
1713
1714	command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
1715			 : GSI_GENERIC_DISABLE_FLOW_CONTROL;
1716	/* Disabling flow control on IPA v4.11+ can return -EAGAIN if an
1717	 * enable is still underway.  In that case we need to retry the command.
1718	 */
1719	if (!enable && gsi->version >= IPA_VERSION_4_11)
1720		retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;
1721
1722	do
1723		ret = gsi_generic_command(gsi, channel_id, command, 0);
1724	while (ret == -EAGAIN && retries--);
1725
1726	if (ret)
1727		dev_err(gsi->dev,
1728			"error %d %sabling modem channel %u flow control\n",
1729			ret, enable ? "en" : "dis", channel_id);
1730}
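
/* A sketch of how a caller might pair these operations (the real call
 * sites are in the IPA endpoint code; the pairing here is illustrative
 * only):
 *
 *	gsi_modem_channel_flow_control(gsi, channel_id, true);
 *	... quiesce or reconfigure the modem TX endpoint ...
 *	gsi_modem_channel_flow_control(gsi, channel_id, false);
 */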
1731
1732/* Setup function for channels */
1733static int gsi_channel_setup(struct gsi *gsi)
1734{
1735	u32 channel_id = 0;
1736	u32 mask;
1737	int ret;
1738
1739	gsi_irq_enable(gsi);
1740
1741	mutex_lock(&gsi->mutex);
1742
1743	do {
1744		ret = gsi_channel_setup_one(gsi, channel_id);
1745		if (ret)
1746			goto err_unwind;
1747	} while (++channel_id < gsi->channel_count);
1748
1749	/* Make sure no channels were defined that the hardware does not support */
1750	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1751		struct gsi_channel *channel = &gsi->channel[channel_id++];
1752
1753		if (!gsi_channel_initialized(channel))
1754			continue;
1755
1756		ret = -EINVAL;
1757		dev_err(gsi->dev, "channel %u not supported by hardware\n",
1758			channel_id - 1);
1759		channel_id = gsi->channel_count;
1760		goto err_unwind;
1761	}
1762
1763	/* Allocate modem channels if necessary */
1764	mask = gsi->modem_channel_bitmap;
1765	while (mask) {
1766		u32 modem_channel_id = __ffs(mask);
1767
1768		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1769		if (ret)
1770			goto err_unwind_modem;
1771
1772		/* Clear bit from mask only after success (for unwind) */
1773		mask ^= BIT(modem_channel_id);
1774	}
1775
1776	mutex_unlock(&gsi->mutex);
1777
1778	return 0;
1779
1780err_unwind_modem:
1781	/* Compute which modem channels need to be deallocated */
1782	mask ^= gsi->modem_channel_bitmap;
1783	while (mask) {
1784		channel_id = __fls(mask);
1785
1786		mask ^= BIT(channel_id);
1787
1788		gsi_modem_channel_halt(gsi, channel_id);
1789	}
1790
1791err_unwind:
1792	while (channel_id--)
1793		gsi_channel_teardown_one(gsi, channel_id);
1794
1795	mutex_unlock(&gsi->mutex);
1796
1797	gsi_irq_disable(gsi);
1798
1799	return ret;
1800}
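
/* A worked example of the modem-channel unwind arithmetic above (numbers
 * are hypothetical): suppose modem_channel_bitmap is 0x0a (channels 1
 * and 3).  The allocation loop first clears bit 1, so mask is 0x08 when
 * channel 3 is being allocated.  If that allocation fails,
 * err_unwind_modem computes mask ^= 0x0a, i.e. 0x08 ^ 0x0a = 0x02--
 * exactly the set of channels already allocated, each of which is then
 * halted.
 */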
1801
1802/* Inverse of gsi_channel_setup() */
1803static void gsi_channel_teardown(struct gsi *gsi)
1804{
1805	u32 mask = gsi->modem_channel_bitmap;
1806	u32 channel_id;
1807
1808	mutex_lock(&gsi->mutex);
1809
1810	while (mask) {
1811		channel_id = __fls(mask);
1812
1813		mask ^= BIT(channel_id);
1814
1815		gsi_modem_channel_halt(gsi, channel_id);
1816	}
1817
1818	channel_id = gsi->channel_count - 1;
1819	do
1820		gsi_channel_teardown_one(gsi, channel_id);
1821	while (channel_id--);
1822
1823	mutex_unlock(&gsi->mutex);
1824
1825	gsi_irq_disable(gsi);
1826}
1827
1828/* Turn off all GSI interrupts initially */
1829static int gsi_irq_setup(struct gsi *gsi)
1830{
1831	int ret;
1832
1833	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1834	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1835
1836	/* Disable all interrupt types */
1837	gsi_irq_type_update(gsi, 0);
1838
1839	/* Clear all type-specific interrupt masks */
1840	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
1841	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
1842	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1843	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
1844
1845	/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
1846	if (gsi->version > IPA_VERSION_3_1) {
1847		u32 offset;
1848
1849		/* These registers are in the non-adjusted address range */
1850		offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
1851		iowrite32(0, gsi->virt_raw + offset);
1852		offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
1853		iowrite32(0, gsi->virt_raw + offset);
1854	}
1855
1856	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
1857
1858	ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
1859	if (ret)
1860		dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
1861
1862	return ret;
1863}
1864
1865static void gsi_irq_teardown(struct gsi *gsi)
1866{
1867	free_irq(gsi->irq, gsi);
1868}
1869
1870/* Get the number of supported channel and event rings; there is no gsi_ring_teardown() */
1871static int gsi_ring_setup(struct gsi *gsi)
1872{
1873	struct device *dev = gsi->dev;
1874	u32 count;
1875	u32 val;
1876
1877	if (gsi->version < IPA_VERSION_3_5_1) {
1878		/* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
1879		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1880		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1881
1882		return 0;
1883	}
1884
1885	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1886
1887	count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1888	if (!count) {
1889		dev_err(dev, "GSI reports zero channels supported\n");
1890		return -EINVAL;
1891	}
1892	if (count > GSI_CHANNEL_COUNT_MAX) {
1893		dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
1894			 GSI_CHANNEL_COUNT_MAX, count);
1895		count = GSI_CHANNEL_COUNT_MAX;
1896	}
1897	gsi->channel_count = count;
1898
1899	count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1900	if (!count) {
1901		dev_err(dev, "GSI reports zero event rings supported\n");
1902		return -EINVAL;
1903	}
1904	if (count > GSI_EVT_RING_COUNT_MAX) {
1905		dev_warn(dev,
1906			 "limiting to %u event rings; hardware supports %u\n",
1907			 GSI_EVT_RING_COUNT_MAX, count);
1908		count = GSI_EVT_RING_COUNT_MAX;
1909	}
1910	gsi->evt_ring_count = count;
1911
1912	return 0;
1913}
1914
1915/* Setup function for GSI.  GSI firmware must be loaded and initialized */
1916int gsi_setup(struct gsi *gsi)
1917{
1918	u32 val;
1919	int ret;
1920
1921	/* Here is where we first touch the GSI hardware */
1922	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1923	if (!(val & ENABLED_FMASK)) {
1924		dev_err(gsi->dev, "GSI has not been enabled\n");
1925		return -EIO;
1926	}
1927
1928	ret = gsi_irq_setup(gsi);
1929	if (ret)
1930		return ret;
1931
1932	ret = gsi_ring_setup(gsi);	/* No matching teardown required */
1933	if (ret)
1934		goto err_irq_teardown;
1935
1936	/* Initialize the error log */
1937	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1938
1939	ret = gsi_channel_setup(gsi);
1940	if (ret)
1941		goto err_irq_teardown;
1942
1943	return 0;
1944
1945err_irq_teardown:
1946	gsi_irq_teardown(gsi);
1947
1948	return ret;
1949}
1950
1951/* Inverse of gsi_setup() */
1952void gsi_teardown(struct gsi *gsi)
1953{
1954	gsi_channel_teardown(gsi);
1955	gsi_irq_teardown(gsi);
1956}
1957
1958/* Initialize a channel's event ring */
1959static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1960{
1961	struct gsi *gsi = channel->gsi;
1962	struct gsi_evt_ring *evt_ring;
1963	int ret;
1964
1965	ret = gsi_evt_ring_id_alloc(gsi);
1966	if (ret < 0)
1967		return ret;
1968	channel->evt_ring_id = ret;
1969
1970	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1971	evt_ring->channel = channel;
1972
1973	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1974	if (!ret)
1975		return 0;	/* Success! */
1976
1977	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1978		ret, gsi_channel_id(channel));
1979
1980	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1981
1982	return ret;
1983}
1984
1985/* Inverse of gsi_channel_evt_ring_init() */
1986static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
1987{
1988	u32 evt_ring_id = channel->evt_ring_id;
1989	struct gsi *gsi = channel->gsi;
1990	struct gsi_evt_ring *evt_ring;
1991
1992	evt_ring = &gsi->evt_ring[evt_ring_id];
1993	gsi_ring_free(gsi, &evt_ring->ring);
1994	gsi_evt_ring_id_free(gsi, evt_ring_id);
1995}
1996
1997static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
1998				   const struct ipa_gsi_endpoint_data *data)
1999{
2000	const struct gsi_channel_data *channel_data;
2001	u32 channel_id = data->channel_id;
2002	struct device *dev = gsi->dev;
2003
2004	/* Make sure channel ids are in the range the driver supports */
2005	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
2006		dev_err(dev, "bad channel id %u; must be less than %u\n",
2007			channel_id, GSI_CHANNEL_COUNT_MAX);
2008		return false;
2009	}
2010
2011	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
2012		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
2013		return false;
2014	}
2015
2016	if (command && !data->toward_ipa) {
2017		dev_err(dev, "command channel %u is not TX\n", channel_id);
2018		return false;
2019	}
2020
2021	channel_data = &data->channel;
2022
2023	if (!channel_data->tlv_count ||
2024	    channel_data->tlv_count > GSI_TLV_MAX) {
2025		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
2026			channel_id, channel_data->tlv_count, GSI_TLV_MAX);
2027		return false;
2028	}
2029
2030	if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
2031		dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
2032			channel_id, IPA_COMMAND_TRANS_TRE_MAX,
2033			channel_data->tlv_count);
2034		return false;
2035	}
2036
2037	/* We have to allow at least one maximally-sized transaction to
2038	 * be outstanding (which would use tlv_count TREs).  Given how
2039	 * gsi_channel_tre_max() is computed, tre_count has to be almost
2040	 * twice the TLV FIFO size to satisfy this requirement.
2041	 */
2042	if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
2043		dev_err(dev, "channel %u TRE count %u too small for TLV count %u\n",
2044			channel_id, channel_data->tre_count,
2045			channel_data->tlv_count);
2046		return false;
2047	}
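
	/* A worked example with hypothetical numbers: tlv_count = 3
	 * requires tre_count >= 2 * 3 - 1 = 5, so the smallest valid
	 * power-of-2 ring holds 8 TREs.  gsi_channel_tre_max() would
	 * then permit 8 - (3 - 1) = 6 outstanding TREs, enough for at
	 * least one maximally-sized (3-TRE) transaction, as required.
	 */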
2048
2049	if (!is_power_of_2(channel_data->tre_count)) {
2050		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
2051			channel_id, channel_data->tre_count);
2052		return false;
2053	}
2054
2055	if (!is_power_of_2(channel_data->event_count)) {
2056		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
2057			channel_id, channel_data->event_count);
2058		return false;
2059	}
2060
2061	return true;
2062}
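
/* A hypothetical configuration satisfying every check above (values are
 * illustrative only, not taken from any platform's data):
 *
 *	static const struct gsi_channel_data example_data = {
 *		.tre_count	= 256,	// power of 2, >= 2 * tlv_count - 1
 *		.event_count	= 256,	// power of 2
 *		.tlv_count	= 8,	// nonzero and <= GSI_TLV_MAX
 *	};
 */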
2063
2064/* Init function for a single channel */
2065static int gsi_channel_init_one(struct gsi *gsi,
2066				const struct ipa_gsi_endpoint_data *data,
2067				bool command)
2068{
2069	struct gsi_channel *channel;
2070	u32 tre_count;
2071	int ret;
2072
2073	if (!gsi_channel_data_valid(gsi, command, data))
2074		return -EINVAL;
2075
2076	/* Worst case we need an event for every outstanding TRE */
2077	if (data->channel.tre_count > data->channel.event_count) {
2078		tre_count = data->channel.event_count;
2079		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
2080			 data->channel_id, tre_count);
2081	} else {
2082		tre_count = data->channel.tre_count;
2083	}
2084
2085	channel = &gsi->channel[data->channel_id];
2086	memset(channel, 0, sizeof(*channel));
2087
2088	channel->gsi = gsi;
2089	channel->toward_ipa = data->toward_ipa;
2090	channel->command = command;
2091	channel->trans_tre_max = data->channel.tlv_count;
2092	channel->tre_count = tre_count;
2093	channel->event_count = data->channel.event_count;
2094
2095	ret = gsi_channel_evt_ring_init(channel);
2096	if (ret)
2097		goto err_clear_gsi;
2098
2099	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
2100	if (ret) {
2101		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
2102			ret, data->channel_id);
2103		goto err_channel_evt_ring_exit;
2104	}
2105
2106	ret = gsi_channel_trans_init(gsi, data->channel_id);
2107	if (ret)
2108		goto err_ring_free;
2109
2110	if (command) {
2111		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
2112
2113		ret = ipa_cmd_pool_init(channel, tre_max);
2114	}
2115	if (!ret)
2116		return 0;	/* Success! */
2117
2118	gsi_channel_trans_exit(channel);
2119err_ring_free:
2120	gsi_ring_free(gsi, &channel->tre_ring);
2121err_channel_evt_ring_exit:
2122	gsi_channel_evt_ring_exit(channel);
2123err_clear_gsi:
2124	channel->gsi = NULL;	/* Mark it not (fully) initialized */
2125
2126	return ret;
2127}
2128
2129/* Inverse of gsi_channel_init_one() */
2130static void gsi_channel_exit_one(struct gsi_channel *channel)
2131{
2132	if (!gsi_channel_initialized(channel))
2133		return;
2134
2135	if (channel->command)
2136		ipa_cmd_pool_exit(channel);
2137	gsi_channel_trans_exit(channel);
2138	gsi_ring_free(channel->gsi, &channel->tre_ring);
2139	gsi_channel_evt_ring_exit(channel);
2140}
2141
2142/* Init function for channels */
2143static int gsi_channel_init(struct gsi *gsi, u32 count,
2144			    const struct ipa_gsi_endpoint_data *data)
2145{
2146	bool modem_alloc;
2147	int ret = 0;
2148	u32 i;
2149
2150	/* IPA v4.2 requires the AP to allocate channels for the modem */
2151	modem_alloc = gsi->version == IPA_VERSION_4_2;
2152
2153	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
2154	gsi->ieob_enabled_bitmap = 0;
2155
2156	/* The endpoint data array is indexed by endpoint name */
2157	for (i = 0; i < count; i++) {
2158		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
2159
2160		if (ipa_gsi_endpoint_data_empty(&data[i]))
2161			continue;	/* Skip over empty slots */
2162
2163		/* Mark modem channels to be allocated (hardware workaround) */
2164		if (data[i].ee_id == GSI_EE_MODEM) {
2165			if (modem_alloc)
2166				gsi->modem_channel_bitmap |=
2167						BIT(data[i].channel_id);
2168			continue;
2169		}
2170
2171		ret = gsi_channel_init_one(gsi, &data[i], command);
2172		if (ret)
2173			goto err_unwind;
2174	}
2175
2176	return ret;
2177
2178err_unwind:
2179	while (i--) {
2180		if (ipa_gsi_endpoint_data_empty(&data[i]))
2181			continue;
2182		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
2183			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
2184			continue;
2185		}
2186		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
2187	}
2188
2189	return ret;
2190}
2191
2192/* Inverse of gsi_channel_init() */
2193static void gsi_channel_exit(struct gsi *gsi)
2194{
2195	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
2196
2197	do
2198		gsi_channel_exit_one(&gsi->channel[channel_id]);
2199	while (channel_id--);
2200	gsi->modem_channel_bitmap = 0;
2201}
2202
2203/* Init function for GSI.  GSI hardware does not need to be "ready" */
2204int gsi_init(struct gsi *gsi, struct platform_device *pdev,
2205	     enum ipa_version version, u32 count,
2206	     const struct ipa_gsi_endpoint_data *data)
2207{
2208	struct device *dev = &pdev->dev;
2209	struct resource *res;
2210	resource_size_t size;
2211	u32 adjust;
2212	int ret;
2213
2214	gsi_validate_build();
2215
2216	gsi->dev = dev;
2217	gsi->version = version;
2218
2219	/* GSI uses NAPI on all channels.  Create a dummy network device
2220	 * for the channel NAPI contexts to be associated with.
2221	 */
2222	init_dummy_netdev(&gsi->dummy_dev);
2223
2224	/* Get GSI memory range and map it */
2225	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
2226	if (!res) {
2227		dev_err(dev, "DT error getting \"gsi\" memory property\n");
2228		return -ENODEV;
2229	}
2230
2231	size = resource_size(res);
2232	if (res->start > U32_MAX || size > U32_MAX - res->start) {
2233		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
2234		return -EINVAL;
2235	}
2236
2237	/* Make sure we can make our pointer adjustment if necessary */
2238	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
2239	if (res->start < adjust) {
2240		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
2241			adjust);
2242		return -EINVAL;
2243	}
2244
2245	gsi->virt_raw = ioremap(res->start, size);
2246	if (!gsi->virt_raw) {
2247		dev_err(dev, "unable to remap \"gsi\" memory\n");
2248		return -ENOMEM;
2249	}
2250	/* Most registers are accessed using an adjusted register range */
2251	gsi->virt = gsi->virt_raw - adjust;
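
	/* To illustrate the adjustment (offsets hypothetical): if gsi_reg.h
	 * defines offsets for a layout in which an EE register lives at
	 * 0xd000 + x from the start of the region, and IPA v4.5+ hardware
	 * moved that register down to plain x, then biasing gsi->virt down
	 * by GSI_EE_REG_ADJUST makes gsi->virt + (0xd000 + x) resolve to
	 * the correct mapped address on the newer hardware.  On pre-v4.5
	 * hardware adjust is zero and gsi->virt == gsi->virt_raw.
	 */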
2252
2253	init_completion(&gsi->completion);
2254
2255	ret = gsi_irq_init(gsi, pdev);	/* No matching exit required */
2256	if (ret)
2257		goto err_iounmap;
2258
2259	ret = gsi_channel_init(gsi, count, data);
2260	if (ret)
2261		goto err_iounmap;
2262
2263	mutex_init(&gsi->mutex);
2264
2265	return 0;
2266
2267err_iounmap:
2268	iounmap(gsi->virt_raw);
2269
2270	return ret;
2271}
2272
2273/* Inverse of gsi_init() */
2274void gsi_exit(struct gsi *gsi)
2275{
2276	mutex_destroy(&gsi->mutex);
2277	gsi_channel_exit(gsi);
2278	iounmap(gsi->virt_raw);
2279}
2280
2281/* The maximum number of outstanding TREs on a channel.  This limits
2282 * a channel's maximum number of transactions outstanding (worst case
2283 * is one TRE per transaction).
2284 *
2285 * The absolute limit is the number of TREs in the channel's TRE ring,
2286 * and in theory we should be able to use all of them.  But in practice,
2287 * doing that led to the hardware reporting exhaustion of event ring
2288 * slots for writing completion information.  So the hardware limit
2289 * would be (tre_count - 1).
2290 *
2291 * We reduce it a bit further though.  Transaction resource pools are
2292 * sized to be a little larger than this maximum, to allow resource
2293 * allocations to always be contiguous.  The number of entries in a
2294 * TRE ring buffer is a power of 2, and the extra resources in a pool
2295 * tend to nearly double the memory allocated for it.  Reducing the
2296 * maximum number of outstanding TREs allows the number of entries in
2297 * a pool to avoid crossing that power-of-2 boundary, and this can
2298 * substantially reduce pool memory requirements.  The number we
2299 * reduce it by matches the number added in gsi_trans_pool_init().
2300 */
2301u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2302{
2303	struct gsi_channel *channel = &gsi->channel[channel_id];
2304
2305	/* Hardware limit is channel->tre_count - 1 */
2306	return channel->tre_count - (channel->trans_tre_max - 1);
2307}
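
/* For example (hypothetical numbers): a channel with tre_count = 256 and
 * trans_tre_max = 8 is limited to 256 - (8 - 1) = 249 outstanding TREs.
 * If gsi_trans_pool_init() then adds back trans_tre_max - 1 = 7 entries
 * to keep allocations contiguous, the pool holds 249 + 7 = 256 entries,
 * staying within the original power-of-2 rather than spilling past it.
 */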