Linux v6.9.4: drivers/net/ipa/ipa_main.c
   1// SPDX-License-Identifier: GPL-2.0
   2
   3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2018-2023 Linaro Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <linux/atomic.h>
   9#include <linux/bitfield.h>
  10#include <linux/bug.h>
  11#include <linux/io.h>
  12#include <linux/firmware.h>
  13#include <linux/module.h>
  14#include <linux/of.h>
  15#include <linux/of_address.h>
  16#include <linux/platform_device.h>
  17#include <linux/pm_runtime.h>
  18#include <linux/firmware/qcom/qcom_scm.h>
  19#include <linux/soc/qcom/mdt_loader.h>
  20
  21#include "ipa.h"
  22#include "ipa_power.h"
  23#include "ipa_data.h"
  24#include "ipa_endpoint.h"
  25#include "ipa_resource.h"
  26#include "ipa_cmd.h"
  27#include "ipa_reg.h"
  28#include "ipa_mem.h"
  29#include "ipa_table.h"
  30#include "ipa_smp2p.h"
  31#include "ipa_modem.h"
  32#include "ipa_uc.h"
  33#include "ipa_interrupt.h"
  34#include "gsi_trans.h"
  35#include "ipa_sysfs.h"
  36
  37/**
  38 * DOC: The IP Accelerator
  39 *
  40 * This driver supports the Qualcomm IP Accelerator (IPA), which is a
  41 * networking component found in many Qualcomm SoCs.  The IPA is connected
  42 * to the application processor (AP), but is also connected to (and partially
  43 * controlled by) other "execution environments" (EEs), such as a modem.
  44 *
  45 * The IPA is the conduit between the AP and the modem that carries network
  46 * traffic.  This driver presents a network interface representing the
  47 * connection of the modem to external (e.g. LTE) networks.
  48 *
  49 * The IPA provides protocol checksum calculation, offloading this work
  50 * from the AP.  The IPA offers additional functionality, including routing,
  51 * filtering, and NAT support, but that more advanced functionality is not
  52 * currently supported.  Despite that, some resources--including routing
  53 * tables and filter tables--are defined in this driver because they must
  54 * be initialized even when the advanced hardware features are not used.
  55 *
  56 * There are two distinct layers that implement the IPA hardware, and this
  57 * is reflected in the organization of the driver.  The generic software
  58 * interface (GSI) is an integral component of the IPA, providing a
  59 * well-defined communication layer between the AP subsystem and the IPA
  60 * core.  The GSI implements a set of "channels" used for communication
  61 * between the AP and the IPA.
  62 *
  63 * The IPA layer uses GSI channels to implement its "endpoints".  And while
  64 * a GSI channel carries data between the AP and the IPA, a pair of IPA
  65 * endpoints is used to carry traffic between two EEs.  Specifically, the main
  66 * modem network interface is implemented by two pairs of endpoints:  a TX
  67 * endpoint on the AP coupled with an RX endpoint on the modem; and another
  68 * RX endpoint on the AP receiving data from a TX endpoint on the modem.
  69 */
  70
  71/* The name of the GSI firmware file relative to /lib/firmware */
  72#define IPA_FW_PATH_DEFAULT	"ipa_fws.mdt"
  73#define IPA_PAS_ID		15
  74
  75/* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */
  76/* IPA v5.5+ does not specify Qtime timestamp config for DPL */
  77#define DPL_TIMESTAMP_SHIFT	14	/* ~1.172 kHz, ~853 usec per tick */
  78#define TAG_TIMESTAMP_SHIFT	14
  79#define NAT_TIMESTAMP_SHIFT	24	/* ~1.144 Hz, ~874 msec per tick */
  80
  81/* Divider for 19.2 MHz crystal oscillator clock to get common timer clock */
  82#define IPA_XO_CLOCK_DIVIDER	192	/* 1 is subtracted where used */
  83
  84/**
  85 * enum ipa_firmware_loader: How GSI firmware gets loaded
  86 *
  87 * @IPA_LOADER_DEFER:		System not ready; try again later
  88 * @IPA_LOADER_SELF:		AP loads GSI firmware
  89 * @IPA_LOADER_MODEM:		Modem loads GSI firmware, signals when done
  90 * @IPA_LOADER_SKIP:		Neither AP nor modem need to load GSI firmware
  91 * @IPA_LOADER_INVALID:	GSI firmware loader specification is invalid
  92 */
  93enum ipa_firmware_loader {
  94	IPA_LOADER_DEFER,
  95	IPA_LOADER_SELF,
  96	IPA_LOADER_MODEM,
  97	IPA_LOADER_SKIP,
  98	IPA_LOADER_INVALID,
  99};
 100
 101/**
 102 * ipa_setup() - Set up IPA hardware
 103 * @ipa:	IPA pointer
 104 *
 105 * Perform initialization that requires issuing immediate commands on
 106 * the command TX endpoint.  If the modem is doing GSI firmware load
 107 * and initialization, this function will be called when an SMP2P
 108 * interrupt has been signaled by the modem.  Otherwise it will be
 109 * called from ipa_probe() after GSI firmware has been successfully
 110 * loaded, authenticated, and started by Trust Zone.
 111 */
 112int ipa_setup(struct ipa *ipa)
 113{
 114	struct ipa_endpoint *exception_endpoint;
 115	struct ipa_endpoint *command_endpoint;
 116	struct device *dev = ipa->dev;
 117	int ret;
 118
 119	ret = gsi_setup(&ipa->gsi);
 120	if (ret)
 121		return ret;
 122
 123	ret = ipa_power_setup(ipa);
 124	if (ret)
 125		goto err_gsi_teardown;
 126
 127	ipa_endpoint_setup(ipa);
 128
 129	/* We need to use the AP command TX endpoint to perform other
 130	 * initialization, so we enable it first.
 131	 */
 132	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
 133	ret = ipa_endpoint_enable_one(command_endpoint);
 134	if (ret)
 135		goto err_endpoint_teardown;
 136
 137	ret = ipa_mem_setup(ipa);	/* No matching teardown required */
 138	if (ret)
 139		goto err_command_disable;
 140
 141	ret = ipa_table_setup(ipa);	/* No matching teardown required */
 142	if (ret)
 143		goto err_command_disable;
 144
 145	/* Enable the exception handling endpoint, and tell the hardware
 146	 * to use it by default.
 147	 */
 148	exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
 149	ret = ipa_endpoint_enable_one(exception_endpoint);
 150	if (ret)
 151		goto err_command_disable;
 152
 153	ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
 154
 155	/* We're all set.  Now prepare for communication with the modem */
 156	ret = ipa_qmi_setup(ipa);
 157	if (ret)
 158		goto err_default_route_clear;
 159
 160	ipa->setup_complete = true;
 161
 162	dev_info(dev, "IPA driver setup completed successfully\n");
 163
 164	return 0;
 165
 166err_default_route_clear:
 167	ipa_endpoint_default_route_clear(ipa);
 168	ipa_endpoint_disable_one(exception_endpoint);
 169err_command_disable:
 170	ipa_endpoint_disable_one(command_endpoint);
 171err_endpoint_teardown:
 172	ipa_endpoint_teardown(ipa);
 173	ipa_power_teardown(ipa);
 174err_gsi_teardown:
 175	gsi_teardown(&ipa->gsi);
 176
 177	return ret;
 178}
 179
 180/**
 181 * ipa_teardown() - Inverse of ipa_setup()
 182 * @ipa:	IPA pointer
 183 */
 184static void ipa_teardown(struct ipa *ipa)
 185{
 186	struct ipa_endpoint *exception_endpoint;
 187	struct ipa_endpoint *command_endpoint;
 188
 189	/* We're going to tear everything down, as if setup never completed */
 190	ipa->setup_complete = false;
 191
 192	ipa_qmi_teardown(ipa);
 193	ipa_endpoint_default_route_clear(ipa);
 194	exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
 195	ipa_endpoint_disable_one(exception_endpoint);
 196	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
 197	ipa_endpoint_disable_one(command_endpoint);
 198	ipa_endpoint_teardown(ipa);
 199	ipa_power_teardown(ipa);
 200	gsi_teardown(&ipa->gsi);
 201}
 202
 203static void
 204ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
 205{
 206	const struct reg *reg;
 207	u32 val;
 208
 209	/* IPA v4.5+ has no backward compatibility register */
 210	if (ipa->version >= IPA_VERSION_4_5)
 211		return;
 212
 213	reg = ipa_reg(ipa, IPA_BCR);
 214	val = data->backward_compat;
 215	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 216}
 217
 218static void ipa_hardware_config_tx(struct ipa *ipa)
 219{
 220	enum ipa_version version = ipa->version;
 221	const struct reg *reg;
 222	u32 offset;
 223	u32 val;
 224
 225	if (version <= IPA_VERSION_4_0 || version >= IPA_VERSION_4_5)
 226		return;
 227
 228	/* Disable PA mask to allow HOLB drop */
 229	reg = ipa_reg(ipa, IPA_TX_CFG);
 230	offset = reg_offset(reg);
 231
 232	val = ioread32(ipa->reg_virt + offset);
 233
 234	val &= ~reg_bit(reg, PA_MASK_EN);
 235
 236	iowrite32(val, ipa->reg_virt + offset);
 237}
 238
 239static void ipa_hardware_config_clkon(struct ipa *ipa)
 240{
 241	enum ipa_version version = ipa->version;
 242	const struct reg *reg;
 243	u32 val;
 244
 245	if (version >= IPA_VERSION_4_5)
 246		return;
 247
 248	if (version < IPA_VERSION_4_0 && version != IPA_VERSION_3_1)
 249		return;
 250
 251	/* Implement some hardware workarounds */
 252	reg = ipa_reg(ipa, CLKON_CFG);
 253	if (version == IPA_VERSION_3_1) {
 254		/* Disable MISC clock gating */
 255		val = reg_bit(reg, CLKON_MISC);
 256	} else {	/* IPA v4.0+ */
 257		/* Enable open global clocks in the CLKON configuration */
 258		val = reg_bit(reg, CLKON_GLOBAL);
 259		val |= reg_bit(reg, GLOBAL_2X_CLK);
 260	}
 261
 262	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 263}
 264
 265/* Configure bus access behavior for IPA components */
 266static void ipa_hardware_config_comp(struct ipa *ipa)
 267{
 268	const struct reg *reg;
 269	u32 offset;
 270	u32 val;
 271
 272	/* Nothing to configure prior to IPA v4.0 */
 273	if (ipa->version < IPA_VERSION_4_0)
 274		return;
 275
 276	reg = ipa_reg(ipa, COMP_CFG);
 277	offset = reg_offset(reg);
 278
 279	val = ioread32(ipa->reg_virt + offset);
 280
 281	if (ipa->version == IPA_VERSION_4_0) {
 282		val &= ~reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
 283		val &= ~reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
 284		val &= ~reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
 285	} else if (ipa->version < IPA_VERSION_4_5) {
 286		val |= reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
 287	} else {
 288		/* For IPA v4.5+ FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
 289	}
 290
 291	val |= reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
 292	val |= reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
 293
 294	iowrite32(val, ipa->reg_virt + offset);
 295}
 296
 297/* Configure DDR and (possibly) PCIe max read/write QSB values */
 298static void
 299ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
 300{
 301	const struct ipa_qsb_data *data0;
 302	const struct ipa_qsb_data *data1;
 303	const struct reg *reg;
 304	u32 val;
 305
 306	/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
 307	data0 = &data->qsb_data[IPA_QSB_MASTER_DDR];
 308	if (data->qsb_count > 1)
 309		data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];
 310
 311	/* Max outstanding write accesses for QSB masters */
 312	reg = ipa_reg(ipa, QSB_MAX_WRITES);
 313
 314	val = reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
 315	if (data->qsb_count > 1)
 316		val |= reg_encode(reg, GEN_QMB_1_MAX_WRITES, data1->max_writes);
 317
 318	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 319
 320	/* Max outstanding read accesses for QSB masters */
 321	reg = ipa_reg(ipa, QSB_MAX_READS);
 322
 323	val = reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
 324	if (ipa->version >= IPA_VERSION_4_0)
 325		val |= reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
 326				  data0->max_reads_beats);
 327	if (data->qsb_count > 1) {
 328		val = reg_encode(reg, GEN_QMB_1_MAX_READS, data1->max_reads);
 329		if (ipa->version >= IPA_VERSION_4_0)
 330			val |= reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
 331					  data1->max_reads_beats);
 332	}
 333
 334	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 335}
 336
 337/* The internal inactivity timer clock is used for the aggregation timer */
 338#define TIMER_FREQUENCY	32000		/* 32 kHz inactivity timer clock */
 339
 340/* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY
 341 * field to represent the given number of microseconds.  The value is one
 342 * less than the number of timer ticks in the requested period.  0 is not
 343 * a valid granularity value (so for example @usec must be at least 16 for
 344 * a TIMER_FREQUENCY of 32000).
 345 */
 346static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
 347{
 348	return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
 349}
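
/* As a quick sanity check of the math above, the following stand-alone
 * sketch (not part of the driver) computes the field value for a 500
 * microsecond period, which is the IPA_AGGR_GRANULARITY value the driver
 * uses (defined in another file); DIV_ROUND_CLOSEST() is re-implemented
 * here for unsigned values only.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int usec = 500;	/* assumed IPA_AGGR_GRANULARITY */
	unsigned int ticks = DIV_ROUND_CLOSEST(usec * 32000u, 1000000u);

	/* 500 usec at 32 kHz is 16 ticks, so the field written is 15 */
	printf("AGGR_GRANULARITY field for %u usec: %u\n", usec, ticks - 1);

	return 0;
}
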
 350
 351/* IPA uses unified Qtime starting at IPA v4.5, implementing various
 352 * timestamps and timers independent of the IPA core clock rate.  The
 353 * Qtimer is based on a 56-bit timestamp incremented at each tick of
 354 * a 19.2 MHz SoC crystal oscillator (XO clock).
 355 *
 356 * For IPA timestamps (tag, NAT, data path logging) a lower resolution
 357 * timestamp is achieved by shifting the Qtimer timestamp value right
 358 * some number of bits to produce the low-order bits of the coarser
 359 * granularity timestamp.
 360 *
 361 * For timers, a common timer clock is derived from the XO clock using
 362 * a divider (we use 192, to produce a 100 kHz timer clock).  From
 363 * this common clock, three "pulse generators" are used to produce
 364 * timer ticks at a configurable frequency.  IPA timers (such as
 365 * those used for aggregation or head-of-line block handling) now
 366 * define their period based on one of these pulse generators.
 367 */
 368static void ipa_qtime_config(struct ipa *ipa)
 369{
 370	const struct reg *reg;
 371	u32 offset;
 372	u32 val = 0;
 373
 374	/* Timer clock divider must be disabled when we change the rate */
 375	reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
 376	iowrite32(0, ipa->reg_virt + reg_offset(reg));
 377
 378	reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
 379	if (ipa->version < IPA_VERSION_5_5) {
 380		/* Set DPL time stamp resolution to use Qtime (not 1 msec) */
 381		val = reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
 382		val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
 383	}
 384	/* Configure tag and NAT Qtime timestamp resolution as well */
 385	val |= reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
 386	val |= reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
 387
 388	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 389
 390	/* Set granularity of pulse generators used for other timers */
 391	reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
 392	val = reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
 393	val |= reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
 394	if (ipa->version >= IPA_VERSION_5_0) {
 395		val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_10_MS);
 396		val |= reg_encode(reg, PULSE_GRAN_3, IPA_GRAN_10_MS);
 397	} else {
 398		val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
 399	}
 400
 401	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 402
 403	/* Actual divider is 1 more than value supplied here */
 404	reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
 405	offset = reg_offset(reg);
 406
 407	val = reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
 408
 409	iowrite32(val, ipa->reg_virt + offset);
 410
 411	/* Divider value is set; re-enable the common timer clock divider */
 412	val |= reg_bit(reg, DIV_ENABLE);
 413
 414	iowrite32(val, ipa->reg_virt + offset);
 415}
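
/* To make the divider arithmetic concrete, this stand-alone sketch
 * (illustrative only; the 19.2 MHz rate and the divider of 192 come from
 * the constants near the top of this file) prints the value written to
 * DIV_VALUE and the resulting common timer clock.
 */
#include <stdio.h>

int main(void)
{
	unsigned int xo_hz = 19200000;	/* 19.2 MHz crystal oscillator (XO) */
	unsigned int divider = 192;	/* IPA_XO_CLOCK_DIVIDER */

	/* The DIV_VALUE field holds one less than the actual divider */
	printf("DIV_VALUE written: %u\n", divider - 1);		/* 191 */
	printf("Common timer clock: %u Hz\n", xo_hz / divider);	/* 100000 */

	return 0;
}
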
 416
 417/* Before IPA v4.5 timing is controlled by a counter register */
 418static void ipa_hardware_config_counter(struct ipa *ipa)
 419{
 420	u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
 421	const struct reg *reg;
 422	u32 val;
 423
 424	reg = ipa_reg(ipa, COUNTER_CFG);
 425	/* If defined, EOT_COAL_GRANULARITY is 0 */
 426	val = reg_encode(reg, AGGR_GRANULARITY, granularity);
 427	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 428}
 429
 430static void ipa_hardware_config_timing(struct ipa *ipa)
 431{
 432	if (ipa->version < IPA_VERSION_4_5)
 433		ipa_hardware_config_counter(ipa);
 434	else
 435		ipa_qtime_config(ipa);
 436}
 437
 438static void ipa_hardware_config_hashing(struct ipa *ipa)
 439{
 440	const struct reg *reg;
 441
 442	/* Other than IPA v4.2, all versions enable "hashing".  Starting
 443	 * with IPA v5.0, the filter and router tables are implemented
 444	 * differently, but the default configuration enables this feature
 445	 * (now referred to as "caching"), so there's nothing to do here.
 446	 */
 447	if (ipa->version != IPA_VERSION_4_2)
 448		return;
 449
 450	/* IPA v4.2 does not support hashed tables, so disable them */
 451	reg = ipa_reg(ipa, FILT_ROUT_HASH_EN);
 452
 453	/* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
 454	 * IPV4_FILTER_HASH are all zero.
 455	 */
 456	iowrite32(0, ipa->reg_virt + reg_offset(reg));
 457}
 458
 459static void ipa_idle_indication_cfg(struct ipa *ipa,
 460				    u32 enter_idle_debounce_thresh,
 461				    bool const_non_idle_enable)
 462{
 463	const struct reg *reg;
 464	u32 val;
 465
 466	if (ipa->version < IPA_VERSION_3_5_1)
 467		return;
 468
 469	reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
 470	val = reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
 471			 enter_idle_debounce_thresh);
 472	if (const_non_idle_enable)
 473		val |= reg_bit(reg, CONST_NON_IDLE_ENABLE);
 474
 475	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 476}
 477
 478/**
 479 * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
 480 * @ipa:	IPA pointer
 481 *
 482 * Configures when the IPA signals it is idle to the global clock
 483 * controller, which can respond by scaling down the clock to save
 484 * power.
 485 */
 486static void ipa_hardware_dcd_config(struct ipa *ipa)
 487{
 488	/* Recommended values for IPA 3.5 and later according to IPA HPG */
 489	ipa_idle_indication_cfg(ipa, 256, false);
 490}
 491
 492static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
 493{
 494	/* Power-on reset values */
 495	ipa_idle_indication_cfg(ipa, 0, true);
 496}
 497
 498/**
 499 * ipa_hardware_config() - Primitive hardware initialization
 500 * @ipa:	IPA pointer
 501 * @data:	IPA configuration data
 502 */
 503static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
 504{
 505	ipa_hardware_config_bcr(ipa, data);
 506	ipa_hardware_config_tx(ipa);
 507	ipa_hardware_config_clkon(ipa);
 508	ipa_hardware_config_comp(ipa);
 509	ipa_hardware_config_qsb(ipa, data);
 510	ipa_hardware_config_timing(ipa);
 511	ipa_hardware_config_hashing(ipa);
 512	ipa_hardware_dcd_config(ipa);
 513}
 514
 515/**
 516 * ipa_hardware_deconfig() - Inverse of ipa_hardware_config()
 517 * @ipa:	IPA pointer
 518 *
 519 * This restores the power-on reset values (even if they aren't different)
 520 */
 521static void ipa_hardware_deconfig(struct ipa *ipa)
 522{
 523	/* Mostly we just leave things as we set them. */
 524	ipa_hardware_dcd_deconfig(ipa);
 525}
 526
 527/**
 528 * ipa_config() - Configure IPA hardware
 529 * @ipa:	IPA pointer
 530 * @data:	IPA configuration data
 531 *
 532 * Perform initialization requiring IPA power to be enabled.
 533 */
 534static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
 535{
 536	int ret;
 537
 538	ipa_hardware_config(ipa, data);
 539
 540	ret = ipa_mem_config(ipa);
 541	if (ret)
 542		goto err_hardware_deconfig;
 543
 544	ret = ipa_interrupt_config(ipa);
 545	if (ret)
 546		goto err_mem_deconfig;
 547
 548	ipa_uc_config(ipa);
 549
 550	ret = ipa_endpoint_config(ipa);
 551	if (ret)
 552		goto err_uc_deconfig;
 553
 554	ipa_table_config(ipa);		/* No deconfig required */
 555
 556	/* Assign resource limitation to each group; no deconfig required */
 557	ret = ipa_resource_config(ipa, data->resource_data);
 558	if (ret)
 559		goto err_endpoint_deconfig;
 560
 561	ret = ipa_modem_config(ipa);
 562	if (ret)
 563		goto err_endpoint_deconfig;
 564
 565	return 0;
 566
 567err_endpoint_deconfig:
 568	ipa_endpoint_deconfig(ipa);
 569err_uc_deconfig:
 570	ipa_uc_deconfig(ipa);
 571	ipa_interrupt_deconfig(ipa);
 572err_mem_deconfig:
 573	ipa_mem_deconfig(ipa);
 574err_hardware_deconfig:
 575	ipa_hardware_deconfig(ipa);
 576
 577	return ret;
 578}
 579
 580/**
 581 * ipa_deconfig() - Inverse of ipa_config()
 582 * @ipa:	IPA pointer
 583 */
 584static void ipa_deconfig(struct ipa *ipa)
 585{
 586	ipa_modem_deconfig(ipa);
 587	ipa_endpoint_deconfig(ipa);
 588	ipa_uc_deconfig(ipa);
 589	ipa_interrupt_deconfig(ipa);
 590	ipa_mem_deconfig(ipa);
 591	ipa_hardware_deconfig(ipa);
 592}
 593
 594static int ipa_firmware_load(struct device *dev)
 595{
 596	const struct firmware *fw;
 597	struct device_node *node;
 598	struct resource res;
 599	phys_addr_t phys;
 600	const char *path;
 601	ssize_t size;
 602	void *virt;
 603	int ret;
 604
 605	node = of_parse_phandle(dev->of_node, "memory-region", 0);
 606	if (!node) {
 607		dev_err(dev, "DT error getting \"memory-region\" property\n");
 608		return -EINVAL;
 609	}
 610
 611	ret = of_address_to_resource(node, 0, &res);
 612	of_node_put(node);
 613	if (ret) {
 614		dev_err(dev, "error %d getting \"memory-region\" resource\n",
 615			ret);
 616		return ret;
 617	}
 618
 619	/* Use name from DTB if specified; use default for *any* error */
 620	ret = of_property_read_string(dev->of_node, "firmware-name", &path);
 621	if (ret) {
 622		dev_dbg(dev, "error %d getting \"firmware-name\" resource\n",
 623			ret);
 624		path = IPA_FW_PATH_DEFAULT;
 625	}
 626
 627	ret = request_firmware(&fw, path, dev);
 628	if (ret) {
 629		dev_err(dev, "error %d requesting \"%s\"\n", ret, path);
 630		return ret;
 631	}
 632
 633	phys = res.start;
 634	size = (size_t)resource_size(&res);
 635	virt = memremap(phys, size, MEMREMAP_WC);
 636	if (!virt) {
 637		dev_err(dev, "unable to remap firmware memory\n");
 638		ret = -ENOMEM;
 639		goto out_release_firmware;
 640	}
 641
 642	ret = qcom_mdt_load(dev, fw, path, IPA_PAS_ID, virt, phys, size, NULL);
 643	if (ret)
 644		dev_err(dev, "error %d loading \"%s\"\n", ret, path);
 645	else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID)))
 646		dev_err(dev, "error %d authenticating \"%s\"\n", ret, path);
 647
 648	memunmap(virt);
 649out_release_firmware:
 650	release_firmware(fw);
 651
 652	return ret;
 653}
 654
 655static const struct of_device_id ipa_match[] = {
 656	{
 657		.compatible	= "qcom,msm8998-ipa",
 658		.data		= &ipa_data_v3_1,
 659	},
 660	{
 661		.compatible	= "qcom,sdm845-ipa",
 662		.data		= &ipa_data_v3_5_1,
 663	},
 664	{
 665		.compatible	= "qcom,sc7180-ipa",
 666		.data		= &ipa_data_v4_2,
 667	},
 668	{
 669		.compatible	= "qcom,sdx55-ipa",
 670		.data		= &ipa_data_v4_5,
 671	},
 672	{
 673		.compatible	= "qcom,sm6350-ipa",
 674		.data		= &ipa_data_v4_7,
 675	},
 676	{
 677		.compatible	= "qcom,sm8350-ipa",
 678		.data		= &ipa_data_v4_9,
 679	},
 680	{
 681		.compatible	= "qcom,sc7280-ipa",
 682		.data		= &ipa_data_v4_11,
 683	},
 684	{
 685		.compatible	= "qcom,sdx65-ipa",
 686		.data		= &ipa_data_v5_0,
 687	},
 688	{
 689		.compatible	= "qcom,sm8550-ipa",
 690		.data		= &ipa_data_v5_5,
 691	},
 692	{ },
 693};
 694MODULE_DEVICE_TABLE(of, ipa_match);
 695
 696/* Check things that can be validated at build time.  This just
 697 * groups these things so the BUILD_BUG_ON() calls don't clutter
 698 * the rest of the code.
 699 */
 700static void ipa_validate_build(void)
 701{
 702	/* At one time we assumed a 64-bit build, allowing some do_div()
 703	 * calls to be replaced by simple division or modulo operations.
 704	 * We currently only perform divide and modulo operations on u32,
 705	 * u16, or size_t objects, and of those only size_t has any chance
 706	 * of being a 64-bit value.  (It should be guaranteed 32 bits wide
 707	 * on a 32-bit build, but there is no harm in verifying that.)
 708	 */
 709	BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT) && sizeof(size_t) != 4);
 710
 711	/* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
 712	BUILD_BUG_ON(GSI_EE_AP != 0);
 713
 714	/* There's no point if we have no channels or event rings */
 715	BUILD_BUG_ON(!GSI_CHANNEL_COUNT_MAX);
 716	BUILD_BUG_ON(!GSI_EVT_RING_COUNT_MAX);
 717
 718	/* GSI hardware design limits */
 719	BUILD_BUG_ON(GSI_CHANNEL_COUNT_MAX > 32);
 720	BUILD_BUG_ON(GSI_EVT_RING_COUNT_MAX > 31);
 721
 722	/* The number of TREs in a transaction is limited by the channel's
 723	 * TLV FIFO size.  A transaction structure uses 8-bit fields
 724	 * to represent the number of TREs it has allocated and used.
 725	 */
 726	BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);
 727
 728	/* This is used as a divisor */
 729	BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
 730
 731	/* Aggregation granularity value can't be 0, and must fit */
 732	BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
 733}
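
/* For readers unfamiliar with the mechanism, a compile-time check of this
 * kind can be reproduced outside the kernel with C11 _Static_assert; the
 * sketch below is only an analogy for how BUILD_BUG_ON() turns a false
 * condition into a build failure, not the kernel's actual implementation.
 */
#include <stddef.h>

/* Compilation fails if the condition is false, much like BUILD_BUG_ON() */
_Static_assert(sizeof(size_t) >= 4, "size_t must be at least 32 bits wide");

int main(void)
{
	return 0;
}
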
 734
 735static enum ipa_firmware_loader ipa_firmware_loader(struct device *dev)
 736{
 737	bool modem_init;
 738	const char *str;
 739	int ret;
 740
 741	/* Look up the old and new properties by name */
 742	modem_init = of_property_read_bool(dev->of_node, "modem-init");
 743	ret = of_property_read_string(dev->of_node, "qcom,gsi-loader", &str);
 744
 745	/* If the new property doesn't exist, it's legacy behavior */
 746	if (ret == -EINVAL) {
 747		if (modem_init)
 748			return IPA_LOADER_MODEM;
 749		goto out_self;
 750	}
 751
 752	/* Any other error on the new property means it's poorly defined */
 753	if (ret)
 754		return IPA_LOADER_INVALID;
 755
 756	/* New property value exists; if old one does too, that's invalid */
 757	if (modem_init)
 758		return IPA_LOADER_INVALID;
 759
 760	/* Modem loads GSI firmware for "modem" */
 761	if (!strcmp(str, "modem"))
 762		return IPA_LOADER_MODEM;
 763
 764	/* No GSI firmware load is needed for "skip" */
 765	if (!strcmp(str, "skip"))
 766		return IPA_LOADER_SKIP;
 767
 768	/* Any value other than "self" is an error */
 769	if (strcmp(str, "self"))
 770		return IPA_LOADER_INVALID;
 771out_self:
 772	/* We need Trust Zone to load firmware; make sure it's available */
 773	if (qcom_scm_is_available())
 774		return IPA_LOADER_SELF;
 775
 776	return IPA_LOADER_DEFER;
 777}
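
/* The decision table above can be summarized with this stand-alone,
 * user-space sketch (hypothetical code for illustration only).  It mirrors
 * how the "qcom,gsi-loader" string and the legacy "modem-init" Boolean
 * combine, but omits the deferral path taken when Trust Zone is not yet
 * available.
 */
#include <stdio.h>
#include <string.h>

/* gsi_loader is the "qcom,gsi-loader" value, or NULL if the property is
 * absent; modem_init reflects the presence of the legacy "modem-init"
 * Boolean property.
 */
static const char *loader(const char *gsi_loader, int modem_init)
{
	if (!gsi_loader)			/* legacy behavior */
		return modem_init ? "MODEM" : "SELF";
	if (modem_init)				/* both properties present */
		return "INVALID";
	if (!strcmp(gsi_loader, "modem"))
		return "MODEM";
	if (!strcmp(gsi_loader, "skip"))
		return "SKIP";
	return strcmp(gsi_loader, "self") ? "INVALID" : "SELF";
}

int main(void)
{
	printf("%s\n", loader(NULL, 1));	/* MODEM */
	printf("%s\n", loader("skip", 0));	/* SKIP */
	printf("%s\n", loader("self", 1));	/* INVALID */

	return 0;
}
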
 778
 779/**
 780 * ipa_probe() - IPA platform driver probe function
 781 * @pdev:	Platform device pointer
 782 *
 783 * Return:	0 if successful, or a negative error code (possibly
 784 *		EPROBE_DEFER)
 785 *
 786 * This is the main entry point for the IPA driver.  Initialization proceeds
 787 * in several stages:
 788 *   - The "init" stage involves activities that can be initialized without
 789 *     access to the IPA hardware.
 790 *   - The "config" stage requires IPA power to be active so IPA registers
 791 *     can be accessed, but does not require the use of IPA immediate commands.
 792 *   - The "setup" stage uses IPA immediate commands, and so requires the GSI
 793 *     layer to be initialized.
 794 *
 795 * The Device Tree "qcom,gsi-loader" property (or the legacy Boolean
 796 * "modem-init" property) determines whether the AP (Trust Zone) or the modem loads the GSI firmware.
 797 * If the AP does GSI initialization, the setup phase is entered after
 798 * this has completed successfully.  Otherwise the modem initializes
 799 * the GSI layer and signals it has finished by sending an SMP2P interrupt
 800 * to the AP; this triggers the start of IPA setup.
 801 */
 802static int ipa_probe(struct platform_device *pdev)
 803{
 804	struct device *dev = &pdev->dev;
 805	struct ipa_interrupt *interrupt;
 806	enum ipa_firmware_loader loader;
 807	const struct ipa_data *data;
 808	struct ipa_power *power;
 809	struct ipa *ipa;
 810	int ret;
 811
 812	ipa_validate_build();
 813
 814	/* Get configuration data early; needed for power initialization */
 815	data = of_device_get_match_data(dev);
 816	if (!data) {
 817		dev_err(dev, "matched hardware not supported\n");
 818		return -ENODEV;
 819	}
 820
 821	if (!ipa_version_supported(data->version)) {
 822		dev_err(dev, "unsupported IPA version %u\n", data->version);
 823		return -EINVAL;
 824	}
 825
 826	if (!data->modem_route_count) {
 827		dev_err(dev, "modem_route_count cannot be zero\n");
 828		return -EINVAL;
 829	}
 830
 831	loader = ipa_firmware_loader(dev);
 832	if (loader == IPA_LOADER_INVALID)
 833		return -EINVAL;
 834	if (loader == IPA_LOADER_DEFER)
 835		return -EPROBE_DEFER;
 836
 837	/* The IPA interrupt might not be ready when we're probed, so this
 838	 * might return -EPROBE_DEFER.
 839	 */
 840	interrupt = ipa_interrupt_init(pdev);
 841	if (IS_ERR(interrupt))
 842		return PTR_ERR(interrupt);
 843
 844	/* The clock and interconnects might not be ready when we're probed,
 845	 * so this might return -EPROBE_DEFER.
 846	 */
 847	power = ipa_power_init(dev, data->power_data);
 848	if (IS_ERR(power)) {
 849		ret = PTR_ERR(power);
 850		goto err_interrupt_exit;
 851	}
 852
 853	/* No more EPROBE_DEFER.  Allocate and initialize the IPA structure */
 854	ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
 855	if (!ipa) {
 856		ret = -ENOMEM;
 857		goto err_power_exit;
 858	}
 859
 860	ipa->dev = dev;
 861	dev_set_drvdata(dev, ipa);
 862	ipa->interrupt = interrupt;
 863	ipa->power = power;
 864	ipa->version = data->version;
 865	ipa->modem_route_count = data->modem_route_count;
 866	init_completion(&ipa->completion);
 867
 868	ret = ipa_reg_init(ipa, pdev);
 869	if (ret)
 870		goto err_kfree_ipa;
 871
 872	ret = ipa_mem_init(ipa, pdev, data->mem_data);
 873	if (ret)
 874		goto err_reg_exit;
 875
 876	ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
 877		       data->endpoint_data);
 878	if (ret)
 879		goto err_mem_exit;
 880
 881	/* Result is a non-zero mask of endpoints that support filtering */
 882	ret = ipa_endpoint_init(ipa, data->endpoint_count, data->endpoint_data);
 883	if (ret)
 884		goto err_gsi_exit;
 885
 886	ret = ipa_table_init(ipa);
 887	if (ret)
 888		goto err_endpoint_exit;
 889
 890	ret = ipa_smp2p_init(ipa, pdev, loader == IPA_LOADER_MODEM);
 891	if (ret)
 892		goto err_table_exit;
 893
 894	/* Power needs to be active for config and setup */
 895	ret = pm_runtime_get_sync(dev);
 896	if (WARN_ON(ret < 0))
 897		goto err_power_put;
 898
 899	ret = ipa_config(ipa, data);
 900	if (ret)
 901		goto err_power_put;
 902
 903	dev_info(dev, "IPA driver initialized");
 904
 905	/* If the modem is loading GSI firmware, it will trigger a call to
 906	 * ipa_setup() when it has finished.  In that case we're done here.
 907	 */
 908	if (loader == IPA_LOADER_MODEM)
 909		goto done;
 910
 911	if (loader == IPA_LOADER_SELF) {
 912		/* The AP is loading GSI firmware; do so now */
 913		ret = ipa_firmware_load(dev);
 914		if (ret)
 915			goto err_deconfig;
 916	} /* Otherwise loader == IPA_LOADER_SKIP */
 917
 918	/* GSI firmware is loaded; proceed to setup */
 919	ret = ipa_setup(ipa);
 920	if (ret)
 921		goto err_deconfig;
 922done:
 923	pm_runtime_mark_last_busy(dev);
 924	(void)pm_runtime_put_autosuspend(dev);
 925
 926	return 0;
 927
 928err_deconfig:
 929	ipa_deconfig(ipa);
 930err_power_put:
 931	pm_runtime_put_noidle(dev);
 932	ipa_smp2p_exit(ipa);
 933err_table_exit:
 934	ipa_table_exit(ipa);
 935err_endpoint_exit:
 936	ipa_endpoint_exit(ipa);
 937err_gsi_exit:
 938	gsi_exit(&ipa->gsi);
 939err_mem_exit:
 940	ipa_mem_exit(ipa);
 941err_reg_exit:
 942	ipa_reg_exit(ipa);
 943err_kfree_ipa:
 944	kfree(ipa);
 945err_power_exit:
 946	ipa_power_exit(power);
 947err_interrupt_exit:
 948	ipa_interrupt_exit(interrupt);
 949
 950	return ret;
 951}
 952
 953static void ipa_remove(struct platform_device *pdev)
 954{
 955	struct ipa_interrupt *interrupt;
 956	struct ipa_power *power;
 957	struct device *dev;
 958	struct ipa *ipa;
 959	int ret;
 960
 961	ipa = dev_get_drvdata(&pdev->dev);
 962	dev = ipa->dev;
 963	WARN_ON(dev != &pdev->dev);
 964
 965	power = ipa->power;
 966	interrupt = ipa->interrupt;
 967
 968	/* Prevent the modem from triggering a call to ipa_setup().  This
 969	 * also ensures a modem-initiated setup that's underway completes.
 970	 */
 971	ipa_smp2p_irq_disable_setup(ipa);
 972
 973	ret = pm_runtime_get_sync(dev);
 974	if (WARN_ON(ret < 0))
 975		goto out_power_put;
 976
 977	if (ipa->setup_complete) {
 978		ret = ipa_modem_stop(ipa);
 979		/* If starting or stopping is in progress, try once more */
 980		if (ret == -EBUSY) {
 981			usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
 982			ret = ipa_modem_stop(ipa);
 983		}
 984		if (ret) {
 985			/*
 986			 * The modem could not be stopped, so tearing things down
 987			 * now risks a crash; leaking the resources instead is the
 988			 * lesser evil, though it too may cause problems later on.
 989			 */
 990			dev_err(dev, "Failed to stop modem (%pe), leaking resources\n",
 991				ERR_PTR(ret));
 992			return;
 993		}
 994
 995		ipa_teardown(ipa);
 996	}
 997
 998	ipa_deconfig(ipa);
 999out_power_put:
1000	pm_runtime_put_noidle(dev);
1001	ipa_smp2p_exit(ipa);
1002	ipa_table_exit(ipa);
1003	ipa_endpoint_exit(ipa);
1004	gsi_exit(&ipa->gsi);
1005	ipa_mem_exit(ipa);
1006	ipa_reg_exit(ipa);
1007	kfree(ipa);
1008	ipa_power_exit(power);
1009	ipa_interrupt_exit(interrupt);
1010
1011	dev_info(dev, "IPA driver removed");
1012}
1013
1014static const struct attribute_group *ipa_attribute_groups[] = {
1015	&ipa_attribute_group,
1016	&ipa_feature_attribute_group,
1017	&ipa_endpoint_id_attribute_group,
1018	&ipa_modem_attribute_group,
1019	NULL,
1020};
1021
1022static struct platform_driver ipa_driver = {
1023	.probe		= ipa_probe,
1024	.remove_new	= ipa_remove,
1025	.shutdown	= ipa_remove,
1026	.driver	= {
1027		.name		= "ipa",
1028		.pm		= &ipa_pm_ops,
1029		.of_match_table	= ipa_match,
1030		.dev_groups	= ipa_attribute_groups,
1031	},
1032};
1033
1034module_platform_driver(ipa_driver);
1035
1036MODULE_LICENSE("GPL v2");
1037MODULE_DESCRIPTION("Qualcomm IP Accelerator device driver");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2
   3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2018-2024 Linaro Ltd.
   5 */
   6
 
 
 
   7#include <linux/bug.h>
 
   8#include <linux/firmware.h>
   9#include <linux/io.h>
  10#include <linux/module.h>
  11#include <linux/of.h>
  12#include <linux/of_address.h>
  13#include <linux/platform_device.h>
  14#include <linux/pm_runtime.h>
  15#include <linux/types.h>
  16
  17#include <linux/firmware/qcom/qcom_scm.h>
  18#include <linux/soc/qcom/mdt_loader.h>
  19
  20#include "ipa.h"
  21#include "ipa_cmd.h"
  22#include "ipa_data.h"
  23#include "ipa_endpoint.h"
  24#include "ipa_interrupt.h"
 
 
  25#include "ipa_mem.h"
 
 
  26#include "ipa_modem.h"
  27#include "ipa_power.h"
  28#include "ipa_reg.h"
  29#include "ipa_resource.h"
  30#include "ipa_smp2p.h"
  31#include "ipa_sysfs.h"
  32#include "ipa_table.h"
  33#include "ipa_uc.h"
  34#include "ipa_version.h"
  35
  36/**
  37 * DOC: The IP Accelerator
  38 *
  39 * This driver supports the Qualcomm IP Accelerator (IPA), which is a
  40 * networking component found in many Qualcomm SoCs.  The IPA is connected
  41 * to the application processor (AP), but is also connected (and partially
  42 * controlled by) other "execution environments" (EEs), such as a modem.
  43 *
  44 * The IPA is the conduit between the AP and the modem that carries network
  45 * traffic.  This driver presents a network interface representing the
  46 * connection of the modem to external (e.g. LTE) networks.
  47 *
  48 * The IPA provides protocol checksum calculation, offloading this work
  49 * from the AP.  The IPA offers additional functionality, including routing,
  50 * filtering, and NAT support, but that more advanced functionality is not
  51 * currently supported.  Despite that, some resources--including routing
  52 * tables and filter tables--are defined in this driver because they must
  53 * be initialized even when the advanced hardware features are not used.
  54 *
  55 * There are two distinct layers that implement the IPA hardware, and this
  56 * is reflected in the organization of the driver.  The generic software
  57 * interface (GSI) is an integral component of the IPA, providing a
  58 * well-defined communication layer between the AP subsystem and the IPA
  59 * core.  The GSI implements a set of "channels" used for communication
  60 * between the AP and the IPA.
  61 *
  62 * The IPA layer uses GSI channels to implement its "endpoints".  And while
  63 * a GSI channel carries data between the AP and the IPA, a pair of IPA
  64 * endpoints is used to carry traffic between two EEs.  Specifically, the main
  65 * modem network interface is implemented by two pairs of endpoints:  a TX
  66 * endpoint on the AP coupled with an RX endpoint on the modem; and another
  67 * RX endpoint on the AP receiving data from a TX endpoint on the modem.
  68 */
  69
  70/* The name of the GSI firmware file relative to /lib/firmware */
  71#define IPA_FW_PATH_DEFAULT	"ipa_fws.mdt"
  72#define IPA_PAS_ID		15
  73
  74/* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */
  75/* IPA v5.5+ does not specify Qtime timestamp config for DPL */
  76#define DPL_TIMESTAMP_SHIFT	14	/* ~1.172 kHz, ~853 usec per tick */
  77#define TAG_TIMESTAMP_SHIFT	14
  78#define NAT_TIMESTAMP_SHIFT	24	/* ~1.144 Hz, ~874 msec per tick */
  79
  80/* Divider for 19.2 MHz crystal oscillator clock to get common timer clock */
  81#define IPA_XO_CLOCK_DIVIDER	192	/* 1 is subtracted where used */
  82
  83/**
  84 * enum ipa_firmware_loader: How GSI firmware gets loaded
  85 *
  86 * @IPA_LOADER_DEFER:		System not ready; try again later
  87 * @IPA_LOADER_SELF:		AP loads GSI firmware
  88 * @IPA_LOADER_MODEM:		Modem loads GSI firmware, signals when done
  89 * @IPA_LOADER_SKIP:		Neither AP nor modem need to load GSI firmware
  90 * @IPA_LOADER_INVALID:	GSI firmware loader specification is invalid
  91 */
  92enum ipa_firmware_loader {
  93	IPA_LOADER_DEFER,
  94	IPA_LOADER_SELF,
  95	IPA_LOADER_MODEM,
  96	IPA_LOADER_SKIP,
  97	IPA_LOADER_INVALID,
  98};
  99
 100/**
 101 * ipa_setup() - Set up IPA hardware
 102 * @ipa:	IPA pointer
 103 *
 104 * Perform initialization that requires issuing immediate commands on
 105 * the command TX endpoint.  If the modem is doing GSI firmware load
 106 * and initialization, this function will be called when an SMP2P
 107 * interrupt has been signaled by the modem.  Otherwise it will be
 108 * called from ipa_probe() after GSI firmware has been successfully
 109 * loaded, authenticated, and started by Trust Zone.
 110 */
 111int ipa_setup(struct ipa *ipa)
 112{
 113	struct ipa_endpoint *exception_endpoint;
 114	struct ipa_endpoint *command_endpoint;
 115	struct device *dev = ipa->dev;
 116	int ret;
 117
 118	ret = gsi_setup(&ipa->gsi);
 119	if (ret)
 120		return ret;
 121
 
 
 
 
 122	ipa_endpoint_setup(ipa);
 123
 124	/* We need to use the AP command TX endpoint to perform other
 125	 * initialization, so we enable first.
 126	 */
 127	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
 128	ret = ipa_endpoint_enable_one(command_endpoint);
 129	if (ret)
 130		goto err_endpoint_teardown;
 131
 132	ret = ipa_mem_setup(ipa);	/* No matching teardown required */
 133	if (ret)
 134		goto err_command_disable;
 135
 136	ret = ipa_table_setup(ipa);	/* No matching teardown required */
 137	if (ret)
 138		goto err_command_disable;
 139
 140	/* Enable the exception handling endpoint, and tell the hardware
 141	 * to use it by default.
 142	 */
 143	exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
 144	ret = ipa_endpoint_enable_one(exception_endpoint);
 145	if (ret)
 146		goto err_command_disable;
 147
 148	ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
 149
 150	/* We're all set.  Now prepare for communication with the modem */
 151	ret = ipa_qmi_setup(ipa);
 152	if (ret)
 153		goto err_default_route_clear;
 154
 155	ipa->setup_complete = true;
 156
 157	dev_info(dev, "IPA driver setup completed successfully\n");
 158
 159	return 0;
 160
 161err_default_route_clear:
 162	ipa_endpoint_default_route_clear(ipa);
 163	ipa_endpoint_disable_one(exception_endpoint);
 164err_command_disable:
 165	ipa_endpoint_disable_one(command_endpoint);
 166err_endpoint_teardown:
 167	ipa_endpoint_teardown(ipa);
 
 
 168	gsi_teardown(&ipa->gsi);
 169
 170	return ret;
 171}
 172
 173/**
 174 * ipa_teardown() - Inverse of ipa_setup()
 175 * @ipa:	IPA pointer
 176 */
 177static void ipa_teardown(struct ipa *ipa)
 178{
 179	struct ipa_endpoint *exception_endpoint;
 180	struct ipa_endpoint *command_endpoint;
 181
 182	/* We're going to tear everything down, as if setup never completed */
 183	ipa->setup_complete = false;
 184
 185	ipa_qmi_teardown(ipa);
 186	ipa_endpoint_default_route_clear(ipa);
 187	exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
 188	ipa_endpoint_disable_one(exception_endpoint);
 189	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
 190	ipa_endpoint_disable_one(command_endpoint);
 191	ipa_endpoint_teardown(ipa);
 
 192	gsi_teardown(&ipa->gsi);
 193}
 194
 195static void
 196ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
 197{
 198	const struct reg *reg;
 199	u32 val;
 200
 201	/* IPA v4.5+ has no backward compatibility register */
 202	if (ipa->version >= IPA_VERSION_4_5)
 203		return;
 204
 205	reg = ipa_reg(ipa, IPA_BCR);
 206	val = data->backward_compat;
 207	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 208}
 209
 210static void ipa_hardware_config_tx(struct ipa *ipa)
 211{
 212	enum ipa_version version = ipa->version;
 213	const struct reg *reg;
 214	u32 offset;
 215	u32 val;
 216
 217	if (version <= IPA_VERSION_4_0 || version >= IPA_VERSION_4_5)
 218		return;
 219
 220	/* Disable PA mask to allow HOLB drop */
 221	reg = ipa_reg(ipa, IPA_TX_CFG);
 222	offset = reg_offset(reg);
 223
 224	val = ioread32(ipa->reg_virt + offset);
 225
 226	val &= ~reg_bit(reg, PA_MASK_EN);
 227
 228	iowrite32(val, ipa->reg_virt + offset);
 229}
 230
 231static void ipa_hardware_config_clkon(struct ipa *ipa)
 232{
 233	enum ipa_version version = ipa->version;
 234	const struct reg *reg;
 235	u32 val;
 236
 237	if (version >= IPA_VERSION_4_5)
 238		return;
 239
 240	if (version < IPA_VERSION_4_0 && version != IPA_VERSION_3_1)
 241		return;
 242
 243	/* Implement some hardware workarounds */
 244	reg = ipa_reg(ipa, CLKON_CFG);
 245	if (version == IPA_VERSION_3_1) {
 246		/* Disable MISC clock gating */
 247		val = reg_bit(reg, CLKON_MISC);
 248	} else {	/* IPA v4.0+ */
 249		/* Enable open global clocks in the CLKON configuration */
 250		val = reg_bit(reg, CLKON_GLOBAL);
 251		val |= reg_bit(reg, GLOBAL_2X_CLK);
 252	}
 253
 254	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 255}
 256
 257/* Configure bus access behavior for IPA components */
 258static void ipa_hardware_config_comp(struct ipa *ipa)
 259{
 260	const struct reg *reg;
 261	u32 offset;
 262	u32 val;
 263
 264	/* Nothing to configure prior to IPA v4.0 */
 265	if (ipa->version < IPA_VERSION_4_0)
 266		return;
 267
 268	reg = ipa_reg(ipa, COMP_CFG);
 269	offset = reg_offset(reg);
 270
 271	val = ioread32(ipa->reg_virt + offset);
 272
 273	if (ipa->version == IPA_VERSION_4_0) {
 274		val &= ~reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
 275		val &= ~reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
 276		val &= ~reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
 277	} else if (ipa->version < IPA_VERSION_4_5) {
 278		val |= reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
 279	} else {
 280		/* For IPA v4.5+ FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
 281	}
 282
 283	val |= reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
 284	val |= reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
 285
 286	iowrite32(val, ipa->reg_virt + offset);
 287}
 288
 289/* Configure DDR and (possibly) PCIe max read/write QSB values */
 290static void
 291ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
 292{
 293	const struct ipa_qsb_data *data0;
 294	const struct ipa_qsb_data *data1;
 295	const struct reg *reg;
 296	u32 val;
 297
 298	/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
 299	data0 = &data->qsb_data[IPA_QSB_MASTER_DDR];
 300	if (data->qsb_count > 1)
 301		data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];
 302
 303	/* Max outstanding write accesses for QSB masters */
 304	reg = ipa_reg(ipa, QSB_MAX_WRITES);
 305
 306	val = reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
 307	if (data->qsb_count > 1)
 308		val |= reg_encode(reg, GEN_QMB_1_MAX_WRITES, data1->max_writes);
 309
 310	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 311
 312	/* Max outstanding read accesses for QSB masters */
 313	reg = ipa_reg(ipa, QSB_MAX_READS);
 314
 315	val = reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
 316	if (ipa->version >= IPA_VERSION_4_0)
 317		val |= reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
 318				  data0->max_reads_beats);
 319	if (data->qsb_count > 1) {
 320		val = reg_encode(reg, GEN_QMB_1_MAX_READS, data1->max_reads);
 321		if (ipa->version >= IPA_VERSION_4_0)
 322			val |= reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
 323					  data1->max_reads_beats);
 324	}
 325
 326	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 327}
 328
 329/* The internal inactivity timer clock is used for the aggregation timer */
 330#define TIMER_FREQUENCY	32000		/* 32 KHz inactivity timer clock */
 331
 332/* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY
 333 * field to represent the given number of microseconds.  The value is one
 334 * less than the number of timer ticks in the requested period.  0 is not
 335 * a valid granularity value (so for example @usec must be at least 16 for
 336 * a TIMER_FREQUENCY of 32000).
 337 */
 338static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
 339{
 340	return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
 341}
 342
 343/* IPA uses unified Qtime starting at IPA v4.5, implementing various
 344 * timestamps and timers independent of the IPA core clock rate.  The
 345 * Qtimer is based on a 56-bit timestamp incremented at each tick of
 346 * a 19.2 MHz SoC crystal oscillator (XO clock).
 347 *
 348 * For IPA timestamps (tag, NAT, data path logging) a lower resolution
 349 * timestamp is achieved by shifting the Qtimer timestamp value right
 350 * some number of bits to produce the low-order bits of the coarser
 351 * granularity timestamp.
 352 *
 353 * For timers, a common timer clock is derived from the XO clock using
 354 * a divider (we use 192, to produce a 100kHz timer clock).  From
 355 * this common clock, three "pulse generators" are used to produce
 356 * timer ticks at a configurable frequency.  IPA timers (such as
 357 * those used for aggregation or head-of-line block handling) now
 358 * define their period based on one of these pulse generators.
 359 */
 360static void ipa_qtime_config(struct ipa *ipa)
 361{
 362	const struct reg *reg;
 363	u32 offset;
 364	u32 val;
 365
 366	/* Timer clock divider must be disabled when we change the rate */
 367	reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
 368	iowrite32(0, ipa->reg_virt + reg_offset(reg));
 369
 370	reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
 371	if (ipa->version < IPA_VERSION_5_5) {
 372		/* Set DPL time stamp resolution to use Qtime (not 1 msec) */
 373		val = reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
 374		val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
 375	}
 376	/* Configure tag and NAT Qtime timestamp resolution as well */
 377	val = reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
 378	val = reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
 379
 380	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 381
 382	/* Set granularity of pulse generators used for other timers */
 383	reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
 384	val = reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
 385	val |= reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
 386	if (ipa->version >= IPA_VERSION_5_0) {
 387		val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_10_MS);
 388		val |= reg_encode(reg, PULSE_GRAN_3, IPA_GRAN_10_MS);
 389	} else {
 390		val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
 391	}
 392
 393	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 394
 395	/* Actual divider is 1 more than value supplied here */
 396	reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
 397	offset = reg_offset(reg);
 398
 399	val = reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
 400
 401	iowrite32(val, ipa->reg_virt + offset);
 402
 403	/* Divider value is set; re-enable the common timer clock divider */
 404	val |= reg_bit(reg, DIV_ENABLE);
 405
 406	iowrite32(val, ipa->reg_virt + offset);
 407}
 408
 409/* Before IPA v4.5 timing is controlled by a counter register */
 410static void ipa_hardware_config_counter(struct ipa *ipa)
 411{
 412	u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
 413	const struct reg *reg;
 414	u32 val;
 415
 416	reg = ipa_reg(ipa, COUNTER_CFG);
 417	/* If defined, EOT_COAL_GRANULARITY is 0 */
 418	val = reg_encode(reg, AGGR_GRANULARITY, granularity);
 419	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 420}
 421
 422static void ipa_hardware_config_timing(struct ipa *ipa)
 423{
 424	if (ipa->version < IPA_VERSION_4_5)
 425		ipa_hardware_config_counter(ipa);
 426	else
 427		ipa_qtime_config(ipa);
 428}
 429
 430static void ipa_hardware_config_hashing(struct ipa *ipa)
 431{
 432	const struct reg *reg;
 433
 434	/* Other than IPA v4.2, all versions enable "hashing".  Starting
 435	 * with IPA v5.0, the filter and router tables are implemented
 436	 * differently, but the default configuration enables this feature
 437	 * (now referred to as "cacheing"), so there's nothing to do here.
 438	 */
 439	if (ipa->version != IPA_VERSION_4_2)
 440		return;
 441
 442	/* IPA v4.2 does not support hashed tables, so disable them */
 443	reg = ipa_reg(ipa, FILT_ROUT_HASH_EN);
 444
 445	/* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
 446	 * IPV4_FILTER_HASH are all zero.
 447	 */
 448	iowrite32(0, ipa->reg_virt + reg_offset(reg));
 449}
 450
 451static void ipa_idle_indication_cfg(struct ipa *ipa,
 452				    u32 enter_idle_debounce_thresh,
 453				    bool const_non_idle_enable)
 454{
 455	const struct reg *reg;
 456	u32 val;
 457
 458	if (ipa->version < IPA_VERSION_3_5_1)
 459		return;
 460
 461	reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
 462	val = reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
 463			 enter_idle_debounce_thresh);
 464	if (const_non_idle_enable)
 465		val |= reg_bit(reg, CONST_NON_IDLE_ENABLE);
 466
 467	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 468}
 469
 470/**
 471 * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
 472 * @ipa:	IPA pointer
 473 *
 474 * Configures when the IPA signals it is idle to the global clock
 475 * controller, which can respond by scaling down the clock to save
 476 * power.
 477 */
 478static void ipa_hardware_dcd_config(struct ipa *ipa)
 479{
 480	/* Recommended values for IPA 3.5 and later according to IPA HPG */
 481	ipa_idle_indication_cfg(ipa, 256, false);
 482}
 483
 484static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
 485{
 486	/* Power-on reset values */
 487	ipa_idle_indication_cfg(ipa, 0, true);
 488}
 489
 490/**
 491 * ipa_hardware_config() - Primitive hardware initialization
 492 * @ipa:	IPA pointer
 493 * @data:	IPA configuration data
 494 */
 495static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
 496{
 497	ipa_hardware_config_bcr(ipa, data);
 498	ipa_hardware_config_tx(ipa);
 499	ipa_hardware_config_clkon(ipa);
 500	ipa_hardware_config_comp(ipa);
 501	ipa_hardware_config_qsb(ipa, data);
 502	ipa_hardware_config_timing(ipa);
 503	ipa_hardware_config_hashing(ipa);
 504	ipa_hardware_dcd_config(ipa);
 505}
 506
 507/**
 508 * ipa_hardware_deconfig() - Inverse of ipa_hardware_config()
 509 * @ipa:	IPA pointer
 510 *
 511 * This restores the power-on reset values (even if they aren't different)
 512 */
 513static void ipa_hardware_deconfig(struct ipa *ipa)
 514{
 515	/* Mostly we just leave things as we set them. */
 516	ipa_hardware_dcd_deconfig(ipa);
 517}
 518
 519/**
 520 * ipa_config() - Configure IPA hardware
 521 * @ipa:	IPA pointer
 522 * @data:	IPA configuration data
 523 *
 524 * Perform initialization requiring IPA power to be enabled.
 525 */
 526static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
 527{
 528	int ret;
 529
 530	ipa_hardware_config(ipa, data);
 531
 532	ret = ipa_mem_config(ipa);
 533	if (ret)
 534		goto err_hardware_deconfig;
 535
 536	ret = ipa_interrupt_config(ipa);
 537	if (ret)
 538		goto err_mem_deconfig;
 539
 540	ipa_uc_config(ipa);
 541
 542	ret = ipa_endpoint_config(ipa);
 543	if (ret)
 544		goto err_uc_deconfig;
 545
 546	ipa_table_config(ipa);		/* No deconfig required */
 547
 548	/* Assign resource limitation to each group; no deconfig required */
 549	ret = ipa_resource_config(ipa, data->resource_data);
 550	if (ret)
 551		goto err_endpoint_deconfig;
 552
 553	ret = ipa_modem_config(ipa);
 554	if (ret)
 555		goto err_endpoint_deconfig;
 556
 557	return 0;
 558
 559err_endpoint_deconfig:
 560	ipa_endpoint_deconfig(ipa);
 561err_uc_deconfig:
 562	ipa_uc_deconfig(ipa);
 563	ipa_interrupt_deconfig(ipa);
 564err_mem_deconfig:
 565	ipa_mem_deconfig(ipa);
 566err_hardware_deconfig:
 567	ipa_hardware_deconfig(ipa);
 568
 569	return ret;
 570}
 571
 572/**
 573 * ipa_deconfig() - Inverse of ipa_config()
 574 * @ipa:	IPA pointer
 575 */
 576static void ipa_deconfig(struct ipa *ipa)
 577{
 578	ipa_modem_deconfig(ipa);
 579	ipa_endpoint_deconfig(ipa);
 580	ipa_uc_deconfig(ipa);
 581	ipa_interrupt_deconfig(ipa);
 582	ipa_mem_deconfig(ipa);
 583	ipa_hardware_deconfig(ipa);
 584}
 585
 586static int ipa_firmware_load(struct device *dev)
 587{
 588	const struct firmware *fw;
 589	struct device_node *node;
 590	struct resource res;
 591	phys_addr_t phys;
 592	const char *path;
 593	ssize_t size;
 594	void *virt;
 595	int ret;
 596
 597	node = of_parse_phandle(dev->of_node, "memory-region", 0);
 598	if (!node) {
 599		dev_err(dev, "DT error getting \"memory-region\" property\n");
 600		return -EINVAL;
 601	}
 602
 603	ret = of_address_to_resource(node, 0, &res);
 604	of_node_put(node);
 605	if (ret) {
 606		dev_err(dev, "error %d getting \"memory-region\" resource\n",
 607			ret);
 608		return ret;
 609	}
 610
 611	/* Use name from DTB if specified; use default for *any* error */
 612	ret = of_property_read_string(dev->of_node, "firmware-name", &path);
 613	if (ret) {
 614		dev_dbg(dev, "error %d getting \"firmware-name\" resource\n",
 615			ret);
 616		path = IPA_FW_PATH_DEFAULT;
 617	}
 618
 619	ret = request_firmware(&fw, path, dev);
 620	if (ret) {
 621		dev_err(dev, "error %d requesting \"%s\"\n", ret, path);
 622		return ret;
 623	}
 624
 625	phys = res.start;
 626	size = (size_t)resource_size(&res);
 627	virt = memremap(phys, size, MEMREMAP_WC);
 628	if (!virt) {
 629		dev_err(dev, "unable to remap firmware memory\n");
 630		ret = -ENOMEM;
 631		goto out_release_firmware;
 632	}
 633
 634	ret = qcom_mdt_load(dev, fw, path, IPA_PAS_ID, virt, phys, size, NULL);
 635	if (ret)
 636		dev_err(dev, "error %d loading \"%s\"\n", ret, path);
 637	else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID)))
 638		dev_err(dev, "error %d authenticating \"%s\"\n", ret, path);
 639
 640	memunmap(virt);
 641out_release_firmware:
 642	release_firmware(fw);
 643
 644	return ret;
 645}
 646
 647static const struct of_device_id ipa_match[] = {
 648	{
 649		.compatible	= "qcom,msm8998-ipa",
 650		.data		= &ipa_data_v3_1,
 651	},
 652	{
 653		.compatible	= "qcom,sdm845-ipa",
 654		.data		= &ipa_data_v3_5_1,
 655	},
 656	{
 657		.compatible	= "qcom,sc7180-ipa",
 658		.data		= &ipa_data_v4_2,
 659	},
 660	{
 661		.compatible	= "qcom,sdx55-ipa",
 662		.data		= &ipa_data_v4_5,
 663	},
 664	{
 665		.compatible	= "qcom,sm6350-ipa",
 666		.data		= &ipa_data_v4_7,
 667	},
 668	{
 669		.compatible	= "qcom,sm8350-ipa",
 670		.data		= &ipa_data_v4_9,
 671	},
 672	{
 673		.compatible	= "qcom,sc7280-ipa",
 674		.data		= &ipa_data_v4_11,
 675	},
 676	{
 677		.compatible	= "qcom,sdx65-ipa",
 678		.data		= &ipa_data_v5_0,
 679	},
 680	{
 681		.compatible	= "qcom,sm8550-ipa",
 682		.data		= &ipa_data_v5_5,
 683	},
 684	{ },
 685};
 686MODULE_DEVICE_TABLE(of, ipa_match);
 687
 688/* Check things that can be validated at build time.  This just
 689 * groups these things BUILD_BUG_ON() calls don't clutter the rest
 690 * of the code.
 691 * */
 692static void ipa_validate_build(void)
 693{
 694	/* At one time we assumed a 64-bit build, allowing some do_div()
 695	 * calls to be replaced by simple division or modulo operations.
 696	 * We currently only perform divide and modulo operations on u32,
 697	 * u16, or size_t objects, and of those only size_t has any chance
 698	 * of being a 64-bit value.  (It should be guaranteed 32 bits wide
 699	 * on a 32-bit build, but there is no harm in verifying that.)
 700	 */
 701	BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT) && sizeof(size_t) != 4);
 702
 703	/* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
 704	BUILD_BUG_ON(GSI_EE_AP != 0);
 705
 706	/* There's no point if we have no channels or event rings */
 707	BUILD_BUG_ON(!GSI_CHANNEL_COUNT_MAX);
 708	BUILD_BUG_ON(!GSI_EVT_RING_COUNT_MAX);
 709
 710	/* GSI hardware design limits */
 711	BUILD_BUG_ON(GSI_CHANNEL_COUNT_MAX > 32);
 712	BUILD_BUG_ON(GSI_EVT_RING_COUNT_MAX > 31);
 713
 714	/* The number of TREs in a transaction is limited by the channel's
 715	 * TLV FIFO size.  A transaction structure uses 8-bit fields
  716	 * to represent the number of TREs it has allocated and used.
 717	 */
 718	BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);
 719
 720	/* This is used as a divisor */
 721	BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
 722
 723	/* Aggregation granularity value can't be 0, and must fit */
 724	BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
 725}
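
/* For reference only (a hypothetical sketch, not code used by this driver):
 * dividing a 64-bit value on a 32-bit build would require do_div(), e.g.:
 *
 *	u64 count = byte_total;			(hypothetical values)
 *	u32 rem = do_div(count, ring_size);	(count now holds the quotient)
 *
 * The size_t check above guarantees plain C division and modulo remain
 * sufficient for the types this driver actually divides.
 */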
 726
 727static enum ipa_firmware_loader ipa_firmware_loader(struct device *dev)
 728{
 729	bool modem_init;
 730	const char *str;
 731	int ret;
 732
 733	/* Look up the old and new properties by name */
 734	modem_init = of_property_read_bool(dev->of_node, "modem-init");
 735	ret = of_property_read_string(dev->of_node, "qcom,gsi-loader", &str);
 736
 737	/* If the new property doesn't exist, it's legacy behavior */
 738	if (ret == -EINVAL) {
 739		if (modem_init)
 740			return IPA_LOADER_MODEM;
 741		goto out_self;
 742	}
 743
 744	/* Any other error on the new property means it's poorly defined */
 745	if (ret)
 746		return IPA_LOADER_INVALID;
 747
 748	/* New property value exists; if old one does too, that's invalid */
 749	if (modem_init)
 750		return IPA_LOADER_INVALID;
 751
 752	/* Modem loads GSI firmware for "modem" */
 753	if (!strcmp(str, "modem"))
 754		return IPA_LOADER_MODEM;
 755
 756	/* No GSI firmware load is needed for "skip" */
 757	if (!strcmp(str, "skip"))
 758		return IPA_LOADER_SKIP;
 759
 760	/* Any value other than "self" is an error */
 761	if (strcmp(str, "self"))
 762		return IPA_LOADER_INVALID;
 763out_self:
 764	/* We need Trust Zone to load firmware; make sure it's available */
 765	if (qcom_scm_is_available())
 766		return IPA_LOADER_SELF;
 767
 768	return IPA_LOADER_DEFER;
 769}
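
/* Illustrative Device Tree usage for the loader selection above (the
 * property names come from the code; everything else is a sketch):
 *
 *	qcom,gsi-loader = "self";	IPA_LOADER_SELF (Trust Zone load,
 *					deferring until SCM is available)
 *	qcom,gsi-loader = "modem";	IPA_LOADER_MODEM
 *	qcom,gsi-loader = "skip";	IPA_LOADER_SKIP (no load needed)
 *	modem-init;			legacy equivalent of "modem"
 *
 * Specifying both "qcom,gsi-loader" and the legacy "modem-init" property
 * is treated as invalid, as is any other "qcom,gsi-loader" value.
 */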
 770
 771/**
 772 * ipa_probe() - IPA platform driver probe function
 773 * @pdev:	Platform device pointer
 774 *
 775 * Return:	0 if successful, or a negative error code (possibly
 776 *		EPROBE_DEFER)
 777 *
 778 * This is the main entry point for the IPA driver.  Initialization proceeds
 779 * in several stages:
 780 *   - The "init" stage involves activities that can be initialized without
 781 *     access to the IPA hardware.
 782 *   - The "config" stage requires IPA power to be active so IPA registers
 783 *     can be accessed, but does not require the use of IPA immediate commands.
 784 *   - The "setup" stage uses IPA immediate commands, and so requires the GSI
 785 *     layer to be initialized.
 786 *
  787 * A Device Tree "qcom,gsi-loader" property (or the legacy Boolean
  788 * "modem-init" property) determines whether GSI firmware loading is
  789 * performed by the AP (via Trust Zone) or by the modem.  If the AP
  790 * loads the GSI firmware, the setup phase is entered once that load
  791 * completes successfully.  Otherwise the modem initializes the GSI layer
  792 * and signals completion with an SMP2P interrupt, triggering IPA setup.
 793 */
 794static int ipa_probe(struct platform_device *pdev)
 795{
 796	struct device *dev = &pdev->dev;
 797	struct ipa_interrupt *interrupt;
 798	enum ipa_firmware_loader loader;
 799	const struct ipa_data *data;
 800	struct ipa_power *power;
 801	struct ipa *ipa;
 802	int ret;
 803
 804	ipa_validate_build();
 805
 806	/* Get configuration data early; needed for power initialization */
 807	data = of_device_get_match_data(dev);
 808	if (!data) {
 809		dev_err(dev, "matched hardware not supported\n");
 810		return -ENODEV;
 811	}
 812
 813	if (!data->modem_route_count) {
 814		dev_err(dev, "modem_route_count cannot be zero\n");
 815		return -EINVAL;
 816	}
 817
 818	loader = ipa_firmware_loader(dev);
 819	if (loader == IPA_LOADER_INVALID)
 820		return -EINVAL;
 821	if (loader == IPA_LOADER_DEFER)
 822		return -EPROBE_DEFER;
 823
 824	/* The IPA interrupt might not be ready when we're probed, so this
 825	 * might return -EPROBE_DEFER.
 826	 */
 827	interrupt = ipa_interrupt_init(pdev);
 828	if (IS_ERR(interrupt))
 829		return PTR_ERR(interrupt);
 830
 831	/* The clock and interconnects might not be ready when we're probed,
 832	 * so this might return -EPROBE_DEFER.
 833	 */
 834	power = ipa_power_init(dev, data->power_data);
 835	if (IS_ERR(power)) {
 836		ret = PTR_ERR(power);
 837		goto err_interrupt_exit;
 838	}
 839
 840	/* No more EPROBE_DEFER.  Allocate and initialize the IPA structure */
 841	ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
 842	if (!ipa) {
 843		ret = -ENOMEM;
 844		goto err_power_exit;
 845	}
 846
 847	ipa->dev = dev;
 848	dev_set_drvdata(dev, ipa);
 849	ipa->interrupt = interrupt;
 850	ipa->power = power;
 851	ipa->version = data->version;
 852	ipa->modem_route_count = data->modem_route_count;
 853	init_completion(&ipa->completion);
 854
 855	ret = ipa_reg_init(ipa, pdev);
 856	if (ret)
 857		goto err_kfree_ipa;
 858
 859	ret = ipa_mem_init(ipa, pdev, data->mem_data);
 860	if (ret)
 861		goto err_reg_exit;
 862
 863	ret = ipa_cmd_init(ipa);
 864	if (ret)
 865		goto err_mem_exit;
 866
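	/* Set up the GSI layer; its channels carry data for the IPA endpoints */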
 867	ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
 868		       data->endpoint_data);
 869	if (ret)
 870		goto err_mem_exit;
 871
  872	/* Set up IPA endpoints, recording which of them support filtering */
 873	ret = ipa_endpoint_init(ipa, data->endpoint_count, data->endpoint_data);
 874	if (ret)
 875		goto err_gsi_exit;
 876
 877	ret = ipa_table_init(ipa);
 878	if (ret)
 879		goto err_endpoint_exit;
 880
 881	ret = ipa_smp2p_init(ipa, pdev, loader == IPA_LOADER_MODEM);
 882	if (ret)
 883		goto err_table_exit;
 884
 885	/* Power needs to be active for config and setup */
 886	ret = pm_runtime_get_sync(dev);
 887	if (WARN_ON(ret < 0))
 888		goto err_power_put;
 889
 890	ret = ipa_config(ipa, data);
 891	if (ret)
 892		goto err_power_put;
 893
 894	dev_info(dev, "IPA driver initialized");
 895
 896	/* If the modem is loading GSI firmware, it will trigger a call to
 897	 * ipa_setup() when it has finished.  In that case we're done here.
 898	 */
 899	if (loader == IPA_LOADER_MODEM)
 900		goto done;
 901
 902	if (loader == IPA_LOADER_SELF) {
 903		/* The AP is loading GSI firmware; do so now */
 904		ret = ipa_firmware_load(dev);
 905		if (ret)
 906			goto err_deconfig;
 907	} /* Otherwise loader == IPA_LOADER_SKIP */
 908
 909	/* GSI firmware is loaded; proceed to setup */
 910	ret = ipa_setup(ipa);
 911	if (ret)
 912		goto err_deconfig;
 913done:
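	/* Drop the power reference taken above; let the hardware autosuspend */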
 914	pm_runtime_mark_last_busy(dev);
 915	(void)pm_runtime_put_autosuspend(dev);
 916
 917	return 0;
 918
 919err_deconfig:
 920	ipa_deconfig(ipa);
 921err_power_put:
 922	pm_runtime_put_noidle(dev);
 923	ipa_smp2p_exit(ipa);
 924err_table_exit:
 925	ipa_table_exit(ipa);
 926err_endpoint_exit:
 927	ipa_endpoint_exit(ipa);
 928err_gsi_exit:
 929	gsi_exit(&ipa->gsi);
 930err_mem_exit:
 931	ipa_mem_exit(ipa);
 932err_reg_exit:
 933	ipa_reg_exit(ipa);
 934err_kfree_ipa:
 935	kfree(ipa);
 936err_power_exit:
 937	ipa_power_exit(power);
 938err_interrupt_exit:
 939	ipa_interrupt_exit(interrupt);
 940
 941	return ret;
 942}
 943
 944static void ipa_remove(struct platform_device *pdev)
 945{
 946	struct ipa_interrupt *interrupt;
 947	struct ipa_power *power;
 948	struct device *dev;
 949	struct ipa *ipa;
 950	int ret;
 951
 952	ipa = dev_get_drvdata(&pdev->dev);
 953	dev = ipa->dev;
 954	WARN_ON(dev != &pdev->dev);
 955
 956	power = ipa->power;
 957	interrupt = ipa->interrupt;
 958
 959	/* Prevent the modem from triggering a call to ipa_setup().  This
 960	 * also ensures a modem-initiated setup that's underway completes.
 961	 */
 962	ipa_smp2p_irq_disable_setup(ipa);
 963
 964	ret = pm_runtime_get_sync(dev);
 965	if (WARN_ON(ret < 0))
 966		goto out_power_put;
 967
 968	if (ipa->setup_complete) {
 969		ret = ipa_modem_stop(ipa);
 970		/* If starting or stopping is in progress, try once more */
 971		if (ret == -EBUSY) {
 972			usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
 973			ret = ipa_modem_stop(ipa);
 974		}
 975		if (ret) {
  976			/*
  977			 * If the modem cannot be stopped, the remaining driver
  978			 * state cannot safely be torn down; leak the resources
  979			 * instead, even though that may still cause a crash later.
  980			 */
 981			dev_err(dev, "Failed to stop modem (%pe), leaking resources\n",
 982				ERR_PTR(ret));
 983			return;
 984		}
 985
 986		ipa_teardown(ipa);
 987	}
 988
 989	ipa_deconfig(ipa);
 990out_power_put:
 991	pm_runtime_put_noidle(dev);
 992	ipa_smp2p_exit(ipa);
 993	ipa_table_exit(ipa);
 994	ipa_endpoint_exit(ipa);
 995	gsi_exit(&ipa->gsi);
 996	ipa_mem_exit(ipa);
 997	ipa_reg_exit(ipa);
 998	kfree(ipa);
 999	ipa_power_exit(power);
1000	ipa_interrupt_exit(interrupt);
1001
1002	dev_info(dev, "IPA driver removed");
1003}
1004
1005static const struct attribute_group *ipa_attribute_groups[] = {
1006	&ipa_attribute_group,
1007	&ipa_feature_attribute_group,
1008	&ipa_endpoint_id_attribute_group,
1009	&ipa_modem_attribute_group,
1010	NULL,
1011};
1012
1013static struct platform_driver ipa_driver = {
1014	.probe		= ipa_probe,
1015	.remove		= ipa_remove,
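	/* A system shutdown reuses the remove path so the modem is stopped cleanly */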
1016	.shutdown	= ipa_remove,
1017	.driver	= {
1018		.name		= "ipa",
1019		.pm		= &ipa_pm_ops,
1020		.of_match_table	= ipa_match,
1021		.dev_groups	= ipa_attribute_groups,
1022	},
1023};
1024
1025module_platform_driver(ipa_driver);
1026
1027MODULE_LICENSE("GPL v2");
1028MODULE_DESCRIPTION("Qualcomm IP Accelerator device driver");