drivers/net/wwan/iosm/iosm_ipc_imem.c (Linux v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2020-21 Intel Corporation.
   4 */
   5
   6#include <linux/delay.h>
   7
   8#include "iosm_ipc_chnl_cfg.h"
   9#include "iosm_ipc_devlink.h"
  10#include "iosm_ipc_flash.h"
  11#include "iosm_ipc_imem.h"
  12#include "iosm_ipc_port.h"
  13#include "iosm_ipc_trace.h"
  14#include "iosm_ipc_debugfs.h"
  15
   16/* Check whether the given channel is the WWAN IP MUX channel. */
  17static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
  18{
  19	if (chnl)
  20		return chnl->ctype == IPC_CTYPE_WWAN &&
  21		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
  22	return false;
  23}
  24
  25static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
  26{
  27	union ipc_msg_prep_args prep_args = {
  28		.sleep.target = 1,
  29		.sleep.state = state,
  30	};
  31
  32	ipc_imem->device_sleep = state;
  33
  34	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
  35					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
  36}
  37
  38static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
  39				  struct ipc_pipe *pipe)
  40{
  41	/* limit max. nr of entries */
  42	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
  43		return false;
  44
  45	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
  46}
  47
   48/* This timer handler retries DL buffer allocation for any pipe that has
   49 * no free buffers and rings the doorbell if new TDs became available.
   50 */
  51static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
  52				      void *msg, size_t size)
  53{
  54	bool new_buffers_available = false;
  55	bool retry_allocation = false;
  56	int i;
  57
  58	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
  59		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;
  60
  61		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
  62			continue;
  63
  64		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
  65			new_buffers_available = true;
  66
  67		if (pipe->nr_of_queued_entries == 0)
  68			retry_allocation = true;
  69	}
  70
  71	if (new_buffers_available)
  72		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
  73					      IPC_HP_DL_PROCESS);
  74
  75	if (retry_allocation) {
  76		ipc_imem->hrtimer_period =
  77		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
  78		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
  79			hrtimer_start(&ipc_imem->td_alloc_timer,
  80				      ipc_imem->hrtimer_period,
  81				      HRTIMER_MODE_REL);
  82	}
  83	return 0;
  84}
  85
  86static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
  87{
  88	struct iosm_imem *ipc_imem =
  89		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
  90	/* Post an async tasklet event to trigger HP update Doorbell */
  91	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
  92				 0, false);
  93	return HRTIMER_NORESTART;
  94}
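/* Note: every hrtimer callback in this file follows the same pattern: the
 * callback runs in hard-irq context, so it only posts an event to the
 * driver tasklet via ipc_task_queue_send_task() and returns
 * HRTIMER_NORESTART; the actual work (ringing doorbells, retrying buffer
 * allocation) is done by the matching ipc_imem_tq_*() handler.
 */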
  95
  96/* Fast update timer tasklet handler to trigger HP update */
  97static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
  98					    void *msg, size_t size)
  99{
 100	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
 101				      IPC_HP_FAST_TD_UPD_TMR);
 102
 103	return 0;
 104}
 105
 106static enum hrtimer_restart
 107ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
 108{
 109	struct iosm_imem *ipc_imem =
 110		container_of(hr_timer, struct iosm_imem, fast_update_timer);
 111	/* Post an async tasklet event to trigger HP update Doorbell */
 112	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
 113				 NULL, 0, false);
 114	return HRTIMER_NORESTART;
 115}
 116
 117static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
 118				    void *msg, size_t size)
 119{
 120	ipc_mux_ul_adb_finish(ipc_imem->mux);
 121	return 0;
 122}
 123
 124static enum hrtimer_restart
 125ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
 126{
 127	struct iosm_imem *ipc_imem =
 128		container_of(hr_timer, struct iosm_imem, adb_timer);
 129
 130	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
 131				 NULL, 0, false);
 132	return HRTIMER_NORESTART;
 133}
 134
 135static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
 136					  struct ipc_mux_config *cfg)
 137{
 138	ipc_mmio_update_cp_capability(ipc_imem->mmio);
 139
 140	if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
 141		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
 142		return -EINVAL;
 143	}
 144
 145	cfg->protocol = ipc_imem->mmio->mux_protocol;
 146
 147	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
 148			       MUX_UL_ON_CREDITS :
 149			       MUX_UL;
 150
  151	/* The instance ID is the same as the channel ID because it is reused
  152	 * by the channel allocation function.
  153	 */
  154	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
  155
 156	return 0;
 157}
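/* Note: the mux configuration above is negotiated through MMIO capability
 * fields written by CP: mux_protocol carries the protocol variant (e.g.
 * MUX_AGGREGATION, checked later in ipc_imem_handle_irq()), and
 * has_ul_flow_credit selects credit-based UL flow control.
 */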
 158
 159void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
 160				   unsigned int reset_enable, bool atomic_ctx)
 161{
 162	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
 163						      reset_enable };
 164
 165	if (atomic_ctx)
 166		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
 167					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
 168					 NULL);
 169	else
 170		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
 171				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
 172}
 173
 174/**
 175 * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
 176 * @ipc_imem:                       Pointer to imem data-struct
 177 */
 178void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
 179{
 180	/* Use the TD update timer only in the runtime phase */
 181	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
 182		/* trigger the doorbell irq on CP directly. */
 183		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
 184					      IPC_HP_TD_UPD_TMR_START);
 185		return;
 186	}
 187
 188	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
 189		ipc_imem->hrtimer_period =
 190		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
 191		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
 192			hrtimer_start(&ipc_imem->tdupdate_timer,
 193				      ipc_imem->hrtimer_period,
 194				      HRTIMER_MODE_REL);
 195	}
 196}
 197
 198void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
 199{
 200	if (hrtimer_active(hr_timer))
 201		hrtimer_cancel(hr_timer);
 202}
 203
 204/**
  205 * ipc_imem_adb_timer_start -	Starts the ADB timer if not already started.
 206 * @ipc_imem:			Pointer to imem data-struct
 207 */
 208void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
 209{
 210	if (!hrtimer_active(&ipc_imem->adb_timer)) {
 211		ipc_imem->hrtimer_period =
 212			ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
 213		hrtimer_start(&ipc_imem->adb_timer,
 214			      ipc_imem->hrtimer_period,
 215			      HRTIMER_MODE_REL);
 216	}
 217}
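/* Note: the ADB timer bounds the aggregation latency. Its tasklet handler
 * calls ipc_mux_ul_adb_finish(), so a partially filled aggregated data
 * block (ADB) is closed after IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC
 * instead of waiting indefinitely for more UL payload.
 */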
 218
 219bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
 220{
 221	struct ipc_mem_channel *channel;
 222	bool hpda_ctrl_pending = false;
 223	struct sk_buff_head *ul_list;
  224	bool hpda_pending = false;
  225	struct ipc_pipe *pipe;
 226	int i;
 227
 228	/* Analyze the uplink pipe of all active channels. */
 229	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
 230		channel = &ipc_imem->channels[i];
 231
 232		if (channel->state != IMEM_CHANNEL_ACTIVE)
 233			continue;
 234
 235		pipe = &channel->ul_pipe;
 236
 237		/* Get the reference to the skbuf accumulator list. */
 238		ul_list = &channel->ul_list;
 239
 240		/* Fill the transfer descriptor with the uplink buffer info. */
 241		if (!ipc_imem_check_wwan_ips(channel)) {
 242			hpda_ctrl_pending |=
 243				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
 244							pipe, ul_list);
 245		} else {
 246			hpda_pending |=
 247				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
 248							pipe, ul_list);
 249		}
 250	}
 251
  252	/* forced HP update needed for non-data channels */
 253	if (hpda_ctrl_pending) {
 254		hpda_pending = false;
 255		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
 256					      IPC_HP_UL_WRITE_TD);
 257	}
 258
 259	return hpda_pending;
 260}
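/* Note on the return value: if any control-channel TD was queued, the
 * IPC_HP_UL_WRITE_TD doorbell is rung immediately and the deferred update
 * is suppressed; otherwise the function returns true for queued IP TDs so
 * the caller can defer the head-pointer update to the TD update timer
 * (see ipc_imem_ul_send() and ipc_imem_handle_irq()).
 */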
 261
 262void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
 263{
 264	int timeout = IPC_MODEM_BOOT_TIMEOUT;
 265
 266	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
 267
 268	/* Trigger the CP interrupt to enter the init state. */
 269	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
 270			  IPC_MEM_DEVICE_IPC_INIT);
 271	/* Wait for the CP update. */
 272	do {
 273		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
 274		    ipc_imem->ipc_requested_state) {
 275			/* Prepare the MMIO space */
 276			ipc_mmio_config(ipc_imem->mmio);
 277
 278			/* Trigger the CP irq to enter the running state. */
 279			ipc_imem->ipc_requested_state =
 280				IPC_MEM_DEVICE_IPC_RUNNING;
 281			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
 282					  IPC_MEM_DEVICE_IPC_RUNNING);
 283
 284			return;
 285		}
 286		msleep(20);
 287	} while (--timeout);
 288
 289	/* timeout */
 290	dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
 291		ipc_imem_phase_get_string(ipc_imem->phase),
 292		ipc_mmio_get_ipc_state(ipc_imem->mmio));
 293
 294	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
 295}
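/* Note: the handshake above polls the MMIO IPC state in 20 ms steps for at
 * most IPC_MODEM_BOOT_TIMEOUT iterations; as soon as CP reports IPC_INIT,
 * the AP configures the MMIO space and fires a second doorbell to move CP
 * on to IPC_RUNNING.
 */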
 296
 297/* Analyze the packet type and distribute it. */
 298static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
 299				    struct ipc_pipe *pipe, struct sk_buff *skb)
 300{
 301	u16 port_id;
 302
 303	if (!skb)
 304		return;
 305
 306	/* An AT/control or IP packet is expected. */
 307	switch (pipe->channel->ctype) {
 308	case IPC_CTYPE_CTRL:
 309		port_id = pipe->channel->channel_id;
 310		ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
 311				    IPC_CB(skb)->mapping,
 312				    IPC_CB(skb)->direction);
 313		if (port_id == IPC_MEM_CTRL_CHL_ID_7)
 314			ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
 315						       skb);
 316		else if (ipc_is_trace_channel(ipc_imem, port_id))
 317			ipc_trace_port_rx(ipc_imem, skb);
 318		else
 319			wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
 320				     skb);
 321		break;
 322
 323	case IPC_CTYPE_WWAN:
 324		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
 325			ipc_mux_dl_decode(ipc_imem->mux, skb);
 326		break;
 327	default:
 328		dev_err(ipc_imem->dev, "Invalid channel type");
 329		break;
 330	}
 331}
 332
 333/* Process the downlink data and pass them to the char or net layer. */
 334static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
 335				     struct ipc_pipe *pipe)
 336{
 337	s32 cnt = 0, processed_td_cnt = 0;
 338	struct ipc_mem_channel *channel;
 339	u32 head = 0, tail = 0;
 340	bool processed = false;
 341	struct sk_buff *skb;
 342
 343	channel = pipe->channel;
 344
 345	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
 346					 &tail);
 347	if (pipe->old_tail != tail) {
 348		if (pipe->old_tail < tail)
 349			cnt = tail - pipe->old_tail;
 350		else
 351			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
 352	}
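	/* Example: with nr_of_entries = 8, old_tail = 6 and tail = 2 the
	 * tail index has wrapped around, so cnt = 8 - 6 + 2 = 4 TDs are
	 * pending.
	 */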
 353
 354	processed_td_cnt = cnt;
 355
  356	/* Process the pending DL TDs of this pipe. */
 357	while (cnt--) {
 358		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);
 359
 360		/* Analyze the packet type and distribute it. */
 361		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
 362	}
 363
  364	/* Try to allocate new empty DL skbs from head..tail - 1. */
 365	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
 366		processed = true;
 367
 368	if (processed && !ipc_imem_check_wwan_ips(channel)) {
  369		/* Force HP update for non-IP channels */
 370		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
 371					      IPC_HP_DL_PROCESS);
 372		processed = false;
 373
 374		/* If Fast Update timer is already running then stop */
 375		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
 376	}
 377
 378	/* Any control channel process will get immediate HP update.
 379	 * Start Fast update timer only for IP channel if all the TDs were
 380	 * used in last process.
 381	 */
 382	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
 383		ipc_imem->hrtimer_period =
 384		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
 385		hrtimer_start(&ipc_imem->fast_update_timer,
 386			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
 387	}
 388
 389	if (ipc_imem->app_notify_dl_pend)
 390		complete(&ipc_imem->dl_pend_sem);
 391}
 392
 393/* process open uplink pipe */
 394static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
 395				     struct ipc_pipe *pipe)
 396{
 397	struct ipc_mem_channel *channel;
 398	u32 tail = 0, head = 0;
 399	struct sk_buff *skb;
 400	s32 cnt = 0;
 401
 402	channel = pipe->channel;
 403
  404	/* Get the head and tail index of the pipe. */
 405	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
 406					 &tail);
 407
 408	if (pipe->old_tail != tail) {
 409		if (pipe->old_tail < tail)
 410			cnt = tail - pipe->old_tail;
 411		else
 412			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
 413	}
 414
 415	/* Free UL buffers. */
 416	while (cnt--) {
 417		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);
 418
 419		if (!skb)
 420			continue;
 421
 422		/* If the user app was suspended in uplink direction - blocking
 423		 * write, resume it.
 424		 */
 425		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
 426			complete(&channel->ul_sem);
 427
 428		/* Free the skbuf element. */
 429		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
 430			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
 431				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
 432			else
 433				dev_err(ipc_imem->dev,
 434					"OP Type is UL_MUX, unknown if_id %d",
 435					channel->if_id);
 436		} else {
 437			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
 438		}
 439	}
 440
  441	/* For the IP UL pipe, check the mux flow control and restart TX. */
 442	if (ipc_imem_check_wwan_ips(pipe->channel))
 443		ipc_mux_check_n_restart_tx(ipc_imem->mux);
 444
 445	if (ipc_imem->app_notify_ul_pend)
 446		complete(&ipc_imem->ul_pend_sem);
 447}
 448
 449/* Executes the irq. */
 450static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
 451{
 452	struct ipc_mem_channel *channel;
 453
  454	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
  455	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
  456	complete(&channel->ul_sem);
 457}
 458
 459/* Execute the UL bundle timer actions, generating the doorbell irq. */
 460static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
 461					  void *msg, size_t size)
 462{
 463	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
 464				      IPC_HP_TD_UPD_TMR);
 465	return 0;
 466}
 467
 468/* Consider link power management in the runtime phase. */
 469static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
 470{
  471	/* The link will go down; test for pending UL packets. */
 472	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
 473	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
 474		/* Generate the doorbell irq. */
 475		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
 476		/* Stop the TD update timer. */
 477		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
 478		/* Stop the fast update timer. */
 479		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
 480	}
 481}
 482
 483/* Execute startup timer and wait for delayed start (e.g. NAND) */
 484static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
 485					void *msg, size_t size)
 486{
 487	/* Update & check the current operation phase. */
 488	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
 489		return -EIO;
 490
 491	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
 492	    IPC_MEM_DEVICE_IPC_UNINIT) {
 493		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
 494
 495		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
 496				  IPC_MEM_DEVICE_IPC_INIT);
 497
 498		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
 499		/* reduce period to 100 ms to check for mmio init state */
 500		if (!hrtimer_active(&ipc_imem->startup_timer))
 501			hrtimer_start(&ipc_imem->startup_timer,
 502				      ipc_imem->hrtimer_period,
 503				      HRTIMER_MODE_REL);
 504	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
 505		   IPC_MEM_DEVICE_IPC_INIT) {
 506		/* Startup complete  - disable timer */
 507		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);
 508
 509		/* Prepare the MMIO space */
 510		ipc_mmio_config(ipc_imem->mmio);
 511		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
 512		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
 513				  IPC_MEM_DEVICE_IPC_RUNNING);
 514	}
 515
 516	return 0;
 517}
 518
 519static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
 520{
 521	enum hrtimer_restart result = HRTIMER_NORESTART;
 522	struct iosm_imem *ipc_imem =
 523		container_of(hr_timer, struct iosm_imem, startup_timer);
 524
 525	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
 526		hrtimer_forward_now(&ipc_imem->startup_timer,
 527				    ipc_imem->hrtimer_period);
 528		result = HRTIMER_RESTART;
 529	}
 530
 531	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
 532				 NULL, 0, false);
 533	return result;
 534}
 535
 536/* Get the CP execution stage */
 537static enum ipc_mem_exec_stage
 538ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
 539{
 540	return (ipc_imem->phase == IPC_P_RUN &&
 541		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
 542		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
 543		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
 544}
 545
 546/* Callback to send the modem ready uevent */
 547static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
 548				    void *msg, size_t size)
 549{
 550	enum ipc_mem_exec_stage exec_stage =
 551		ipc_imem_get_exec_stage_buffered(ipc_imem);
 552
 553	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
 554		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
 555
 556	return 0;
 557}
 558
  559/* This function is executed in a task context via an ipc_worker object,
  560 * as the creation or removal of a device can't be done in tasklet context.
  561 */
 562static void ipc_imem_run_state_worker(struct work_struct *instance)
 563{
 564	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
 565	struct ipc_mux_config mux_cfg;
 566	struct iosm_imem *ipc_imem;
 567	u8 ctrl_chl_idx = 0;
 568	int ret;
 569
 570	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
 571
 572	if (ipc_imem->phase != IPC_P_RUN) {
 573		dev_err(ipc_imem->dev,
 574			"Modem link down. Exit run state worker.");
 575		goto err_out;
 576	}
 577
 578	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
 579		ipc_devlink_deinit(ipc_imem->ipc_devlink);
 580
 581	ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
 582	if (ret < 0)
 583		goto err_out;
 584
 585	ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
 586	if (!ipc_imem->mux)
 587		goto err_out;
 588
 589	ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
 590	if (ret < 0)
 591		goto err_ipc_mux_deinit;
 592
  593	ipc_imem->mux->wwan = ipc_imem->wwan;
  594
 595	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
 596		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
 597			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
 598
 599			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID &&
 600			    chnl_cfg_port.wwan_port_type == WWAN_PORT_XMMRPC) {
 601				ctrl_chl_idx++;
 602				continue;
 603			}
 604
 605			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
 606			    chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
 607				ctrl_chl_idx++;
 608				continue;
 609			}
 610			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
 611				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
 612						      chnl_cfg_port,
 613						      IRQ_MOD_OFF);
 614				ipc_imem->ipc_port[ctrl_chl_idx] =
 615					ipc_port_init(ipc_imem, chnl_cfg_port);
 616			}
 617		}
 618		ctrl_chl_idx++;
 619	}
 620
 621	ipc_debugfs_init(ipc_imem);
 622
 623	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
 624				 false);
 625
 626	/* Complete all memory stores before setting bit */
 627	smp_mb__before_atomic();
 628
 629	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);
 630
 631	/* Complete all memory stores after setting bit */
 632	smp_mb__after_atomic();
 633
 634	return;
 635
 636err_ipc_mux_deinit:
 637	ipc_mux_deinit(ipc_imem->mux);
 638err_out:
 639	ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
 640}
 641
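/* Central MSI/tasklet handler: it updates the phase controlled by CP,
 * handles the ROM/PSI boot doorbells, processes the message ring and all
 * open UL/DL pipes, encodes new mux ADBs/ADGHs, and (re)arms the TD update
 * and TD allocation timers as needed.
 */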
 642static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
 643{
 644	enum ipc_mem_device_ipc_state curr_ipc_status;
 645	enum ipc_phase old_phase, phase;
 646	bool retry_allocation = false;
 647	bool ul_pending = false;
 648	int i;
 649
 650	if (irq != IMEM_IRQ_DONT_CARE)
 651		ipc_imem->ev_irq_pending[irq] = false;
 652
 653	/* Get the internal phase. */
 654	old_phase = ipc_imem->phase;
 655
 656	if (old_phase == IPC_P_OFF_REQ) {
 657		dev_dbg(ipc_imem->dev,
 658			"[%s]: Ignoring MSI. Deinit sequence in progress!",
 659			ipc_imem_phase_get_string(old_phase));
 660		return;
 661	}
 662
 663	/* Update the phase controlled by CP. */
 664	phase = ipc_imem_phase_update(ipc_imem);
 665
 666	switch (phase) {
 667	case IPC_P_RUN:
 668		if (!ipc_imem->enter_runtime) {
  669			/* Execute the transition from flash/boot to runtime. */
 670			ipc_imem->enter_runtime = 1;
 671
 672			/* allow device to sleep, default value is
 673			 * IPC_HOST_SLEEP_ENTER_SLEEP
 674			 */
 675			ipc_imem_msg_send_device_sleep(ipc_imem,
 676						       ipc_imem->device_sleep);
 677
 678			ipc_imem_msg_send_feature_set(ipc_imem,
 679						      IPC_MEM_INBAND_CRASH_SIG,
  680						      true);
 681		}
 682
 683		curr_ipc_status =
 684			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);
 685
 686		/* check ipc_status change */
 687		if (ipc_imem->ipc_status != curr_ipc_status) {
 688			ipc_imem->ipc_status = curr_ipc_status;
 689
 690			if (ipc_imem->ipc_status ==
 691			    IPC_MEM_DEVICE_IPC_RUNNING) {
 692				schedule_work(&ipc_imem->run_state_worker);
 693			}
 694		}
 695
 696		/* Consider power management in the runtime phase. */
 697		ipc_imem_slp_control_exec(ipc_imem);
 698		break; /* Continue with skbuf processing. */
 699
 700		/* Unexpected phases. */
 701	case IPC_P_OFF:
 702	case IPC_P_OFF_REQ:
 703		dev_err(ipc_imem->dev, "confused phase %s",
 704			ipc_imem_phase_get_string(phase));
 705		return;
 706
 707	case IPC_P_PSI:
 708		if (old_phase != IPC_P_ROM)
 709			break;
 710
 711		fallthrough;
 712		/* On CP the PSI phase is already active. */
 713
 714	case IPC_P_ROM:
 715		/* Before CP ROM driver starts the PSI image, it sets
 716		 * the exit_code field on the doorbell scratchpad and
 717		 * triggers the irq.
 718		 */
 719		ipc_imem_rom_irq_exec(ipc_imem);
 720		return;
 721
 722	default:
 723		break;
 724	}
 725
 726	/* process message ring */
 727	ipc_protocol_msg_process(ipc_imem, irq);
 728
 729	/* process all open pipes */
 730	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
 731		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
 732		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;
 733
 734		if (dl_pipe->is_open &&
 735		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
 736			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);
 737
 738			if (dl_pipe->nr_of_queued_entries == 0)
 739				retry_allocation = true;
 740		}
 741
 742		if (ul_pipe->is_open)
 743			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
 744	}
 745
 746	/* Try to generate new ADB or ADGH. */
 747	if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
 748		ipc_imem_td_update_timer_start(ipc_imem);
 749		if (ipc_imem->mux->protocol == MUX_AGGREGATION)
 750			ipc_imem_adb_timer_start(ipc_imem);
 751	}
 752
 753	/* Continue the send procedure with accumulated SIO or NETIF packets.
 754	 * Reset the debounce flags.
 755	 */
 756	ul_pending |= ipc_imem_ul_write_td(ipc_imem);
 757
 758	/* if UL data is pending restart TD update timer */
 759	if (ul_pending) {
 760		ipc_imem->hrtimer_period =
 761		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
 762		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
 763			hrtimer_start(&ipc_imem->tdupdate_timer,
 764				      ipc_imem->hrtimer_period,
 765				      HRTIMER_MODE_REL);
 766	}
 767
 768	/* If CP has executed the transition
 769	 * from IPC_INIT to IPC_RUNNING in the PSI
 770	 * phase, wake up the flash app to open the pipes.
 771	 */
 772	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
 773	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
 774	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
 775						IPC_MEM_DEVICE_IPC_RUNNING) {
  776		complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
  777	}
 778
 779	/* Reset the expected CP state. */
 780	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;
 781
 782	if (retry_allocation) {
 783		ipc_imem->hrtimer_period =
 784		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
 785		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
 786			hrtimer_start(&ipc_imem->td_alloc_timer,
 787				      ipc_imem->hrtimer_period,
 788				      HRTIMER_MODE_REL);
 789	}
 790}
 791
 792/* Callback by tasklet for handling interrupt events. */
 793static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
 794			      size_t size)
 795{
 796	ipc_imem_handle_irq(ipc_imem, arg);
 797
 798	return 0;
 799}
 800
 801void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
 802{
 803	/* start doorbell irq delay timer if UL is pending */
 804	if (ipc_imem_ul_write_td(ipc_imem))
 805		ipc_imem_td_update_timer_start(ipc_imem);
 806}
 807
 808/* Check the execution stage and update the AP phase */
 809static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
 810						  enum ipc_mem_exec_stage stage)
 811{
 812	switch (stage) {
 813	case IPC_MEM_EXEC_STAGE_BOOT:
 814		if (ipc_imem->phase != IPC_P_ROM) {
 815			/* Send this event only once */
 816			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
 817		}
 818
 819		ipc_imem->phase = IPC_P_ROM;
 820		break;
 821
 822	case IPC_MEM_EXEC_STAGE_PSI:
 823		ipc_imem->phase = IPC_P_PSI;
 824		break;
 825
 826	case IPC_MEM_EXEC_STAGE_EBL:
 827		ipc_imem->phase = IPC_P_EBL;
 828		break;
 829
 830	case IPC_MEM_EXEC_STAGE_RUN:
 831		if (ipc_imem->phase != IPC_P_RUN &&
 832		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
 833			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
 834		}
 835		ipc_imem->phase = IPC_P_RUN;
 836		break;
 837
 838	case IPC_MEM_EXEC_STAGE_CRASH:
 839		if (ipc_imem->phase != IPC_P_CRASH)
 840			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);
 841
 842		ipc_imem->phase = IPC_P_CRASH;
 843		break;
 844
 845	case IPC_MEM_EXEC_STAGE_CD_READY:
 846		if (ipc_imem->phase != IPC_P_CD_READY)
 847			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
 848		ipc_imem->phase = IPC_P_CD_READY;
 849		break;
 850
 851	default:
 852		/* unknown exec stage:
 853		 * assume that link is down and send info to listeners
 854		 */
 855		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
 856		break;
 857	}
 858
 859	return ipc_imem->phase;
 860}
 861
 862/* Send msg to device to open pipe */
 863static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
 864			       struct ipc_pipe *pipe)
 865{
 866	union ipc_msg_prep_args prep_args = {
 867		.pipe_open.pipe = pipe,
 868	};
 869
 870	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
 871				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
 872		pipe->is_open = true;
 873
 874	return pipe->is_open;
 875}
 876
 877/* Allocates the TDs for the given pipe along with firing HP update DB. */
 878static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
 879				     void *msg, size_t size)
 880{
 881	struct ipc_pipe *dl_pipe = msg;
 882	bool processed = false;
 883	int i;
 884
 885	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
 886		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);
 887
 888	/* Trigger the doorbell irq to inform CP that new downlink buffers are
 889	 * available.
 890	 */
 891	if (processed)
 892		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);
 893
 894	return 0;
 895}
 896
 897static enum hrtimer_restart
 898ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
 899{
 900	struct iosm_imem *ipc_imem =
 901		container_of(hr_timer, struct iosm_imem, tdupdate_timer);
 902
 903	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
 904				 NULL, 0, false);
 905	return HRTIMER_NORESTART;
 906}
 907
 908/* Get the CP execution state and map it to the AP phase. */
 909enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
 910{
 911	enum ipc_mem_exec_stage exec_stage =
 912				ipc_imem_get_exec_stage_buffered(ipc_imem);
  913	/* While a shutdown is requested (IPC_P_OFF_REQ), keep the internal phase. */
 914	return ipc_imem->phase == IPC_P_OFF_REQ ?
 915		       ipc_imem->phase :
 916		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
 917}
 918
 919const char *ipc_imem_phase_get_string(enum ipc_phase phase)
 920{
 921	switch (phase) {
 922	case IPC_P_RUN:
 923		return "A-RUN";
 924
 925	case IPC_P_OFF:
 926		return "A-OFF";
 927
 928	case IPC_P_ROM:
 929		return "A-ROM";
 930
 931	case IPC_P_PSI:
 932		return "A-PSI";
 933
 934	case IPC_P_EBL:
 935		return "A-EBL";
 936
 937	case IPC_P_CRASH:
 938		return "A-CRASH";
 939
 940	case IPC_P_CD_READY:
 941		return "A-CD_READY";
 942
 943	case IPC_P_OFF_REQ:
 944		return "A-OFF_REQ";
 945
 946	default:
 947		return "A-???";
 948	}
 949}
 950
 951void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
 952{
 953	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };
 954
 955	pipe->is_open = false;
 956	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
 957			      &prep_args);
 958
 959	ipc_imem_pipe_cleanup(ipc_imem, pipe);
 960}
 961
 962void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
 963{
 964	struct ipc_mem_channel *channel;
 965
 966	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
 967		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
 968		return;
 969	}
 970
 971	channel = &ipc_imem->channels[channel_id];
 972
 973	if (channel->state == IMEM_CHANNEL_FREE) {
 974		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
 975			channel_id, channel->state);
 976		return;
 977	}
 978
 979	/* Free only the channel id in the CP power off mode. */
 980	if (channel->state == IMEM_CHANNEL_RESERVED)
 981		/* Release only the channel id. */
 982		goto channel_free;
 983
 984	if (ipc_imem->phase == IPC_P_RUN) {
 985		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
 986		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
 987	}
 988
 989	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
 990	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
 991
 992channel_free:
 993	ipc_imem_channel_free(channel);
 994}
 995
 996struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
 997					      int channel_id, u32 db_id)
 998{
 999	struct ipc_mem_channel *channel;
1000
1001	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
1002		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
1003		return NULL;
1004	}
1005
1006	channel = &ipc_imem->channels[channel_id];
1007
1008	channel->state = IMEM_CHANNEL_ACTIVE;
1009
1010	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
1011		goto ul_pipe_err;
1012
1013	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
1014		goto dl_pipe_err;
1015
1016	/* Allocate the downlink buffers in tasklet context. */
1017	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
1018				     &channel->dl_pipe, 0, false)) {
1019		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
1020		goto task_failed;
1021	}
1022
1023	/* Active channel. */
1024	return channel;
1025task_failed:
1026	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
1027dl_pipe_err:
1028	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
1029ul_pipe_err:
1030	ipc_imem_channel_free(channel);
1031	return NULL;
1032}
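/* A minimal usage sketch (hypothetical caller; the channel and doorbell
 * IDs below are illustrative only):
 *
 *	struct ipc_mem_channel *ch;
 *
 *	ch = ipc_imem_channel_open(ipc_imem, chl_id, IPC_HP_DL_PROCESS);
 *	if (!ch)
 *		return -ENODEV;
 *	... queue skbs on ch->ul_list and kick ipc_imem_ul_send() ...
 *	ipc_imem_channel_close(ipc_imem, chl_id);
 */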
1033
1034void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
1035{
1036	ipc_protocol_suspend(ipc_imem->ipc_protocol);
1037}
1038
1039void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
1040{
1041	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
1042}
1043
1044void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
1045{
1046	enum ipc_mem_exec_stage stage;
1047
1048	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
1049		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
1050		ipc_imem_phase_update_check(ipc_imem, stage);
1051	}
1052}
1053
1054void ipc_imem_channel_free(struct ipc_mem_channel *channel)
1055{
1056	/* Reset dynamic channel elements. */
1057	channel->state = IMEM_CHANNEL_FREE;
1058}
1059
1060int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
1061			   enum ipc_ctype ctype)
1062{
1063	struct ipc_mem_channel *channel;
1064	int i;
1065
1066	/* Find channel of given type/index */
1067	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
1068		channel = &ipc_imem->channels[i];
1069		if (channel->ctype == ctype && channel->index == index)
1070			break;
1071	}
1072
1073	if (i >= ipc_imem->nr_of_channels) {
1074		dev_dbg(ipc_imem->dev,
1075			"no channel definition for index=%d ctype=%d", index,
1076			ctype);
1077		return -ECHRNG;
1078	}
1079
1080	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
1081		dev_dbg(ipc_imem->dev, "channel is in use");
1082		return -EBUSY;
1083	}
1084
1085	if (channel->ctype == IPC_CTYPE_WWAN &&
1086	    index == IPC_MEM_MUX_IP_CH_IF_ID)
1087		channel->if_id = index;
1088
1089	channel->channel_id = index;
1090	channel->state = IMEM_CHANNEL_RESERVED;
1091
1092	return i;
1093}
1094
1095void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
1096			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
1097{
1098	struct ipc_mem_channel *channel;
1099
1100	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
1101	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
1102		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
1103			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
1104		return;
1105	}
1106
1107	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
1108		dev_err(ipc_imem->dev, "too many channels");
1109		return;
1110	}
1111
1112	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
1113	channel->channel_id = ipc_imem->nr_of_channels;
1114	channel->ctype = ctype;
1115	channel->index = chnl_cfg.id;
1116	channel->net_err_count = 0;
1117	channel->state = IMEM_CHANNEL_FREE;
1118	ipc_imem->nr_of_channels++;
1119
1120	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
1121				IRQ_MOD_OFF);
1122
1123	skb_queue_head_init(&channel->ul_list);
1124
1125	init_completion(&channel->ul_sem);
1126}
1127
1128void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
1129			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
1130{
1131	struct ipc_mem_channel *channel;
1132
1133	if (id < 0 || id >= ipc_imem->nr_of_channels) {
1134		dev_err(ipc_imem->dev, "invalid channel id %d", id);
1135		return;
1136	}
1137
1138	channel = &ipc_imem->channels[id];
1139
1140	if (channel->state != IMEM_CHANNEL_FREE &&
1141	    channel->state != IMEM_CHANNEL_RESERVED) {
1142		dev_err(ipc_imem->dev, "invalid channel state %d",
1143			channel->state);
1144		return;
1145	}
1146
1147	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
1148	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
1149	channel->ul_pipe.is_open = false;
1150	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
1151	channel->ul_pipe.channel = channel;
1152	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
1153	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
1154	channel->ul_pipe.irq_moderation = irq_moderation;
1155	channel->ul_pipe.buf_size = 0;
1156
1157	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
1158	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
1159	channel->dl_pipe.is_open = false;
1160	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
1161	channel->dl_pipe.channel = channel;
1162	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
1163	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
1164	channel->dl_pipe.irq_moderation = irq_moderation;
1165	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
1166}
1167
1168static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
1169{
1170	int i;
1171
1172	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
1173		struct ipc_mem_channel *channel;
1174
1175		channel = &ipc_imem->channels[i];
1176
1177		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
1178		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
1179
1180		ipc_imem_channel_free(channel);
1181	}
1182}
1183
1184void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
1185{
1186	struct sk_buff *skb;
1187
1188	/* Force pipe to closed state also when not explicitly closed through
1189	 * ipc_imem_pipe_close()
1190	 */
1191	pipe->is_open = false;
1192
1193	/* Empty the uplink skb accumulator. */
1194	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
1195		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
1196
1197	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
1198}
1199
1200/* Send IPC protocol uninit to the modem when Link is active. */
1201static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
1202{
1203	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
1204	enum ipc_mem_device_ipc_state ipc_state;
1205
1206	/* If the PCIe link is up, set the modem to IPC_UNINIT; if the link
1207	 * is already down, there is nothing to do.
1208	 */
1209	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
1210		/* set modem to UNINIT
1211		 * (in case we want to reload the AP driver without resetting
1212		 * the modem)
1213		 */
1214		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
1215				  IPC_MEM_DEVICE_IPC_UNINIT);
1216		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
1217
1218		/* Wait for maximum 30ms to allow the Modem to uninitialize the
1219		 * protocol.
1220		 */
1221		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
1222		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
1223		       (timeout > 0)) {
1224			usleep_range(1000, 1250);
1225			timeout--;
1226			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
1227		}
1228	}
1229}
1230
1231void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
1232{
1233	ipc_imem->phase = IPC_P_OFF_REQ;
1234
1235	/* forward MDM_NOT_READY to listeners */
1236	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
1237
1238	hrtimer_cancel(&ipc_imem->td_alloc_timer);
1239	hrtimer_cancel(&ipc_imem->tdupdate_timer);
1240	hrtimer_cancel(&ipc_imem->fast_update_timer);
1241	hrtimer_cancel(&ipc_imem->startup_timer);
1242
1243	/* cancel the workqueue */
1244	cancel_work_sync(&ipc_imem->run_state_worker);
1245
1246	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
1247		ipc_mux_deinit(ipc_imem->mux);
1248		ipc_debugfs_deinit(ipc_imem);
1249		ipc_wwan_deinit(ipc_imem->wwan);
1250		ipc_port_deinit(ipc_imem->ipc_port);
1251	}
1252
1253	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
1254		ipc_devlink_deinit(ipc_imem->ipc_devlink);
1255
1256	ipc_imem_device_ipc_uninit(ipc_imem);
1257	ipc_imem_channel_reset(ipc_imem);
1258
1259	ipc_protocol_deinit(ipc_imem->ipc_protocol);
1260	ipc_task_deinit(ipc_imem->ipc_task);
1261
1262	kfree(ipc_imem->ipc_task);
1263	kfree(ipc_imem->mmio);
1264
1265	ipc_imem->phase = IPC_P_OFF;
1266}
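/* Note: the teardown order above is deliberate: stop all timers and the
 * run-state worker first so no new work can be scheduled, then remove the
 * user-facing interfaces (mux, debugfs, wwan, ports, devlink), and only
 * then uninitialize the device IPC, the protocol layer and the task queue.
 */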
1267
1268/* After CP has unblocked the PCIe link, save the start address of the doorbell
1269 * scratchpad and prepare the shared memory region. If the flashing to RAM
1270 * procedure shall be executed, copy the chip information from the doorbell
1271 * scratchpad to the application buffer and wake up the flash app.
1272 */
1273static int ipc_imem_config(struct iosm_imem *ipc_imem)
1274{
1275	enum ipc_phase phase;
1276
1277	/* Initialize the semaphore for the blocking read UL/DL transfer. */
1278	init_completion(&ipc_imem->ul_pend_sem);
1279
1280	init_completion(&ipc_imem->dl_pend_sem);
1281
1282	/* clear internal flags */
1283	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
1284	ipc_imem->enter_runtime = 0;
1285
1286	phase = ipc_imem_phase_update(ipc_imem);
1287
1288	/* Either CP shall be in the power off or power on phase. */
1289	switch (phase) {
1290	case IPC_P_ROM:
1291		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
1292		/* poll execution stage (for delayed start, e.g. NAND) */
1293		if (!hrtimer_active(&ipc_imem->startup_timer))
1294			hrtimer_start(&ipc_imem->startup_timer,
1295				      ipc_imem->hrtimer_period,
1296				      HRTIMER_MODE_REL);
1297		return 0;
1298
1299	case IPC_P_PSI:
1300	case IPC_P_EBL:
1301	case IPC_P_RUN:
1302		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
1303		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;
1304
1305		/* Verify the expected initial state. */
1306		if (ipc_imem->ipc_requested_state ==
1307		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
1308			ipc_imem_ipc_init_check(ipc_imem);
1309
1310			return 0;
1311		}
1312		dev_err(ipc_imem->dev,
1313			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
1314			ipc_mmio_get_ipc_state(ipc_imem->mmio));
1315		break;
1316	case IPC_P_CRASH:
1317	case IPC_P_CD_READY:
1318		dev_dbg(ipc_imem->dev,
1319			"Modem is in phase %d, reset Modem to collect CD",
1320			phase);
1321		return 0;
1322	default:
1323		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
1324		break;
1325	}
1326
1327	complete(&ipc_imem->dl_pend_sem);
1328	complete(&ipc_imem->ul_pend_sem);
1329	ipc_imem->phase = IPC_P_OFF;
1330	return -EIO;
1331}
1332
1333/* Pass the dev ptr to the shared memory driver and request the entry points */
1334struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
1335				void __iomem *mmio, struct device *dev)
1336{
1337	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
1338	enum ipc_mem_exec_stage stage;
1339
1340	if (!ipc_imem)
1341		return NULL;
1342
1343	/* Save the device address. */
1344	ipc_imem->pcie = pcie;
1345	ipc_imem->dev = dev;
1346
1347	ipc_imem->pci_device_id = device_id;
1348	ipc_imem->pci_device_id = device_id;
1349	ipc_imem->cp_version = 0;
1350	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
1351
1352	/* Reset the max number of configured channels */
1353	ipc_imem->nr_of_channels = 0;
1354
1355	/* allocate IPC MMIO */
1356	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
1357	if (!ipc_imem->mmio) {
1358		dev_err(ipc_imem->dev, "failed to initialize mmio region");
1359		goto mmio_init_fail;
1360	}
1361
1362	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
1363				     GFP_KERNEL);
1364
1365	/* Create tasklet for event handling */
1366	if (!ipc_imem->ipc_task)
1367		goto ipc_task_fail;
1368
1369	if (ipc_task_init(ipc_imem->ipc_task))
1370		goto ipc_task_init_fail;
1371
1372	ipc_imem->ipc_task->dev = ipc_imem->dev;
1373
1374	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);
1375
1376	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);
1377
1378	if (!ipc_imem->ipc_protocol)
1379		goto protocol_init_fail;
1380
1381	/* The phase is set to power off. */
1382	ipc_imem->phase = IPC_P_OFF;
1383
1384	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
1385		     HRTIMER_MODE_REL);
1386	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
1387
1388	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
1389		     HRTIMER_MODE_REL);
1390	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
1391
1392	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
1393		     HRTIMER_MODE_REL);
1394	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
1395
1396	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
1397		     HRTIMER_MODE_REL);
1398	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
1399
1400	hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1401	ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
1402
1403	if (ipc_imem_config(ipc_imem)) {
1404		dev_err(ipc_imem->dev, "failed to initialize the imem");
1405		goto imem_config_fail;
1406	}
1407
1408	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
1409	if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
1410		/* Allocate and register devlink */
1411		ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
1412		if (!ipc_imem->ipc_devlink) {
1413			dev_err(ipc_imem->dev, "Devlink register failed");
1414			goto imem_config_fail;
1415		}
1416
1417		if (ipc_flash_link_establish(ipc_imem))
1418			goto devlink_channel_fail;
1419
1420		set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
1421	}
1422	return ipc_imem;
1423devlink_channel_fail:
1424	ipc_devlink_deinit(ipc_imem->ipc_devlink);
1425imem_config_fail:
1426	hrtimer_cancel(&ipc_imem->td_alloc_timer);
1427	hrtimer_cancel(&ipc_imem->fast_update_timer);
1428	hrtimer_cancel(&ipc_imem->tdupdate_timer);
1429	hrtimer_cancel(&ipc_imem->startup_timer);
1430protocol_init_fail:
1431	cancel_work_sync(&ipc_imem->run_state_worker);
1432	ipc_task_deinit(ipc_imem->ipc_task);
1433ipc_task_init_fail:
1434	kfree(ipc_imem->ipc_task);
1435ipc_task_fail:
1436	kfree(ipc_imem->mmio);
1437mmio_init_fail:
1438	kfree(ipc_imem);
1439	return NULL;
1440}
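/* Note: the error labels above unwind in exact reverse order of the
 * preceding initialization steps, the usual kernel goto-ladder cleanup
 * idiom: each failure path releases only what was already set up.
 */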
1441
1442void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
1443{
1444	/* Debounce IPC_EV_IRQ. */
1445	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
1446		ipc_imem->ev_irq_pending[irq] = true;
1447		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
1448					 NULL, 0, false);
1449	}
1450}
1451
1452void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
1453{
1454	ipc_imem->td_update_timer_suspended = suspend;
1455}
1456
1457/* Verify the CP execution state, copy the chip info,
1458 * change the execution phase to ROM
1459 */
1460static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
1461						 int arg, void *msg,
1462						 size_t msgsize)
1463{
1464	enum ipc_mem_exec_stage stage;
1465	struct sk_buff *skb;
1466	int rc = -EINVAL;
1467	size_t size;
1468
1469	/* Test the CP execution state. */
1470	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
1471	if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
1472		dev_err(ipc_imem->dev,
1473			"Execution_stage: expected BOOT, received = %X", stage);
1474		goto trigger_chip_info_fail;
1475	}
1476	/* Allocate a new sk buf for the chip info. */
1477	size = ipc_imem->mmio->chip_info_size;
1478	if (size > IOSM_CHIP_INFO_SIZE_MAX)
1479		goto trigger_chip_info_fail;
1480
1481	skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
1482	if (!skb) {
1483		dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
1484		rc = -ENOMEM;
1485		goto trigger_chip_info_fail;
1486	}
1487	/* Copy the chip info characters into the ipc_skb. */
1488	ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
1489	/* First change to the ROM boot phase. */
1490	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
1491	ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
1492	ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
1493	rc = 0;
1494trigger_chip_info_fail:
1495	return rc;
1496}
1497
1498int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
1499{
1500	return ipc_task_queue_send_task(ipc_imem,
1501					ipc_imem_devlink_trigger_chip_info_cb,
1502					0, NULL, 0, true);
1503}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2020-21 Intel Corporation.
   4 */
   5
   6#include <linux/delay.h>
   7
   8#include "iosm_ipc_chnl_cfg.h"
 
 
   9#include "iosm_ipc_imem.h"
  10#include "iosm_ipc_port.h"
 
 
  11
  12/* Check the wwan ips if it is valid with Channel as input. */
  13static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
  14{
  15	if (chnl)
  16		return chnl->ctype == IPC_CTYPE_WWAN &&
  17		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
  18	return false;
  19}
  20
  21static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
  22{
  23	union ipc_msg_prep_args prep_args = {
  24		.sleep.target = 1,
  25		.sleep.state = state,
  26	};
  27
  28	ipc_imem->device_sleep = state;
  29
  30	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
  31					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
  32}
  33
  34static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
  35				  struct ipc_pipe *pipe)
  36{
  37	/* limit max. nr of entries */
  38	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
  39		return false;
  40
  41	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
  42}
  43
  44/* This timer handler will retry DL buff allocation if a pipe has no free buf
  45 * and gives doorbell if TD is available
  46 */
  47static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
  48				      void *msg, size_t size)
  49{
  50	bool new_buffers_available = false;
  51	bool retry_allocation = false;
  52	int i;
  53
  54	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
  55		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;
  56
  57		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
  58			continue;
  59
  60		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
  61			new_buffers_available = true;
  62
  63		if (pipe->nr_of_queued_entries == 0)
  64			retry_allocation = true;
  65	}
  66
  67	if (new_buffers_available)
  68		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
  69					      IPC_HP_DL_PROCESS);
  70
  71	if (retry_allocation) {
  72		ipc_imem->hrtimer_period =
  73		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
  74		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
  75			hrtimer_start(&ipc_imem->td_alloc_timer,
  76				      ipc_imem->hrtimer_period,
  77				      HRTIMER_MODE_REL);
  78	}
  79	return 0;
  80}
  81
  82static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
  83{
  84	struct iosm_imem *ipc_imem =
  85		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
  86	/* Post an async tasklet event to trigger HP update Doorbell */
  87	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
  88				 0, false);
  89	return HRTIMER_NORESTART;
  90}
  91
  92/* Fast update timer tasklet handler to trigger HP update */
  93static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
  94					    void *msg, size_t size)
  95{
  96	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
  97				      IPC_HP_FAST_TD_UPD_TMR);
  98
  99	return 0;
 100}
 101
 102static enum hrtimer_restart
 103ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
 104{
 105	struct iosm_imem *ipc_imem =
 106		container_of(hr_timer, struct iosm_imem, fast_update_timer);
 107	/* Post an async tasklet event to trigger HP update Doorbell */
 108	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
 109				 NULL, 0, false);
 110	return HRTIMER_NORESTART;
 111}
 112
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 113static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
 114					  struct ipc_mux_config *cfg)
 115{
 116	ipc_mmio_update_cp_capability(ipc_imem->mmio);
 117
 118	if (!ipc_imem->mmio->has_mux_lite) {
 119		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
 120		return -EINVAL;
 121	}
 122
 123	cfg->protocol = MUX_LITE;
 124
 125	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
 126			       MUX_UL_ON_CREDITS :
 127			       MUX_UL;
 128
 129	/* The instance ID is same as channel ID because this is been reused
 130	 * for channel alloc function.
 131	 */
 132	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
 133	cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES;
 134
 135	return 0;
 136}
 137
 138void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
 139				   unsigned int reset_enable, bool atomic_ctx)
 140{
 141	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
 142						      reset_enable };
 143
 144	if (atomic_ctx)
 145		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
 146					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
 147					 NULL);
 148	else
 149		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
 150				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
 151}
 152
 
 
 
 
 153void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
 154{
 155	/* Use the TD update timer only in the runtime phase */
 156	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
 157		/* trigger the doorbell irq on CP directly. */
 158		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
 159					      IPC_HP_TD_UPD_TMR_START);
 160		return;
 161	}
 162
 163	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
 164		ipc_imem->hrtimer_period =
 165		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
 166		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
 167			hrtimer_start(&ipc_imem->tdupdate_timer,
 168				      ipc_imem->hrtimer_period,
 169				      HRTIMER_MODE_REL);
 170	}
 171}
 172
 173void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
 174{
 175	if (hrtimer_active(hr_timer))
 176		hrtimer_cancel(hr_timer);
 177}
 178
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 179bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
 180{
 181	struct ipc_mem_channel *channel;
 
 182	struct sk_buff_head *ul_list;
 183	bool hpda_pending = false;
 184	bool forced_hpdu = false;
 185	struct ipc_pipe *pipe;
 186	int i;
 187
 188	/* Analyze the uplink pipe of all active channels. */
 189	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
 190		channel = &ipc_imem->channels[i];
 191
 192		if (channel->state != IMEM_CHANNEL_ACTIVE)
 193			continue;
 194
 195		pipe = &channel->ul_pipe;
 196
 197		/* Get the reference to the skbuf accumulator list. */
 198		ul_list = &channel->ul_list;
 199
 200		/* Fill the transfer descriptor with the uplink buffer info. */
 201		hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
 
 
 202							pipe, ul_list);
 203
 204		/* forced HP update needed for non data channels */
 205		if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
 206			forced_hpdu = true;
 
 207	}
 208
 209	if (forced_hpdu) {
 
 210		hpda_pending = false;
 211		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
 212					      IPC_HP_UL_WRITE_TD);
 213	}
 214
 215	return hpda_pending;
 216}
 217
 218void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
 219{
 220	int timeout = IPC_MODEM_BOOT_TIMEOUT;
 221
 222	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
 223
 224	/* Trigger the CP interrupt to enter the init state. */
 225	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
 226			  IPC_MEM_DEVICE_IPC_INIT);
 227	/* Wait for the CP update. */
 228	do {
 229		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
 230		    ipc_imem->ipc_requested_state) {
 231			/* Prepare the MMIO space */
 232			ipc_mmio_config(ipc_imem->mmio);
 233
 234			/* Trigger the CP irq to enter the running state. */
 235			ipc_imem->ipc_requested_state =
 236				IPC_MEM_DEVICE_IPC_RUNNING;
 237			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
 238					  IPC_MEM_DEVICE_IPC_RUNNING);
 239
 240			return;
 241		}
 242		msleep(20);
 243	} while (--timeout);
 244
 245	/* timeout */
 246	dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
 247		ipc_imem_phase_get_string(ipc_imem->phase),
 248		ipc_mmio_get_ipc_state(ipc_imem->mmio));
 249
 250	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
 251}
 252
 253/* Analyze the packet type and distribute it. */
 254static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
 255				    struct ipc_pipe *pipe, struct sk_buff *skb)
 256{
 257	u16 port_id;
 258
 259	if (!skb)
 260		return;
 261
 262	/* An AT/control or IP packet is expected. */
 263	switch (pipe->channel->ctype) {
 264	case IPC_CTYPE_CTRL:
 265		port_id = pipe->channel->channel_id;
 266
 267		/* Pass the packet to the wwan layer. */
 268		wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb);
 
 
 
 
 
 
 
 
 269		break;
 270
 271	case IPC_CTYPE_WWAN:
 272		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
 273			ipc_mux_dl_decode(ipc_imem->mux, skb);
 274		break;
 275	default:
 276		dev_err(ipc_imem->dev, "Invalid channel type");
 277		break;
 278	}
 279}
 280
 281/* Process the downlink data and pass them to the char or net layer. */
 282static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
 283				     struct ipc_pipe *pipe)
 284{
 285	s32 cnt = 0, processed_td_cnt = 0;
 286	struct ipc_mem_channel *channel;
 287	u32 head = 0, tail = 0;
 288	bool processed = false;
 289	struct sk_buff *skb;
 290
 291	channel = pipe->channel;
 292
 293	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
 294					 &tail);
 295	if (pipe->old_tail != tail) {
 296		if (pipe->old_tail < tail)
 297			cnt = tail - pipe->old_tail;
 298		else
 299			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
 300	}
 301
 302	processed_td_cnt = cnt;
 303
304	/* Process the downlink TDs consumed by CP on this pipe. */
 305	while (cnt--) {
 306		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);
 307
 308		/* Analyze the packet type and distribute it. */
 309		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
 310	}
 311
312	/* Try to allocate new empty DL skbs from head..tail - 1. */
 313	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
 314		processed = true;
 315
 316	if (processed && !ipc_imem_check_wwan_ips(channel)) {
 317		/* Force HP update for non IP channels */
 318		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
 319					      IPC_HP_DL_PROCESS);
 320		processed = false;
 321
 322		/* If Fast Update timer is already running then stop */
 323		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
 324	}
 325
326	/* Any control channel process will get an immediate HP update.
327	 * Start the fast update timer only for an IP channel when all TDs
328	 * were used in the last run.
329	 */
 330	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
 331		ipc_imem->hrtimer_period =
 332		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
 333		hrtimer_start(&ipc_imem->fast_update_timer,
 334			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
 335	}
 336
 337	if (ipc_imem->app_notify_dl_pend)
 338		complete(&ipc_imem->dl_pend_sem);
 339}
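/* Editor's note: the cnt computation above (and the identical one in
 * ipc_imem_ul_pipe_process() below) is the usual circular-ring distance
 * from old_tail to tail. Worked example: with nr_of_entries = 8,
 * old_tail = 6 and tail = 2 the index wrapped, so cnt = 8 - 6 + 2 = 4
 * descriptors were consumed. As a standalone sketch:
 *
 *	static u32 ring_consumed(u32 old_tail, u32 tail, u32 nr_of_entries)
 *	{
 *		if (old_tail == tail)
 *			return 0;
 *		if (old_tail < tail)
 *			return tail - old_tail;
 *		return nr_of_entries - old_tail + tail;
 *	}
 */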
 340
 341/* process open uplink pipe */
 342static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
 343				     struct ipc_pipe *pipe)
 344{
 345	struct ipc_mem_channel *channel;
 346	u32 tail = 0, head = 0;
 347	struct sk_buff *skb;
 348	s32 cnt = 0;
 349
 350	channel = pipe->channel;
 351
352	/* Get the pipe's head and tail index. */
 353	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
 354					 &tail);
 355
 356	if (pipe->old_tail != tail) {
 357		if (pipe->old_tail < tail)
 358			cnt = tail - pipe->old_tail;
 359		else
 360			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
 361	}
 362
 363	/* Free UL buffers. */
 364	while (cnt--) {
 365		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);
 366
 367		if (!skb)
 368			continue;
 369
 370		/* If the user app was suspended in uplink direction - blocking
 371		 * write, resume it.
 372		 */
 373		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
 374			complete(&channel->ul_sem);
 375
 376		/* Free the skbuf element. */
 377		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
 378			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
 379				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
 380			else
 381				dev_err(ipc_imem->dev,
 382					"OP Type is UL_MUX, unknown if_id %d",
 383					channel->if_id);
 384		} else {
 385			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
 386		}
 387	}
 388
389	/* For the IP UL pipe, check and restart TX if needed. */
 390	if (ipc_imem_check_wwan_ips(pipe->channel))
 391		ipc_mux_check_n_restart_tx(ipc_imem->mux);
 392
 393	if (ipc_imem->app_notify_ul_pend)
 394		complete(&ipc_imem->ul_pend_sem);
 395}
 396
397/* Execute the ROM irq: read the exit code and wake up the flash app. */
 398static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
 399{
 400	struct ipc_mem_channel *channel;
 401
 402	if (ipc_imem->flash_channel_id < 0) {
 403		ipc_imem->rom_exit_code = IMEM_ROM_EXIT_FAIL;
 404		dev_err(ipc_imem->dev, "Missing flash app:%d",
 405			ipc_imem->flash_channel_id);
 406		return;
 407	}
 408
 409	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
 410
 411	/* Wake up the flash app to continue or to terminate depending
 412	 * on the CP ROM exit code.
 413	 */
 414	channel = &ipc_imem->channels[ipc_imem->flash_channel_id];
 415	complete(&channel->ul_sem);
 416}
 417
 418/* Execute the UL bundle timer actions, generating the doorbell irq. */
 419static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
 420					  void *msg, size_t size)
 421{
 422	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
 423				      IPC_HP_TD_UPD_TMR);
 424	return 0;
 425}
 426
 427/* Consider link power management in the runtime phase. */
 428static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
 429{
430	/* The link will go down; test for pending UL packets. */
 431	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
 432	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
 433		/* Generate the doorbell irq. */
 434		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
 435		/* Stop the TD update timer. */
 436		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
 437		/* Stop the fast update timer. */
 438		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
 439	}
 440}
 441
 442/* Execute startup timer and wait for delayed start (e.g. NAND) */
 443static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
 444					void *msg, size_t size)
 445{
 446	/* Update & check the current operation phase. */
 447	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
 448		return -EIO;
 449
 450	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
 451	    IPC_MEM_DEVICE_IPC_UNINIT) {
 452		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
 453
 454		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
 455				  IPC_MEM_DEVICE_IPC_INIT);
 456
 457		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
 458		/* reduce period to 100 ms to check for mmio init state */
 459		if (!hrtimer_active(&ipc_imem->startup_timer))
 460			hrtimer_start(&ipc_imem->startup_timer,
 461				      ipc_imem->hrtimer_period,
 462				      HRTIMER_MODE_REL);
 463	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
 464		   IPC_MEM_DEVICE_IPC_INIT) {
465		/* Startup complete - disable timer */
 466		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);
 467
 468		/* Prepare the MMIO space */
 469		ipc_mmio_config(ipc_imem->mmio);
 470		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
 471		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
 472				  IPC_MEM_DEVICE_IPC_RUNNING);
 473	}
 474
 475	return 0;
 476}
 477
 478static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
 479{
 480	enum hrtimer_restart result = HRTIMER_NORESTART;
 481	struct iosm_imem *ipc_imem =
 482		container_of(hr_timer, struct iosm_imem, startup_timer);
 483
 484	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
 485		hrtimer_forward(&ipc_imem->startup_timer, ktime_get(),
 486				ipc_imem->hrtimer_period);
 487		result = HRTIMER_RESTART;
 488	}
 489
 490	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
 491				 NULL, 0, false);
 492	return result;
 493}
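/* Editor's note: the callback above shows both hrtimer completion modes:
 * hrtimer_forward() plus HRTIMER_RESTART re-arms the timer periodically,
 * while HRTIMER_NORESTART makes it one-shot. A hedged, generic sketch of
 * the periodic form (illustrative name, 100 ms period assumed):
 *
 *	static enum hrtimer_restart my_periodic_cb(struct hrtimer *t)
 *	{
 *		hrtimer_forward_now(t, ms_to_ktime(100));
 *		return HRTIMER_RESTART;
 *	}
 */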
 494
 495/* Get the CP execution stage */
 496static enum ipc_mem_exec_stage
 497ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
 498{
 499	return (ipc_imem->phase == IPC_P_RUN &&
 500		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
 501		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
 502		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
 503}
 504
 505/* Callback to send the modem ready uevent */
 506static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
 507				    void *msg, size_t size)
 508{
 509	enum ipc_mem_exec_stage exec_stage =
 510		ipc_imem_get_exec_stage_buffered(ipc_imem);
 511
 512	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
 513		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
 514
 515	return 0;
 516}
 517
 518/* This function is executed in a task context via an ipc_worker object,
519 * as the creation or removal of devices can't be done from tasklet context.
 520 */
 521static void ipc_imem_run_state_worker(struct work_struct *instance)
 522{
 523	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
 524	struct ipc_mux_config mux_cfg;
 525	struct iosm_imem *ipc_imem;
 526	u8 ctrl_chl_idx = 0;
 527
 528	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
 529
 530	if (ipc_imem->phase != IPC_P_RUN) {
 531		dev_err(ipc_imem->dev,
 532			"Modem link down. Exit run state worker.");
 533		return;
 534	}
 535
 536	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
 537		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
 538
 539	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
 540	if (ipc_imem->mux)
 541		ipc_imem->mux->wwan = ipc_imem->wwan;
 542
 543	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
 544		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
 545			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
 546			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
 547				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
 548						      chnl_cfg_port,
 549						      IRQ_MOD_OFF);
 550				ipc_imem->ipc_port[ctrl_chl_idx] =
 551					ipc_port_init(ipc_imem, chnl_cfg_port);
 552			}
 553		}
 554		ctrl_chl_idx++;
 555	}
 556
 557	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
 558				 false);
 559
 560	/* Complete all memory stores before setting bit */
 561	smp_mb__before_atomic();
 562
 563	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);
 564
 565	/* Complete all memory stores after setting bit */
 566	smp_mb__after_atomic();
 567}
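/* Editor's note: the smp_mb__before_atomic()/smp_mb__after_atomic() pair
 * above publishes the fully initialised state: all stores to channels,
 * mux and ports are ordered before FULLY_FUNCTIONAL becomes visible.
 * A hedged sketch of the matching consumer side (illustrative only;
 * the actual readers live elsewhere in the driver):
 *
 *	if (test_bit(FULLY_FUNCTIONAL, &ipc_imem->flag))
 *		start_using_channels(ipc_imem);
 *
 * where start_using_channels() is a hypothetical stand-in for any code
 * that relies on the resources set up by this worker.
 */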
 568
 569static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
 570{
 571	enum ipc_mem_device_ipc_state curr_ipc_status;
 572	enum ipc_phase old_phase, phase;
 573	bool retry_allocation = false;
 574	bool ul_pending = false;
 575	int ch_id, i;
 576
 577	if (irq != IMEM_IRQ_DONT_CARE)
 578		ipc_imem->ev_irq_pending[irq] = false;
 579
 580	/* Get the internal phase. */
 581	old_phase = ipc_imem->phase;
 582
 583	if (old_phase == IPC_P_OFF_REQ) {
 584		dev_dbg(ipc_imem->dev,
 585			"[%s]: Ignoring MSI. Deinit sequence in progress!",
 586			ipc_imem_phase_get_string(old_phase));
 587		return;
 588	}
 589
 590	/* Update the phase controlled by CP. */
 591	phase = ipc_imem_phase_update(ipc_imem);
 592
 593	switch (phase) {
 594	case IPC_P_RUN:
 595		if (!ipc_imem->enter_runtime) {
596			/* Execute the transition from flash/boot to runtime. */
 597			ipc_imem->enter_runtime = 1;
 598
 599			/* allow device to sleep, default value is
 600			 * IPC_HOST_SLEEP_ENTER_SLEEP
 601			 */
 602			ipc_imem_msg_send_device_sleep(ipc_imem,
 603						       ipc_imem->device_sleep);
 604
 605			ipc_imem_msg_send_feature_set(ipc_imem,
 606						      IPC_MEM_INBAND_CRASH_SIG,
607						      true);
 608		}
 609
 610		curr_ipc_status =
 611			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);
 612
 613		/* check ipc_status change */
 614		if (ipc_imem->ipc_status != curr_ipc_status) {
 615			ipc_imem->ipc_status = curr_ipc_status;
 616
 617			if (ipc_imem->ipc_status ==
 618			    IPC_MEM_DEVICE_IPC_RUNNING) {
 619				schedule_work(&ipc_imem->run_state_worker);
 620			}
 621		}
 622
 623		/* Consider power management in the runtime phase. */
 624		ipc_imem_slp_control_exec(ipc_imem);
 625		break; /* Continue with skbuf processing. */
 626
 627		/* Unexpected phases. */
 628	case IPC_P_OFF:
 629	case IPC_P_OFF_REQ:
 630		dev_err(ipc_imem->dev, "confused phase %s",
 631			ipc_imem_phase_get_string(phase));
 632		return;
 633
 634	case IPC_P_PSI:
 635		if (old_phase != IPC_P_ROM)
 636			break;
 637
 638		fallthrough;
 639		/* On CP the PSI phase is already active. */
 640
 641	case IPC_P_ROM:
 642		/* Before CP ROM driver starts the PSI image, it sets
 643		 * the exit_code field on the doorbell scratchpad and
 644		 * triggers the irq.
 645		 */
 646		ipc_imem_rom_irq_exec(ipc_imem);
 647		return;
 648
 649	default:
 650		break;
 651	}
 652
 653	/* process message ring */
 654	ipc_protocol_msg_process(ipc_imem, irq);
 655
 656	/* process all open pipes */
 657	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
 658		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
 659		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;
 660
 661		if (dl_pipe->is_open &&
 662		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
 663			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);
 664
 665			if (dl_pipe->nr_of_queued_entries == 0)
 666				retry_allocation = true;
 667		}
 668
 669		if (ul_pipe->is_open)
 670			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
 671	}
 672
 673	/* Try to generate new ADB or ADGH. */
 674	if (ipc_mux_ul_data_encode(ipc_imem->mux))
 675		ipc_imem_td_update_timer_start(ipc_imem);
 676
 677	/* Continue the send procedure with accumulated SIO or NETIF packets.
 678	 * Reset the debounce flags.
 679	 */
 680	ul_pending |= ipc_imem_ul_write_td(ipc_imem);
 681
682	/* If UL data is pending, restart the TD update timer. */
 683	if (ul_pending) {
 684		ipc_imem->hrtimer_period =
 685		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
 686		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
 687			hrtimer_start(&ipc_imem->tdupdate_timer,
 688				      ipc_imem->hrtimer_period,
 689				      HRTIMER_MODE_REL);
 690	}
 691
 692	/* If CP has executed the transition
 693	 * from IPC_INIT to IPC_RUNNING in the PSI
 694	 * phase, wake up the flash app to open the pipes.
 695	 */
 696	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
 697	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
 698	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
 699		    IPC_MEM_DEVICE_IPC_RUNNING &&
 700	    ipc_imem->flash_channel_id >= 0) {
 701		/* Wake up the flash app to open the pipes. */
 702		ch_id = ipc_imem->flash_channel_id;
 703		complete(&ipc_imem->channels[ch_id].ul_sem);
 704	}
 705
 706	/* Reset the expected CP state. */
 707	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;
 708
 709	if (retry_allocation) {
 710		ipc_imem->hrtimer_period =
 711		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
 712		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
 713			hrtimer_start(&ipc_imem->td_alloc_timer,
 714				      ipc_imem->hrtimer_period,
 715				      HRTIMER_MODE_REL);
 716	}
 717}
 718
 719/* Callback by tasklet for handling interrupt events. */
 720static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
 721			      size_t size)
 722{
 723	ipc_imem_handle_irq(ipc_imem, arg);
 724
 725	return 0;
 726}
 727
 728void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
 729{
 730	/* start doorbell irq delay timer if UL is pending */
 731	if (ipc_imem_ul_write_td(ipc_imem))
 732		ipc_imem_td_update_timer_start(ipc_imem);
 733}
 734
 735/* Check the execution stage and update the AP phase */
 736static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
 737						  enum ipc_mem_exec_stage stage)
 738{
 739	switch (stage) {
 740	case IPC_MEM_EXEC_STAGE_BOOT:
 741		if (ipc_imem->phase != IPC_P_ROM) {
 742			/* Send this event only once */
 743			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
 744		}
 745
 746		ipc_imem->phase = IPC_P_ROM;
 747		break;
 748
 749	case IPC_MEM_EXEC_STAGE_PSI:
 750		ipc_imem->phase = IPC_P_PSI;
 751		break;
 752
 753	case IPC_MEM_EXEC_STAGE_EBL:
 754		ipc_imem->phase = IPC_P_EBL;
 755		break;
 756
 757	case IPC_MEM_EXEC_STAGE_RUN:
 758		if (ipc_imem->phase != IPC_P_RUN &&
 759		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
 760			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
 761		}
 762		ipc_imem->phase = IPC_P_RUN;
 763		break;
 764
 765	case IPC_MEM_EXEC_STAGE_CRASH:
 766		if (ipc_imem->phase != IPC_P_CRASH)
 767			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);
 768
 769		ipc_imem->phase = IPC_P_CRASH;
 770		break;
 771
 772	case IPC_MEM_EXEC_STAGE_CD_READY:
 773		if (ipc_imem->phase != IPC_P_CD_READY)
 774			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
 775		ipc_imem->phase = IPC_P_CD_READY;
 776		break;
 777
 778	default:
 779		/* unknown exec stage:
 780		 * assume that link is down and send info to listeners
 781		 */
 782		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
 783		break;
 784	}
 785
 786	return ipc_imem->phase;
 787}
 788
 789/* Send msg to device to open pipe */
 790static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
 791			       struct ipc_pipe *pipe)
 792{
 793	union ipc_msg_prep_args prep_args = {
 794		.pipe_open.pipe = pipe,
 795	};
 796
 797	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
 798				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
 799		pipe->is_open = true;
 800
 801	return pipe->is_open;
 802}
 803
 804/* Allocates the TDs for the given pipe along with firing HP update DB. */
 805static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
 806				     void *msg, size_t size)
 807{
 808	struct ipc_pipe *dl_pipe = msg;
 809	bool processed = false;
 810	int i;
 811
 812	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
 813		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);
 814
 815	/* Trigger the doorbell irq to inform CP that new downlink buffers are
 816	 * available.
 817	 */
 818	if (processed)
 819		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);
 820
 821	return 0;
 822}
 823
 824static enum hrtimer_restart
 825ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
 826{
 827	struct iosm_imem *ipc_imem =
 828		container_of(hr_timer, struct iosm_imem, tdupdate_timer);
 829
 830	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
 831				 NULL, 0, false);
 832	return HRTIMER_NORESTART;
 833}
 834
 835/* Get the CP execution state and map it to the AP phase. */
 836enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
 837{
 838	enum ipc_mem_exec_stage exec_stage =
 839				ipc_imem_get_exec_stage_buffered(ipc_imem);
840	/* During IPC_P_OFF_REQ, keep the internal precalculated phase. */
 841	return ipc_imem->phase == IPC_P_OFF_REQ ?
 842		       ipc_imem->phase :
 843		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
 844}
 845
 846const char *ipc_imem_phase_get_string(enum ipc_phase phase)
 847{
 848	switch (phase) {
 849	case IPC_P_RUN:
 850		return "A-RUN";
 851
 852	case IPC_P_OFF:
 853		return "A-OFF";
 854
 855	case IPC_P_ROM:
 856		return "A-ROM";
 857
 858	case IPC_P_PSI:
 859		return "A-PSI";
 860
 861	case IPC_P_EBL:
 862		return "A-EBL";
 863
 864	case IPC_P_CRASH:
 865		return "A-CRASH";
 866
 867	case IPC_P_CD_READY:
 868		return "A-CD_READY";
 869
 870	case IPC_P_OFF_REQ:
 871		return "A-OFF_REQ";
 872
 873	default:
 874		return "A-???";
 875	}
 876}
 877
 878void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
 879{
 880	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };
 881
 882	pipe->is_open = false;
 883	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
 884			      &prep_args);
 885
 886	ipc_imem_pipe_cleanup(ipc_imem, pipe);
 887}
 888
 889void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
 890{
 891	struct ipc_mem_channel *channel;
 892
 893	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
 894		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
 895		return;
 896	}
 897
 898	channel = &ipc_imem->channels[channel_id];
 899
 900	if (channel->state == IMEM_CHANNEL_FREE) {
 901		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
 902			channel_id, channel->state);
 903		return;
 904	}
 905
 906	/* Free only the channel id in the CP power off mode. */
 907	if (channel->state == IMEM_CHANNEL_RESERVED)
 908		/* Release only the channel id. */
 909		goto channel_free;
 910
 911	if (ipc_imem->phase == IPC_P_RUN) {
 912		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
 913		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
 914	}
 915
 916	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
 917	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
 918
 919channel_free:
 920	ipc_imem_channel_free(channel);
 921}
 922
 923struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
 924					      int channel_id, u32 db_id)
 925{
 926	struct ipc_mem_channel *channel;
 927
 928	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
 929		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
 930		return NULL;
 931	}
 932
 933	channel = &ipc_imem->channels[channel_id];
 934
 935	channel->state = IMEM_CHANNEL_ACTIVE;
 936
 937	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
 938		goto ul_pipe_err;
 939
 940	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
 941		goto dl_pipe_err;
 942
 943	/* Allocate the downlink buffers in tasklet context. */
 944	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
 945				     &channel->dl_pipe, 0, false)) {
 946		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
 947		goto task_failed;
 948	}
 949
 950	/* Active channel. */
 951	return channel;
 952task_failed:
 953	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
 954dl_pipe_err:
 955	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
 956ul_pipe_err:
 957	ipc_imem_channel_free(channel);
 958	return NULL;
 959}
 960
 961void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
 962{
 963	ipc_protocol_suspend(ipc_imem->ipc_protocol);
 964}
 965
 966void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
 967{
 968	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
 969}
 970
 971void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
 972{
 973	enum ipc_mem_exec_stage stage;
 974
 975	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
 976		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
 977		ipc_imem_phase_update_check(ipc_imem, stage);
 978	}
 979}
 980
 981void ipc_imem_channel_free(struct ipc_mem_channel *channel)
 982{
 983	/* Reset dynamic channel elements. */
 984	channel->state = IMEM_CHANNEL_FREE;
 985}
 986
 987int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
 988			   enum ipc_ctype ctype)
 989{
 990	struct ipc_mem_channel *channel;
 991	int i;
 992
 993	/* Find channel of given type/index */
 994	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
 995		channel = &ipc_imem->channels[i];
 996		if (channel->ctype == ctype && channel->index == index)
 997			break;
 998	}
 999
1000	if (i >= ipc_imem->nr_of_channels) {
1001		dev_dbg(ipc_imem->dev,
1002			"no channel definition for index=%d ctype=%d", index,
1003			ctype);
1004		return -ECHRNG;
1005	}
1006
1007	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
1008		dev_dbg(ipc_imem->dev, "channel is in use");
1009		return -EBUSY;
1010	}
1011
1012	if (channel->ctype == IPC_CTYPE_WWAN &&
1013	    index == IPC_MEM_MUX_IP_CH_IF_ID)
1014		channel->if_id = index;
1015
1016	channel->channel_id = index;
1017	channel->state = IMEM_CHANNEL_RESERVED;
1018
1019	return i;
1020}
1021
1022void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
1023			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
1024{
1025	struct ipc_mem_channel *channel;
1026
1027	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
1028	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
1029		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
1030			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
1031		return;
1032	}
1033
1034	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
1035		dev_err(ipc_imem->dev, "too many channels");
1036		return;
1037	}
1038
1039	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
1040	channel->channel_id = ipc_imem->nr_of_channels;
1041	channel->ctype = ctype;
1042	channel->index = chnl_cfg.id;
1043	channel->net_err_count = 0;
1044	channel->state = IMEM_CHANNEL_FREE;
1045	ipc_imem->nr_of_channels++;
1046
1047	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
1048				IRQ_MOD_OFF);
1049
1050	skb_queue_head_init(&channel->ul_list);
1051
1052	init_completion(&channel->ul_sem);
1053}
1054
1055void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
1056			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
1057{
1058	struct ipc_mem_channel *channel;
1059
1060	if (id < 0 || id >= ipc_imem->nr_of_channels) {
1061		dev_err(ipc_imem->dev, "invalid channel id %d", id);
1062		return;
1063	}
1064
1065	channel = &ipc_imem->channels[id];
1066
1067	if (channel->state != IMEM_CHANNEL_FREE &&
1068	    channel->state != IMEM_CHANNEL_RESERVED) {
1069		dev_err(ipc_imem->dev, "invalid channel state %d",
1070			channel->state);
1071		return;
1072	}
1073
1074	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
1075	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
1076	channel->ul_pipe.is_open = false;
1077	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
1078	channel->ul_pipe.channel = channel;
1079	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
1080	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
1081	channel->ul_pipe.irq_moderation = irq_moderation;
1082	channel->ul_pipe.buf_size = 0;
1083
1084	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
1085	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
1086	channel->dl_pipe.is_open = false;
1087	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
1088	channel->dl_pipe.channel = channel;
1089	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
1090	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
1091	channel->dl_pipe.irq_moderation = irq_moderation;
1092	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
1093}
1094
1095static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
1096{
1097	int i;
1098
1099	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
1100		struct ipc_mem_channel *channel;
1101
1102		channel = &ipc_imem->channels[i];
1103
1104		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
1105		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
1106
1107		ipc_imem_channel_free(channel);
1108	}
1109}
1110
1111void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
1112{
1113	struct sk_buff *skb;
1114
1115	/* Force pipe to closed state also when not explicitly closed through
1116	 * ipc_imem_pipe_close()
1117	 */
1118	pipe->is_open = false;
1119
1120	/* Empty the uplink skb accumulator. */
1121	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
1122		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
1123
1124	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
1125}
1126
1127/* Send IPC protocol uninit to the modem when the link is active. */
1128static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
1129{
1130	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
1131	enum ipc_mem_device_ipc_state ipc_state;
1132
1133	/* If the PCIe link is up, set the modem state to IPC_UNINIT;
1134	 * otherwise, when the link is down, skip the handshake.
1135	 */
1136	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
1137		/* set modem to UNINIT
1138		 * (in case we want to reload the AP driver without resetting
1139		 * the modem)
1140		 */
1141		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
1142				  IPC_MEM_DEVICE_IPC_UNINIT);
1143		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
1144
1145		/* Wait for a maximum of 30 ms to allow the modem to
1146		 * uninitialize the protocol.
1147		 */
1148		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
1149		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
1150		       (timeout > 0)) {
1151			usleep_range(1000, 1250);
1152			timeout--;
1153			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
1154		}
1155	}
1156}
1157
1158void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
1159{
1160	ipc_imem->phase = IPC_P_OFF_REQ;
1161
1162	/* forward MDM_NOT_READY to listeners */
1163	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
1164
1165	hrtimer_cancel(&ipc_imem->td_alloc_timer);
1166	hrtimer_cancel(&ipc_imem->tdupdate_timer);
1167	hrtimer_cancel(&ipc_imem->fast_update_timer);
1168	hrtimer_cancel(&ipc_imem->startup_timer);
1169
1170	/* cancel the pending run_state work */
1171	cancel_work_sync(&ipc_imem->run_state_worker);
1172
1173	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
1174		ipc_mux_deinit(ipc_imem->mux);
1175		ipc_wwan_deinit(ipc_imem->wwan);
1176		ipc_port_deinit(ipc_imem->ipc_port);
1177	}
1178
1179	ipc_imem_device_ipc_uninit(ipc_imem);
1180	ipc_imem_channel_reset(ipc_imem);
1181
1182	ipc_protocol_deinit(ipc_imem->ipc_protocol);
1183	ipc_task_deinit(ipc_imem->ipc_task);
1184
1185	kfree(ipc_imem->ipc_task);
1186	kfree(ipc_imem->mmio);
1187
1188	ipc_imem->phase = IPC_P_OFF;
1189}
1190
1191/* After CP has unblocked the PCIe link, save the start address of the doorbell
1192 * scratchpad and prepare the shared memory region. If the flashing to RAM
1193 * procedure shall be executed, copy the chip information from the doorbell
1194 * scratchpad to the application buffer and wake up the flash app.
1195 */
1196static int ipc_imem_config(struct iosm_imem *ipc_imem)
1197{
1198	enum ipc_phase phase;
1199
1200	/* Initialize the semaphore for the blocking read UL/DL transfer. */
1201	init_completion(&ipc_imem->ul_pend_sem);
1202
1203	init_completion(&ipc_imem->dl_pend_sem);
1204
1205	/* clear internal flags */
1206	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
1207	ipc_imem->enter_runtime = 0;
1208
1209	phase = ipc_imem_phase_update(ipc_imem);
1210
1211	/* CP must be either in the power-off or power-on phase. */
1212	switch (phase) {
1213	case IPC_P_ROM:
1214		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
1215		/* poll execution stage (for delayed start, e.g. NAND) */
1216		if (!hrtimer_active(&ipc_imem->startup_timer))
1217			hrtimer_start(&ipc_imem->startup_timer,
1218				      ipc_imem->hrtimer_period,
1219				      HRTIMER_MODE_REL);
1220		return 0;
1221
1222	case IPC_P_PSI:
1223	case IPC_P_EBL:
1224	case IPC_P_RUN:
1225		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
1226		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;
1227
1228		/* Verify the expected initial state. */
1229		if (ipc_imem->ipc_requested_state ==
1230		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
1231			ipc_imem_ipc_init_check(ipc_imem);
1232
1233			return 0;
1234		}
1235		dev_err(ipc_imem->dev,
1236			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
1237			ipc_mmio_get_ipc_state(ipc_imem->mmio));
1238		break;
1239	case IPC_P_CRASH:
1240	case IPC_P_CD_READY:
1241		dev_dbg(ipc_imem->dev,
1242			"Modem is in phase %d, reset Modem to collect CD",
1243			phase);
1244		return 0;
1245	default:
1246		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
1247		break;
1248	}
1249
1250	complete(&ipc_imem->dl_pend_sem);
1251	complete(&ipc_imem->ul_pend_sem);
1252	ipc_imem->phase = IPC_P_OFF;
1253	return -EIO;
1254}
1255
1256/* Pass the dev ptr to the shared memory driver and request the entry points */
1257struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
1258				void __iomem *mmio, struct device *dev)
1259{
1260	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
1261
1262	if (!ipc_imem)
1263		return NULL;
1264
1265	/* Save the device address. */
1266	ipc_imem->pcie = pcie;
1267	ipc_imem->dev = dev;
1268
1269	ipc_imem->pci_device_id = device_id;
1270
1271	ipc_imem->ev_cdev_write_pending = false;
1272	ipc_imem->cp_version = 0;
1273	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
1274
1275	/* Reset the flash channel id. */
1276	ipc_imem->flash_channel_id = -1;
1277
1278	/* Reset the number of configured channels */
1279	ipc_imem->nr_of_channels = 0;
1280
1281	/* allocate IPC MMIO */
1282	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
1283	if (!ipc_imem->mmio) {
1284		dev_err(ipc_imem->dev, "failed to initialize mmio region");
1285		goto mmio_init_fail;
1286	}
1287
1288	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
1289				     GFP_KERNEL);
1290
1291	/* Create tasklet for event handling */
1292	if (!ipc_imem->ipc_task)
1293		goto ipc_task_fail;
1294
1295	if (ipc_task_init(ipc_imem->ipc_task))
1296		goto ipc_task_init_fail;
1297
1298	ipc_imem->ipc_task->dev = ipc_imem->dev;
1299
1300	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);
1301
1302	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);
1303
1304	if (!ipc_imem->ipc_protocol)
1305		goto protocol_init_fail;
1306
1307	/* The phase is set to power off. */
1308	ipc_imem->phase = IPC_P_OFF;
1309
1310	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
1311		     HRTIMER_MODE_REL);
1312	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
1313
1314	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
1315		     HRTIMER_MODE_REL);
1316	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
1317
1318	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
1319		     HRTIMER_MODE_REL);
1320	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
1321
1322	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
1323		     HRTIMER_MODE_REL);
1324	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
1325
1326	if (ipc_imem_config(ipc_imem)) {
1327		dev_err(ipc_imem->dev, "failed to initialize the imem");
1328		goto imem_config_fail;
1329	}
1330
1331	return ipc_imem;
1332
1333imem_config_fail:
1334	hrtimer_cancel(&ipc_imem->td_alloc_timer);
1335	hrtimer_cancel(&ipc_imem->fast_update_timer);
1336	hrtimer_cancel(&ipc_imem->tdupdate_timer);
1337	hrtimer_cancel(&ipc_imem->startup_timer);
1338protocol_init_fail:
1339	cancel_work_sync(&ipc_imem->run_state_worker);
1340	ipc_task_deinit(ipc_imem->ipc_task);
1341ipc_task_init_fail:
1342	kfree(ipc_imem->ipc_task);
1343ipc_task_fail:
1344	kfree(ipc_imem->mmio);
1345mmio_init_fail:
1346	kfree(ipc_imem);
1347	return NULL;
1348}
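/* Editor's note: the error path of ipc_imem_init() above is the kernel's
 * conventional goto-unwind idiom: each failure jumps to a label that
 * releases only what was acquired so far, in reverse order of
 * acquisition. A minimal generic sketch (hypothetical names):
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto err;
 *	b = alloc_b();
 *	if (!b)
 *		goto free_a;
 *	return obj;
 *
 * free_a:
 *	release_a(a);
 * err:
 *	return NULL;
 */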
1349
1350void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
1351{
1352	/* Debounce IPC_EV_IRQ. */
1353	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
1354		ipc_imem->ev_irq_pending[irq] = true;
1355		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
1356					 NULL, 0, false);
1357	}
1358}
1359
1360void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
1361{
1362	ipc_imem->td_update_timer_suspended = suspend;
1363}