// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/nospec.h>

#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_mux_codec.h"
#include "iosm_ipc_task_queue.h"

/* Test the link power state and send a MUX command in blocking mode. */
static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
			       size_t size)
{
	struct iosm_mux *ipc_mux = ipc_imem->mux;
	const struct mux_acb *acb = msg;

	skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
	ipc_imem_ul_send(ipc_mux->imem);

	return 0;
}

static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
{
	struct completion *completion = &ipc_mux->channel->ul_sem;
	int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
					   0, &ipc_mux->acb,
					   sizeof(ipc_mux->acb), false);
	if (ret) {
		dev_err(ipc_mux->dev, "unable to send mux command");
		return ret;
	}

	/* If blocking, suspend the caller and wait for the IRQ in the flash
	 * or crash phase. Return -ETIMEDOUT on timeout to indicate failure.
	 */
	if (blocking) {
		u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;

		reinit_completion(completion);

		if (wait_for_completion_interruptible_timeout
		   (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
		   0) {
			dev_err(ipc_mux->dev, "ch[%d] timeout",
				ipc_mux->channel_id);
			ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
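
/* Usage sketch (illustrative, not part of the driver): a blocking send
 * pairs with the complete() issued on the same ul_sem completion by the
 * response decoder, ipc_mux_dl_cmdresps_decode_process().
 *
 *	ret = ipc_mux_acb_send(ipc_mux, true);
 *	if (ret == -ETIMEDOUT)
 *		;	// no modem answer within IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT
 */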

/* Initialize the command header. */
static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
{
	struct mux_acb *acb = &ipc_mux->acb;
	struct mux_acbh *header;

	header = (struct mux_acbh *)(acb->skb)->data;
	header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
	header->first_cmd_index = header->block_length;
	header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
	header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
}

/* Add a command to the ACB. */
static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
					    void *param, u32 param_size)
{
	struct mux_acbh *header;
	struct mux_cmdh *cmdh;
	struct mux_acb *acb;

	acb = &ipc_mux->acb;
	header = (struct mux_acbh *)(acb->skb)->data;
	cmdh = (struct mux_cmdh *)
		((acb->skb)->data + le32_to_cpu(header->block_length));

	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
	cmdh->command_type = cpu_to_le32(cmd);
	cmdh->if_id = acb->if_id;

	acb->cmd = cmd;
	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
				    param_size);
	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
	if (param)
		memcpy(&cmdh->param, param, param_size);

	skb_put(acb->skb, le32_to_cpu(header->block_length) +
					le16_to_cpu(cmdh->cmd_len));

	return cmdh;
}
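
/* Resulting uplink buffer layout, as built by the two helpers above
 * (sketch for a single command per ACB):
 *
 *	offset 0                      sizeof(struct mux_acbh)
 *	+----------------------------+----------------------------------+
 *	| mux_acbh                   | mux_cmdh                         |
 *	| first_cmd_index -----------^ cmd_len = offsetof(..., param)   |
 *	|                            |           + param_size           |
 *	+----------------------------+----------------------------------+
 */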

/* Prepare a MUX command (MUX Lite). */
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
						  u32 cmd, struct mux_acb *acb,
						  void *param, u32 param_size)
{
	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;

	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
	cmdh->command_type = cpu_to_le32(cmd);
	cmdh->if_id = acb->if_id;

	acb->cmd = cmd;

	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
				    param_size);
	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);

	if (param)
		memcpy(&cmdh->param, param, param_size);

	skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));

	return cmdh;
}

static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
{
	struct mux_acb *acb = &ipc_mux->acb;
	struct sk_buff *skb;
	dma_addr_t mapping;

	/* Allocate skb memory for the uplink buffer. */
	skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
				 GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
	if (!skb)
		return -ENOMEM;

	/* Save the skb address. */
	acb->skb = skb;

	memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);

	return 0;
}

int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
			     u32 transaction_id, union mux_cmd_param *param,
			     size_t res_size, bool blocking, bool respond)
{
	struct mux_acb *acb = &ipc_mux->acb;
	union mux_type_cmdh cmdh;
	int ret = 0;

	acb->if_id = if_id;
	ret = ipc_mux_acb_alloc(ipc_mux);
	if (ret)
		return ret;

	if (ipc_mux->protocol == MUX_LITE) {
		cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
						     param, res_size);

		if (respond)
			cmdh.ack_lite->transaction_id =
					cpu_to_le32(transaction_id);
	} else {
		/* Initialize the ACB header. */
		ipc_mux_acb_init(ipc_mux);
		cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
						    res_size);

		if (respond)
			cmdh.ack_aggr->transaction_id =
					cpu_to_le32(transaction_id);
	}
	ret = ipc_mux_acb_send(ipc_mux, blocking);

	return ret;
}
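
/* Usage sketch (illustrative; assumes the MUX_CMD_OPEN_SESSION id and the
 * open_session member of union mux_cmd_param from the companion headers):
 * open IP session 0 and block until the modem acknowledges.
 *
 *	union mux_cmd_param param = {};
 *
 *	ret = ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_OPEN_SESSION, 0, 0,
 *				       &param, sizeof(param.open_session),
 *				       true, false);
 */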

void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
{
	/* Inform the network interface to start/stop flow ctrl */
	ipc_wwan_tx_flowctrl(session->wwan, idx, on);
}

static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
					      union mux_cmd_param param,
					      __le32 command_type, u8 if_id,
					      __le32 transaction_id)
{
	struct mux_acb *acb = &ipc_mux->acb;

	switch (le32_to_cpu(command_type)) {
	case MUX_CMD_OPEN_SESSION_RESP:
	case MUX_CMD_CLOSE_SESSION_RESP:
		/* Resume the control application. */
		acb->got_param = param;
		break;

	case MUX_LITE_CMD_FLOW_CTL_ACK:
		/* This command type is not expected as a response in the
		 * aggregation version of the protocol, so return non-zero.
		 */
		if (ipc_mux->protocol != MUX_LITE)
			return -EINVAL;

		dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
			if_id, le32_to_cpu(transaction_id));
		break;

	case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
		/* This command type is not expected as a response in the
		 * Lite version of the protocol, so return non-zero.
		 */
		if (ipc_mux->protocol == MUX_LITE)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	acb->wanted_response = MUX_CMD_INVALID;
	acb->got_response = le32_to_cpu(command_type);
	complete(&ipc_mux->channel->ul_sem);

	return 0;
}

static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
					  union mux_cmd_param *param,
					  __le32 command_type, u8 if_id,
					  __le16 cmd_len, int size)
{
	struct mux_session *session;
	struct hrtimer *adb_timer;

	dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
		if_id, le32_to_cpu(command_type));

	switch (le32_to_cpu(command_type)) {
	case MUX_LITE_CMD_FLOW_CTL:
	case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:

		if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
			dev_err(ipc_mux->dev, "if_id [%d] not valid",
				if_id);
			return -EINVAL; /* No session interface id. */
		}

		session = &ipc_mux->session[if_id];
		adb_timer = &ipc_mux->imem->adb_timer;

		if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
			/* Backward compatibility */
			if (cmd_len == cpu_to_le16(size))
				session->flow_ctl_mask =
					le32_to_cpu(param->flow_ctl.mask);
			else
				session->flow_ctl_mask = ~0;
			/* If CP asks for flow control enable, set our
			 * internal Tx flag to limit uplink session queueing.
			 */
			session->net_tx_stop = true;

			/* Finish the ADB here; otherwise data already queued
			 * for some other session would be sent to CP only
			 * when the ADB fills up.
			 */
			if (ipc_mux->protocol == MUX_AGGREGATION) {
				ipc_mux_ul_adb_finish(ipc_mux);
				ipc_imem_hrtimer_stop(adb_timer);
			}
			/* Update the stats */
			session->flow_ctl_en_cnt++;
		} else if (param->flow_ctl.mask == 0) {
			/* Just reset the flow control mask and let
			 * mux_flow_ctrl_low_thre_b take control of our
			 * internal Tx flag and of enabling kernel flow
			 * control.
			 */
			dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
				if_id, le32_to_cpu(param->flow_ctl.mask));
			/* Backward compatibility */
			if (cmd_len == cpu_to_le16(size))
				session->flow_ctl_mask =
					le32_to_cpu(param->flow_ctl.mask);
			else
				session->flow_ctl_mask = 0;
			/* Update the stats */
			session->flow_ctl_dis_cnt++;
		} else {
			break;
		}

		ipc_mux->acc_adb_size = 0;
		ipc_mux->acc_payload_size = 0;

		dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
			le32_to_cpu(param->flow_ctl.mask));
		break;

	case MUX_LITE_CMD_LINK_STATUS_REPORT:
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
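
/* Worked example of the mask handling above: a flow control command with
 * mask 0xFFFFFFFF enables flow control (net_tx_stop = true,
 * flow_ctl_en_cnt++), mask 0x00000000 disables it (flow_ctl_dis_cnt++),
 * and any other mask value is ignored. The cmd_len == size check keeps
 * backward compatibility with modems that omit the mask from the
 * parameter block.
 */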

/* Decode and send an appropriate response to a command block. */
static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
	__le32 trans_id = cmdh->transaction_id;
	int size;

	if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
					       cmdh->command_type, cmdh->if_id,
					       cmdh->transaction_id)) {
		/* Failure to decode a command response indicates that the
		 * cmd_type may be a command instead of a response, so try
		 * decoding it as a command.
		 */
		size = offsetof(struct mux_lite_cmdh, param) +
				sizeof(cmdh->param.flow_ctl);
		if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
						    cmdh->command_type,
						    cmdh->if_id,
						    cmdh->cmd_len, size)) {
			/* The decoded command may need a response. Give the
			 * response according to the command type.
			 */
			union mux_cmd_param *mux_cmd = NULL;
			size_t size = 0;
			u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;

			if (cmdh->command_type ==
			    cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
				mux_cmd = &cmdh->param;
				mux_cmd->link_status_resp.response =
					cpu_to_le32(MUX_CMD_RESP_SUCCESS);
				/* response field is u32 */
				size = sizeof(u32);
			} else if (cmdh->command_type ==
				   cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
				cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
			} else {
				return;
			}

			if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
						     le32_to_cpu(trans_id),
						     mux_cmd, size, false,
						     true))
				dev_err(ipc_mux->dev,
					"if_id %d: cmd send failed",
					cmdh->if_id);
		}
	}
}

/* Pass the DL packet to the netif layer. */
static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
			       struct iosm_wwan *wwan, u32 offset,
			       u8 service_class, struct sk_buff *skb,
			       u32 pkt_len)
{
	struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);

	if (!dest_skb)
		return -ENOMEM;

	skb_pull(dest_skb, offset);
	skb_trim(dest_skb, pkt_len);
	/* Pass the packet to the netif layer. */
	dest_skb->priority = service_class;

	return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}
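
/* Datagram extraction sketch for the helper above: for a packet that
 * starts "offset" bytes into the aggregated block and spans pkt_len
 * bytes, the clone shares the data pages of the source skb, so only the
 * head/tail pointers move:
 *
 *	skb_pull(dest_skb, offset);	// drop header and padding
 *	skb_trim(dest_skb, pkt_len);	// cut everything past the payload
 */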

/* Decode Flow Credit Table in the block */
static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
				   unsigned char *block)
{
	struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
	struct iosm_wwan *wwan;
	int ul_credits;
	int if_id;

	if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
		dev_err(ipc_mux->dev, "unexpected FCT length: %d",
			fct->vfl_length);
		return;
	}

	if_id = fct->if_id;
	if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
		dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
		return;
	}

	/* Is the session active? */
	if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
	wwan = ipc_mux->session[if_id].wwan;
	if (!wwan) {
		dev_err(ipc_mux->dev, "session Net ID is NULL");
		return;
	}

	ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);

	dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
		if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);

	/* Update the Flow Credit information from ADB */
	ipc_mux->session[if_id].ul_flow_credits += ul_credits;

	/* Check whether the TX can be started */
	if (ipc_mux->session[if_id].ul_flow_credits > 0) {
		ipc_mux->session[if_id].net_tx_stop = false;
		ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
					  ipc_mux->session[if_id].if_id, false);
	}
}
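
/* Credit accounting example (illustrative numbers): with
 * ul_flow_credits == -500 and a flow credit table granting 2048 bytes,
 * the session ends up with 1548 credits; that is > 0, so net_tx_stop is
 * cleared and the netif queue is woken via
 * ipc_mux_netif_tx_flowctrl(..., false).
 */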

/* Decode non-aggregated datagram */
static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
				   struct sk_buff *skb)
{
	u32 pad_len, packet_offset, adgh_len;
	struct iosm_wwan *wwan;
	struct mux_adgh *adgh;
	u8 *block = skb->data;
	int rc = 0;
	u8 if_id;

	adgh = (struct mux_adgh *)block;

	if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
		dev_err(ipc_mux->dev, "invalid ADGH signature received");
		return;
	}

	if_id = adgh->if_id;
	if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
		dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
		return;
	}

	/* Is the session active? */
	if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
	wwan = ipc_mux->session[if_id].wwan;
	if (!wwan) {
		dev_err(ipc_mux->dev, "session Net ID is NULL");
		return;
	}

	/* Store the pad len for the corresponding session: the pad bytes
	 * negotiated in the open session, less the header size (see the
	 * session management chapter for details). If the resulting padding
	 * is zero or less, the additional head padding is omitted; e.g. for
	 * HEAD_PAD_LEN = 16 or less this field is omitted, whereas for
	 * HEAD_PAD_LEN = 20 this field carries 4 bytes set to zero.
	 */
	pad_len =
		ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
	packet_offset = sizeof(*adgh) + pad_len;

	if_id += ipc_mux->wwan_q_offset;
	adgh_len = le16_to_cpu(adgh->length);

	/* Pass the packet to the netif layer */
	rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
				 adgh->service_class, skb,
				 adgh_len - packet_offset);
	if (rc) {
		dev_err(ipc_mux->dev, "mux adgh decoding error");
		return;
	}
	ipc_mux->session[if_id].flush = 1;
}
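
/* Offset arithmetic example for the decoder above (illustrative,
 * assuming IPC_MEM_DL_ETH_OFFSET is 16 as in the driver headers): with
 * dl_head_pad_len = 20, pad_len = 4, the IP payload starts at
 * sizeof(struct mux_adgh) + 4, and its length is
 * le16_to_cpu(adgh->length) - packet_offset.
 */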

static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
				     struct mux_cmdh *cmdh, int size)
{
	u32 link_st  = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
	u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
	u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
	u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
	union mux_cmd_param *cmd_p = NULL;
	u32 cmd = link_st;
	u32 trans_id;

	if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
					    cmdh->command_type, cmdh->if_id,
					    cmdh->cmd_len, size)) {
		size = 0;
		if (cmdh->command_type == cpu_to_le32(link_st)) {
			cmd_p = &cmdh->param;
			cmd_p->link_status_resp.response = MUX_CMD_RESP_SUCCESS;
		} else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
				(cmdh->command_type == cpu_to_le32(fctl_dis))) {
			cmd = fctl_ack;
		} else {
			return;
		}
		trans_id = le32_to_cpu(cmdh->transaction_id);
		ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
					 trans_id, cmd_p, size, false, true);
	}
}

/* Decode an aggregated command block. */
static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	struct mux_acbh *acbh;
	struct mux_cmdh *cmdh;
	u32 next_cmd_index;
	u8 *block;
	int size;

	acbh = (struct mux_acbh *)(skb->data);
	block = (u8 *)(skb->data);

	next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
	next_cmd_index = array_index_nospec(next_cmd_index,
					    sizeof(struct mux_cmdh));

	while (next_cmd_index != 0) {
		cmdh = (struct mux_cmdh *)&block[next_cmd_index];
		next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
		if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
						       cmdh->command_type,
						       cmdh->if_id,
						       cmdh->transaction_id)) {
			size = offsetof(struct mux_cmdh, param) +
				sizeof(cmdh->param.flow_ctl);
			ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
		}
	}
}

/* Process the datagrams of one table. */
static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
			     struct mux_adth_dg *dg, struct sk_buff *skb,
			     int if_id, int nr_of_dg)
{
	u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
	u32 packet_offset, i, rc, dg_len;

	for (i = 0; i < nr_of_dg; i++, dg++) {
		if (le32_to_cpu(dg->datagram_index)
				< sizeof(struct mux_adbh))
			goto dg_error;

		/* Is the packet inside the ADB? */
		if (le32_to_cpu(dg->datagram_index) >=
					le32_to_cpu(adbh->block_length)) {
			goto dg_error;
		} else {
			packet_offset =
				le32_to_cpu(dg->datagram_index) +
				dl_head_pad_len;
			dg_len = le16_to_cpu(dg->datagram_length);
			/* Pass the packet to the netif layer. */
			rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
						 packet_offset,
						 dg->service_class, skb,
						 dg_len - dl_head_pad_len);
			if (rc)
				goto dg_error;
		}
	}
	return 0;
dg_error:
	return -1;
}
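
/* Bounds recap for the loop above: a datagram index is only valid in
 * [sizeof(struct mux_adbh), block_length), i.e. it must point past the
 * ADB header and stay inside the received block; any violation aborts
 * the whole ADB via dg_error.
 */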

/* Decode an aggregated data block. */
static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
			      struct sk_buff *skb)
{
	struct mux_adth_dg *dg;
	struct iosm_wwan *wwan;
	struct mux_adbh *adbh;
	struct mux_adth *adth;
	int nr_of_dg, if_id;
	u32 adth_index;
	u8 *block;

	block = skb->data;
	adbh = (struct mux_adbh *)block;

	/* Process the aggregated datagram tables. */
	adth_index = le32_to_cpu(adbh->first_table_index);

	/* Has CP sent an empty ADB? */
	if (adth_index < 1) {
		dev_err(ipc_mux->dev, "unexpected empty ADB");
		goto adb_decode_err;
	}

	/* Loop through mixed session tables. */
	while (adth_index) {
		/* Get the reference to the table header. */
		adth = (struct mux_adth *)(block + adth_index);

		/* Get the interface id and map it to the netif id. */
		if_id = adth->if_id;
		if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
			goto adb_decode_err;

		if_id = array_index_nospec(if_id,
					   IPC_MEM_MUX_IP_SESSION_ENTRIES);

		/* Is the session active? */
		wwan = ipc_mux->session[if_id].wwan;
		if (!wwan)
			goto adb_decode_err;

		/* Consistency checks for aggregated datagram table. */
		if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
			goto adb_decode_err;

		if (le16_to_cpu(adth->table_length) < sizeof(struct mux_adth))
			goto adb_decode_err;

		/* Calculate the number of datagrams. */
		nr_of_dg = (le16_to_cpu(adth->table_length) -
					sizeof(struct mux_adth)) /
					sizeof(struct mux_adth_dg);

		/* Is the datagram table empty? */
		if (nr_of_dg < 1) {
			dev_err(ipc_mux->dev,
				"adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
				adth_index, nr_of_dg,
				le32_to_cpu(adth->next_table_index));

			/* Move to the next aggregated datagram table. */
			adth_index = le32_to_cpu(adth->next_table_index);
			continue;
		}

		/* New aggregated datagram table. */
		dg = adth->dg;
		if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
				      nr_of_dg) < 0)
			goto adb_decode_err;

		/* Mark the session for a final flush. */
		ipc_mux->session[if_id].flush = 1;

		/* Move to the next aggregated datagram table. */
		adth_index = le32_to_cpu(adth->next_table_index);
	}

adb_decode_err:
	return;
}

/**
 * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
 *                     depending on the header.
 * @ipc_mux:           Pointer to MUX data-struct
 * @skb:               Pointer to ipc_skb.
 */
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	u32 signature;

	if (!skb->data)
		return;

	/* Decode the MUX header type. */
	signature = le32_to_cpup((__le32 *)skb->data);

	switch (signature) {
	case IOSM_AGGR_MUX_SIG_ADBH:	/* Aggregated Data Block Header */
		mux_dl_adb_decode(ipc_mux, skb);
		break;
	case IOSM_AGGR_MUX_SIG_ADGH:
		ipc_mux_dl_adgh_decode(ipc_mux, skb);
		break;
	case MUX_SIG_FCTH:
		ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
		break;
	case IOSM_AGGR_MUX_SIG_ACBH:	/* Aggregated Command Block Header */
		ipc_mux_dl_acb_decode(ipc_mux, skb);
		break;
	case MUX_SIG_CMDH:
		ipc_mux_dl_cmd_decode(ipc_mux, skb);
		break;

	default:
		dev_err(ipc_mux->dev, "invalid ABH signature");
	}

	ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
}
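
/* Dispatch summary for ipc_mux_dl_decode(), taken from the switch above:
 * ADBH -> aggregated data, ADGH -> single datagram (MUX Lite), FCTH ->
 * flow credits, ACBH -> aggregated commands, CMDH -> single command
 * (MUX Lite). The skb is freed here in every case, so the decoders must
 * clone whatever they need to keep (see ipc_mux_net_receive()).
 */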

static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
				struct mux_adb *ul_adb, u32 type)
{
	/* Take the first element of the free list. */
	struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
	u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
	u32 *next_tb_id;
	int qlt_size;
	u32 if_id;

	if (!skb)
		return -EBUSY; /* Wait for a free ADB skb. */

	/* Mark it as UL ADB to select the right free operation. */
	IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;

	switch (type) {
	case IOSM_AGGR_MUX_SIG_ADBH:
		/* Save the ADB memory settings. */
		ul_adb->dest_skb = skb;
		ul_adb->buf = skb->data;
		ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;

		/* Reset the statistic counters. */
		ul_adb->if_cnt = 0;
		ul_adb->payload_size = 0;
		ul_adb->dg_cnt_total = 0;

		/* Initialize the ADBH. */
		ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
		memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
		ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
		ul_adb->adbh->block_length =
					cpu_to_le32(sizeof(struct mux_adbh));
		next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
		ul_adb->next_table_index = next_tb_id;

		/* Clear the local copy of DGs for the new ADB. */
		memset(ul_adb->dg, 0, sizeof(ul_adb->dg));

		/* Clear the DG count and QLT updated status for the new ADB. */
		for (if_id = 0; if_id < no_if; if_id++) {
			ul_adb->dg_count[if_id] = 0;
			ul_adb->qlt_updated[if_id] = 0;
		}
		break;

	case IOSM_AGGR_MUX_SIG_ADGH:
		/* Save the ADB memory settings. */
		ul_adb->dest_skb = skb;
		ul_adb->buf = skb->data;
		ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
		/* Reset the statistic counters. */
		ul_adb->if_cnt = 0;
		ul_adb->payload_size = 0;
		ul_adb->dg_cnt_total = 0;

		ul_adb->adgh = (struct mux_adgh *)skb->data;
		memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
		break;

	case MUX_SIG_QLTH:
		qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
			   (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));

		if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
			dev_err(ipc_mux->dev,
				"can't support. QLT size:%d SKB size: %d",
				qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
			return -ERANGE;
		}

		ul_adb->qlth_skb = skb;
		memset((ul_adb->qlth_skb)->data, 0, qlt_size);
		skb_put(skb, qlt_size);
		break;
	}

	return 0;
}
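
/* QLT sizing example for the MUX_SIG_QLTH case above (illustrative,
 * assuming MUX_QUEUE_LEVEL is 1 as in the companion header):
 *
 *	qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl)
 *		 + 1 * sizeof(struct mux_lite_vfl);
 *
 * The result must fit into IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE, otherwise
 * the function bails out with -ERANGE.
 */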

static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
{
	struct mux_adb *ul_adb = &ipc_mux->ul_adb;
	u16 adgh_len;
	long long bytes;
	char *str;

	if (!ul_adb->dest_skb) {
		dev_err(ipc_mux->dev, "no dest skb");
		return;
	}

	adgh_len = le16_to_cpu(ul_adb->adgh->length);
	skb_put(ul_adb->dest_skb, adgh_len);
	skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
	ul_adb->dest_skb = NULL;

	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
		struct mux_session *session;

		session = &ipc_mux->session[ul_adb->adgh->if_id];
		str = "available_credits";
		bytes = (long long)session->ul_flow_credits;

	} else {
		str = "pend_bytes";
		bytes = ipc_mux->ul_data_pend_bytes;
		ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes +
					      adgh_len;
	}

	dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
		adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
		str, bytes);
}

static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
				   struct mux_adb *ul_adb, int *out_offset)
{
	int i, qlt_size, offset = *out_offset;
	struct mux_qlth *p_adb_qlt;
	struct mux_adth_dg *dg;
	struct mux_adth *adth;
	u16 adth_dg_size;
	u32 *next_tb_id;

	qlt_size = offsetof(struct mux_qlth, ql) +
			MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);

	for (i = 0; i < ipc_mux->nr_sessions; i++) {
		if (ul_adb->dg_count[i] > 0) {
			adth_dg_size = offsetof(struct mux_adth, dg) +
					ul_adb->dg_count[i] * sizeof(*dg);

			*ul_adb->next_table_index = offset;
			adth = (struct mux_adth *)&ul_adb->buf[offset];
			next_tb_id = (unsigned int *)&adth->next_table_index;
			ul_adb->next_table_index = next_tb_id;
			offset += adth_dg_size;
			adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
			adth->if_id = i;
			adth->table_length = cpu_to_le16(adth_dg_size);
			adth_dg_size -= offsetof(struct mux_adth, dg);
			memcpy(adth->dg, ul_adb->dg[i], adth_dg_size);
			ul_adb->if_cnt++;
		}

		if (ul_adb->qlt_updated[i]) {
			*ul_adb->next_table_index = offset;
			p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
			ul_adb->next_table_index =
				(u32 *)&p_adb_qlt->next_table_index;
			memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
			offset += qlt_size;
		}
	}
	*out_offset = offset;
}

/**
 * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
 * @ipc_mux:               Pointer to MUX data-struct.
 */
void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
{
	bool ul_data_pend = false;
	struct mux_adb *ul_adb;
	unsigned long flags;
	int offset;

	ul_adb = &ipc_mux->ul_adb;
	if (!ul_adb->dest_skb)
		return;

	offset = *ul_adb->next_table_index;
	ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
	ul_adb->adbh->block_length = cpu_to_le32(offset);

	if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
		ul_adb->dest_skb = NULL;
		return;
	}

	*ul_adb->next_table_index = 0;
	ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
	skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));

	spin_lock_irqsave(&ipc_mux->channel->ul_list.lock, flags);
	__skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
	spin_unlock_irqrestore(&ipc_mux->channel->ul_list.lock, flags);

	ul_adb->dest_skb = NULL;
	/* Update the TDs with ul_list. */
	ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);

	/* Delay the doorbell irq */
	if (ul_data_pend)
		ipc_imem_td_update_timer_start(ipc_mux->imem);

	ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
	ipc_mux->acc_payload_size += ul_adb->payload_size;
	ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
}
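
/* Finishing an ADB thus performs three steps: (1) append the encoded
 * ADTH/QLT tables and patch block_length, (2) hand the skb to the
 * channel ul_list under the list lock and write the TDs, (3) update the
 * accumulated size/payload statistics used for flow control. A block
 * that outgrew ul_adb->size is abandoned by clearing dest_skb.
 */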

/* Allocate an ADB from the free list and initialize it with an ADBH. */
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
				    struct mux_adb *adb, int *size_needed,
				    u32 type)
{
	bool ret_val = false;
	int status;

	if (!adb->dest_skb) {
		/* Allocate memory for the ADB, including the datagram
		 * table header.
		 */
		status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
		if (status)
			/* Is a pending ADB available? */
			ret_val = true; /* None. */

		/* Reset the needed size only for new ADB memory. */
		*size_needed = 0;
	}

	return ret_val;
}

/* Inform the network stack to stop sending further packets for all opened
 * sessions.
 */
static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{
	struct mux_session *session;
	int idx;

	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
		session = &ipc_mux->session[idx];

		if (!session->wwan)
			continue;

		session->net_tx_stop = true;
	}
}

/* Send the Queue Level Table of all opened sessions. */
static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
{
	struct ipc_mem_lite_gen_tbl *qlt;
	struct mux_session *session;
	bool qlt_updated = false;
	int i;
	int qlt_size;

	if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
		return qlt_updated;

	qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
		   MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);

	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
		session = &ipc_mux->session[i];

		if (!session->wwan || session->flow_ctl_mask)
			continue;

		if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
					 MUX_SIG_QLTH)) {
			dev_err(ipc_mux->dev,
				"no reserved mem to send QLT of if_id: %d", i);
			break;
		}

		/* Prepare the QLT. */
		qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
			      ->data;
		qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
		qlt->length = cpu_to_le16(qlt_size);
		qlt->if_id = i;
		qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
		qlt->reserved[0] = 0;
		qlt->reserved[1] = 0;

		qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);

		/* Add the QLT to the transfer list. */
		skb_queue_tail(&ipc_mux->channel->ul_list,
			       ipc_mux->ul_adb.qlth_skb);

		qlt_updated = true;
		ipc_mux->ul_adb.qlth_skb = NULL;
	}

	if (qlt_updated)
		/* Update the TDs with ul_list. */
		(void)ipc_imem_ul_write_td(ipc_mux->imem);

	return qlt_updated;
}

/* Check the available credits for the specified session and return the
 * number of packets for which credits are available.
 */
static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
					  struct mux_session *session,
					  struct sk_buff_head *ul_list,
					  int max_nr_of_pkts)
{
	int pkts_to_send = 0;
	struct sk_buff *skb;
	int credits = 0;

	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
		credits = session->ul_flow_credits;
		if (credits <= 0) {
			dev_dbg(ipc_mux->dev,
				"FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
				session->if_id, session->ul_flow_credits,
				session->ul_list.qlen); /* nr_of_bytes */
			return 0;
		}
	} else {
		credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
			  ipc_mux->ul_data_pend_bytes;
		if (credits <= 0) {
			ipc_mux_stop_tx_for_all_sessions(ipc_mux);

			dev_dbg(ipc_mux->dev,
				"if_id[%d] encod. fail Bytes: %llu, thresh: %d",
				session->if_id, ipc_mux->ul_data_pend_bytes,
				IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
			return 0;
		}
	}

	/* Check if there are enough credits/bytes available to send the
	 * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
	 * depending on the available credits.
	 */
	skb_queue_walk(ul_list, skb)
	{
		if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
			break;
		credits -= skb->len;
		pkts_to_send++;
	}

	return pkts_to_send;
}
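
/* Worked example for the queue walk above (illustrative numbers): with
 * credits = 3000 and queued skb lengths 1400, 1400, 1400, only two
 * packets fit (3000 -> 1600 -> 200), so pkts_to_send = 2 regardless of
 * a larger max_nr_of_pkts.
 */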

/* Encode the UL IP packet according to the MUX Lite spec. */
static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
				  struct mux_session *session,
				  struct sk_buff_head *ul_list,
				  struct mux_adb *adb, int nr_of_pkts)
{
	int offset = sizeof(struct mux_adgh);
	int adb_updated = -EINVAL;
	struct sk_buff *src_skb;
	int aligned_size = 0;
	int nr_of_skb = 0;
	u32 pad_len = 0;

	/* Recalculate the number of packets depending on the number of bytes
	 * to be processed/the available credits.
	 */
	nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
						    nr_of_pkts);

	/* If the nr_of_pkts calculated from the available credits is <= 0,
	 * there is nothing to do.
	 */
	if (nr_of_pkts <= 0)
		return 0;

	/* Read the configured UL head_pad_length for the session. */
	if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
		pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;

	/* Process all pending UL packets for this session
	 * depending on the allocated datagram table size.
	 */
	while (nr_of_pkts > 0) {
		/* Get a destination skb allocated. */
		if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
					    IOSM_AGGR_MUX_SIG_ADGH)) {
			dev_err(ipc_mux->dev, "no reserved memory for ADGH");
			return -ENOMEM;
		}

		/* Peek at the head of the list. */
		src_skb = skb_peek(ul_list);
		if (!src_skb) {
			dev_err(ipc_mux->dev,
				"skb peek return NULL with count : %d",
				nr_of_pkts);
			break;
		}

		/* Calculate the memory value. */
		aligned_size = ALIGN((pad_len + src_skb->len), 4);

		ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;

		if (ipc_mux->size_needed > adb->size) {
			dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
				ipc_mux->size_needed, adb->size);
			/* Return 1 if any IP packet is added to the transfer
			 * list.
			 */
			return nr_of_skb ? 1 : 0;
		}

		/* Add the buffer (without head padding) to the next pending
		 * transfer.
		 */
		memcpy(adb->buf + offset + pad_len, src_skb->data,
		       src_skb->len);

		adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
		adb->adgh->if_id = session_id;
		adb->adgh->length =
			cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
				    src_skb->len);
		adb->adgh->service_class = src_skb->priority;
		adb->adgh->next_count = --nr_of_pkts;
		adb->dg_cnt_total++;
		adb->payload_size += src_skb->len;

		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
			/* Decrement the credit value as we are processing the
			 * datagram from the UL list.
			 */
			session->ul_flow_credits -= src_skb->len;

		/* Remove the processed element and free it. */
		src_skb = skb_dequeue(ul_list);
		dev_kfree_skb(src_skb);
		nr_of_skb++;

		ipc_mux_ul_adgh_finish(ipc_mux);
	}

	if (nr_of_skb) {
		/* Send the QLT info to the modem if pending bytes exceed the
		 * watermark in case of MUX Lite.
		 */
		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
		    ipc_mux->ul_data_pend_bytes >=
			    IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
			adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
		else
			adb_updated = 1;

		/* Update the TDs with ul_list. */
		(void)ipc_imem_ul_write_td(ipc_mux->imem);
	}

	return adb_updated;
}

/**
 * ipc_mux_ul_adb_update_ql - Add the Queue Level Table and Queue Level to ADB.
 * @ipc_mux:            pointer to MUX instance data
 * @p_adb:              pointer to UL aggregated data block
 * @session_id:         session id
 * @qlth_n_ql_size:     Length (in bytes) of the datagram table
 * @ul_list:            pointer to skb buffer head
 */
void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
			      int session_id, int qlth_n_ql_size,
			      struct sk_buff_head *ul_list)
{
	int qlevel = ul_list->qlen;
	struct mux_qlth *p_qlt;

	p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];

	/* Initialize the QLTH if it has not been done yet. */
	if (p_adb->qlt_updated[session_id] == 0) {
		p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
		p_qlt->if_id = session_id;
		p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
		p_qlt->reserved = 0;
		p_qlt->reserved2 = 0;
	}

	/* Always update the Queue Level information. */
	p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
	p_adb->qlt_updated[session_id] = 1;
}

/* Update the next table index. */
static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
				      int session_id,
				      struct sk_buff_head *ul_list,
				      struct mux_adth_dg *dg,
				      int aligned_size,
				      u32 qlth_n_ql_size,
				      struct mux_adb *adb,
				      struct sk_buff *src_skb)
{
	ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
				 qlth_n_ql_size, ul_list);
	ipc_mux_ul_adb_finish(ipc_mux);
	if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
				    IOSM_AGGR_MUX_SIG_ADBH))
		return -ENOMEM;

	ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);

	ipc_mux->size_needed += offsetof(struct mux_adth, dg);
	ipc_mux->size_needed += qlth_n_ql_size;
	ipc_mux->size_needed += sizeof(*dg) + aligned_size;
	return 0;
}

/* Encode pending session UL data into datagram entries. */
static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
			    struct mux_adth_dg *dg,
			    struct sk_buff_head *ul_list,
			    struct sk_buff *src_skb, int session_id,
			    int pkt_to_send, u32 qlth_n_ql_size,
			    int *out_offset, int head_pad_len)
{
	int aligned_size;
	int offset = *out_offset;
	unsigned long flags;
	int nr_of_skb = 0;

	while (pkt_to_send > 0) {
		/* Peek at the head of the list. */
		src_skb = skb_peek(ul_list);
		if (!src_skb) {
			dev_err(ipc_mux->dev,
				"skb peek return NULL with count : %d",
				pkt_to_send);
			return -1;
		}
		aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
		ipc_mux->size_needed += sizeof(*dg) + aligned_size;

		if (ipc_mux->size_needed > adb->size ||
		    ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
		      IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
			*adb->next_table_index = offset;
			if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
						       ul_list, dg,
						       aligned_size,
						       qlth_n_ql_size, adb,
						       src_skb) < 0)
				return -ENOMEM;
			nr_of_skb = 0;
			offset = le32_to_cpu(adb->adbh->block_length);
			/* Load the pointer to the next available datagram
			 * entry.
			 */
			dg = adb->dg[session_id] + adb->dg_count[session_id];
		}
		/* Add the buffer without head padding to the next pending
		 * transfer.
		 */
		memcpy(adb->buf + offset + head_pad_len,
		       src_skb->data, src_skb->len);
		/* Set up the datagram entry. */
		dg->datagram_index = cpu_to_le32(offset);
		dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
		dg->service_class = (((struct sk_buff *)src_skb)->priority);
		dg->reserved = 0;
		adb->dg_cnt_total++;
		adb->payload_size += le16_to_cpu(dg->datagram_length);
		dg++;
		adb->dg_count[session_id]++;
		offset += aligned_size;
		/* Remove the processed element and free it. */
		spin_lock_irqsave(&ul_list->lock, flags);
		src_skb = __skb_dequeue(ul_list);
		spin_unlock_irqrestore(&ul_list->lock, flags);

		dev_kfree_skb(src_skb);
		nr_of_skb++;
		pkt_to_send--;
	}
	*out_offset = offset;
	return nr_of_skb;
}
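
/* Invariant kept by the loop above: "offset" always points at the next
 * free position in the ADB buffer (advanced in 4-byte aligned steps),
 * while size_needed mirrors it plus the pending table overhead; when
 * either would exceed adb->size or the flow-control high watermark, the
 * current ADB is finished and a fresh one is allocated before the copy.
 */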

/* Encode session UL data into an ADB. */
static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
			     struct mux_session *session,
			     struct sk_buff_head *ul_list, struct mux_adb *adb,
			     int pkt_to_send)
{
	int adb_updated = -EINVAL;
	int head_pad_len, offset;
	struct sk_buff *src_skb = NULL;
	struct mux_adth_dg *dg;
	u32 qlth_n_ql_size;

	/* If any of the opened sessions has flow control ON, limit the
	 * UL data to mux_flow_ctrl_high_thresh_b bytes.
	 */
	if (ipc_mux->ul_data_pend_bytes >=
		IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
		ipc_mux_stop_tx_for_all_sessions(ipc_mux);
		return adb_updated;
	}

	qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
			 MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
	head_pad_len = session->ul_head_pad_len;

	if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
		head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;

	if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
				    IOSM_AGGR_MUX_SIG_ADBH))
		return -ENOMEM;

	offset = le32_to_cpu(adb->adbh->block_length);

	if (ipc_mux->size_needed == 0)
		ipc_mux->size_needed = offset;

	/* Calculate the size needed for the ADTH, QLTH and QL. */
	if (adb->dg_count[session_id] == 0) {
		ipc_mux->size_needed += offsetof(struct mux_adth, dg);
		ipc_mux->size_needed += qlth_n_ql_size;
	}

	dg = adb->dg[session_id] + adb->dg_count[session_id];

	if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
			     session_id, pkt_to_send, qlth_n_ql_size, &offset,
			     head_pad_len) > 0) {
		adb_updated = 1;
		*adb->next_table_index = offset;
		ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
					 qlth_n_ql_size, ul_list);
		adb->adbh->block_length = cpu_to_le32(offset);
	}

	return adb_updated;
}

bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
{
	struct sk_buff_head *ul_list;
	struct mux_session *session;
	int updated = 0;
	int session_id;
	int dg_n;
	int i;

	if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
	    ipc_mux->adb_prep_ongoing)
		return false;

	ipc_mux->adb_prep_ongoing = true;

	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
		session_id = ipc_mux->rr_next_session;
		session = &ipc_mux->session[session_id];

		/* Advance to the next session and handle the
		 * rr_next_session overflow.
		 */
		ipc_mux->rr_next_session++;
		if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
			ipc_mux->rr_next_session = 0;

		if (!session->wwan || session->flow_ctl_mask ||
		    session->net_tx_stop)
			continue;

		ul_list = &session->ul_list;

		/* Is something pending in UL and is flow ctrl off? */
		dg_n = skb_queue_len(ul_list);
		if (dg_n > MUX_MAX_UL_DG_ENTRIES)
			dg_n = MUX_MAX_UL_DG_ENTRIES;

		if (dg_n == 0)
			/* Nothing to do for this ipc_mux session
			 * -> try the next session id.
			 */
			continue;
		if (ipc_mux->protocol == MUX_LITE)
			updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
							 session, ul_list,
							 &ipc_mux->ul_adb,
							 dg_n);
		else
			updated = mux_ul_adb_encode(ipc_mux, session_id,
						    session, ul_list,
						    &ipc_mux->ul_adb,
						    dg_n);
	}

	ipc_mux->adb_prep_ongoing = false;
	return updated == 1;
}
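
/* Scheduling note for the loop above: sessions are served round-robin
 * starting at rr_next_session, with at most MUX_MAX_UL_DG_ENTRIES
 * datagrams per session and pass, so one busy session cannot starve the
 * others within a single encode cycle.
 */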

/* Calculate the payload of any given ADB. */
static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
					struct mux_adbh *p_adbh)
{
	struct mux_adth_dg *dg;
	struct mux_adth *adth;
	u32 payload_size = 0;
	u32 next_table_idx;
	int nr_of_dg, i;

	/* Process the aggregated datagram tables. */
	next_table_idx = le32_to_cpu(p_adbh->first_table_index);

	if (next_table_idx < sizeof(struct mux_adbh)) {
		dev_err(ipc_mux->dev, "unexpected empty ADB");
		return payload_size;
	}

	while (next_table_idx != 0) {
		/* Get the reference to the table header. */
		adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);

		if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
			nr_of_dg = (le16_to_cpu(adth->table_length) -
					sizeof(struct mux_adth)) /
					sizeof(struct mux_adth_dg);

			if (nr_of_dg <= 0)
				return payload_size;

			dg = adth->dg;

			for (i = 0; i < nr_of_dg; i++, dg++) {
				if (le32_to_cpu(dg->datagram_index) <
					sizeof(struct mux_adbh)) {
					return payload_size;
				}
				payload_size +=
					le16_to_cpu(dg->datagram_length);
			}
		}
		next_table_idx = le32_to_cpu(adth->next_table_index);
	}

	return payload_size;
}

void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	union mux_type_header hr;
	u16 adgh_len;
	int payload;

	if (ipc_mux->protocol == MUX_LITE) {
		hr.adgh = (struct mux_adgh *)skb->data;
		adgh_len = le16_to_cpu(hr.adgh->length);
		if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
		    ipc_mux->ul_flow == MUX_UL)
			ipc_mux->ul_data_pend_bytes =
					ipc_mux->ul_data_pend_bytes - adgh_len;
	} else {
		hr.adbh = (struct mux_adbh *)(skb->data);
		payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
		ipc_mux->ul_data_pend_bytes -= payload;
	}

	if (ipc_mux->ul_flow == MUX_UL)
		dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
			ipc_mux->ul_data_pend_bytes);

	/* Reset the skb settings. */
	skb_trim(skb, 0);

	/* Add the consumed ADB to the free list. */
	skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
}

/* Start the NETIF uplink send transfer in MUX mode. */
static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	struct iosm_mux *ipc_mux = ipc_imem->mux;
	bool ul_data_pend = false;

	/* Add session UL data to an ADB or ADGH. */
	ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
	if (ul_data_pend) {
		if (ipc_mux->protocol == MUX_AGGREGATION)
			ipc_imem_adb_timer_start(ipc_mux->imem);

		/* Delay the doorbell irq */
		ipc_imem_td_update_timer_start(ipc_mux->imem);
	}
	/* Reset the debounce flag. */
	ipc_mux->ev_mux_net_transmit_pending = false;

	return 0;
}

int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
			      struct sk_buff *skb)
{
	struct mux_session *session = &ipc_mux->session[if_id];
	int ret = -EINVAL;

	if (ipc_mux->channel &&
	    ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
		dev_err(ipc_mux->dev,
			"channel state is not IMEM_CHANNEL_ACTIVE");
		goto out;
	}

	if (!session->wwan) {
		dev_err(ipc_mux->dev, "session net ID is NULL");
		ret = -EFAULT;
		goto out;
	}

	/* The session is under flow control.
	 * Check if the packet can be queued in the session list; if not,
	 * suspend net tx.
	 */
	if (skb_queue_len(&session->ul_list) >=
	    (session->net_tx_stop ?
		     IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
		     (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
		      IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
		ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
		ret = -EBUSY;
		goto out;
	}

	/* Add the skb to the uplink skb accumulator. */
	skb_queue_tail(&session->ul_list, skb);

	/* Inform the IPC kthread to pass uplink IP packets to CP. */
	if (!ipc_mux->ev_mux_net_transmit_pending) {
		ipc_mux->ev_mux_net_transmit_pending = true;
		ret = ipc_task_queue_send_task(ipc_mux->imem,
					       ipc_mux_tq_ul_trigger_encode, 0,
					       NULL, 0, false);
		if (ret)
			goto out;
	}
	dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
		if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
		skb->len, skb->truesize, skb->priority);
	ret = 0;
out:
	return ret;
}
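
/* End-to-end UL path recap, derived from the functions above: the netif
 * transmit path calls ipc_mux_ul_trigger_encode(), which queues the skb
 * on the session ul_list and schedules ipc_mux_tq_ul_trigger_encode()
 * on the IPC tasklet; that encodes the queued packets into an ADB/ADGH
 * and arms the TD-update/ADB timers to ring the doorbell.
 */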
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2020-21 Intel Corporation.
   4 */
   5
   6#include <linux/nospec.h>
   7
   8#include "iosm_ipc_imem_ops.h"
   9#include "iosm_ipc_mux_codec.h"
  10#include "iosm_ipc_task_queue.h"
  11
  12/* Test the link power state and send a MUX command in blocking mode. */
  13static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
  14			       size_t size)
  15{
  16	struct iosm_mux *ipc_mux = ipc_imem->mux;
  17	const struct mux_acb *acb = msg;
  18
  19	skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
  20	ipc_imem_ul_send(ipc_mux->imem);
  21
  22	return 0;
  23}
  24
  25static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
  26{
  27	struct completion *completion = &ipc_mux->channel->ul_sem;
  28	int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
  29					   0, &ipc_mux->acb,
  30					   sizeof(ipc_mux->acb), false);
  31	if (ret) {
  32		dev_err(ipc_mux->dev, "unable to send mux command");
  33		return ret;
  34	}
  35
  36	/* if blocking, suspend the app and wait for irq in the flash or
  37	 * crash phase. return false on timeout to indicate failure.
  38	 */
  39	if (blocking) {
  40		u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;
  41
  42		reinit_completion(completion);
  43
  44		if (wait_for_completion_interruptible_timeout
  45		   (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
  46		   0) {
  47			dev_err(ipc_mux->dev, "ch[%d] timeout",
  48				ipc_mux->channel_id);
  49			ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
  50			return -ETIMEDOUT;
  51		}
  52	}
  53
  54	return 0;
  55}
  56
  57/* Initialize the command header. */
  58static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
  59{
  60	struct mux_acb *acb = &ipc_mux->acb;
  61	struct mux_acbh *header;
  62
  63	header = (struct mux_acbh *)(acb->skb)->data;
  64	header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
  65	header->first_cmd_index = header->block_length;
  66	header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
  67	header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
  68}
  69
  70/* Add a command to the ACB. */
  71static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
  72					    void *param, u32 param_size)
  73{
  74	struct mux_acbh *header;
  75	struct mux_cmdh *cmdh;
  76	struct mux_acb *acb;
  77
  78	acb = &ipc_mux->acb;
  79	header = (struct mux_acbh *)(acb->skb)->data;
  80	cmdh = (struct mux_cmdh *)
  81		((acb->skb)->data + le32_to_cpu(header->block_length));
  82
  83	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
  84	cmdh->command_type = cpu_to_le32(cmd);
  85	cmdh->if_id = acb->if_id;
  86
  87	acb->cmd = cmd;
  88	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
  89				    param_size);
  90	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
  91	if (param)
  92		memcpy(&cmdh->param, param, param_size);
  93
  94	skb_put(acb->skb, le32_to_cpu(header->block_length) +
  95					le16_to_cpu(cmdh->cmd_len));
  96
  97	return cmdh;
  98}
  99
 100/* Prepare mux Command */
 101static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
 102						  u32 cmd, struct mux_acb *acb,
 103						  void *param, u32 param_size)
 104{
 105	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;
 106
 107	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
 108	cmdh->command_type = cpu_to_le32(cmd);
 109	cmdh->if_id = acb->if_id;
 110
 111	acb->cmd = cmd;
 112
 113	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
 114				    param_size);
 115	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
 116
 117	if (param)
 118		memcpy(&cmdh->param, param, param_size);
 119
 120	skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));
 121
 122	return cmdh;
 123}
 124
 125static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
 126{
 127	struct mux_acb *acb = &ipc_mux->acb;
 128	struct sk_buff *skb;
 129	dma_addr_t mapping;
 130
 131	/* Allocate skb memory for the uplink buffer. */
 132	skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
 133				 GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
 134	if (!skb)
 135		return -ENOMEM;
 136
 137	/* Save the skb address. */
 138	acb->skb = skb;
 139
 140	memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);
 141
 142	return 0;
 143}
 144
 145int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
 146			     u32 transaction_id, union mux_cmd_param *param,
 147			     size_t res_size, bool blocking, bool respond)
 148{
 149	struct mux_acb *acb = &ipc_mux->acb;
 150	union mux_type_cmdh cmdh;
 151	int ret = 0;
 152
 153	acb->if_id = if_id;
 154	ret = ipc_mux_acb_alloc(ipc_mux);
 155	if (ret)
 156		return ret;
 157
 158	if (ipc_mux->protocol == MUX_LITE) {
 159		cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
 160						     param, res_size);
 161
 162		if (respond)
 163			cmdh.ack_lite->transaction_id =
 164					cpu_to_le32(transaction_id);
 165	} else {
 166		/* Initialize the ACB header. */
 167		ipc_mux_acb_init(ipc_mux);
 168		cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
 169						    res_size);
 170
 171		if (respond)
 172			cmdh.ack_aggr->transaction_id =
 173					cpu_to_le32(transaction_id);
 174	}
 175	ret = ipc_mux_acb_send(ipc_mux, blocking);
 176
 177	return ret;
 178}
 179
 180void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
 181{
 182	/* Inform the network interface to start/stop flow ctrl */
 183	ipc_wwan_tx_flowctrl(session->wwan, idx, on);
 184}
 185
 186static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
 187					      union mux_cmd_param param,
 188					      __le32 command_type, u8 if_id,
 189					      __le32 transaction_id)
 190{
 191	struct mux_acb *acb = &ipc_mux->acb;
 192
 193	switch (le32_to_cpu(command_type)) {
 194	case MUX_CMD_OPEN_SESSION_RESP:
 195	case MUX_CMD_CLOSE_SESSION_RESP:
 196		/* Resume the control application. */
 197		acb->got_param = param;
 198		break;
 199
 200	case MUX_LITE_CMD_FLOW_CTL_ACK:
 201		/* This command type is not expected as response for
 202		 * Aggregation version of the protocol. So return non-zero.
 203		 */
 204		if (ipc_mux->protocol != MUX_LITE)
 205			return -EINVAL;
 206
 207		dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
 208			if_id, le32_to_cpu(transaction_id));
 209		break;
 210
 211	case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
 212		/* This command type is not expected as response for
 213		 * Lite version of the protocol. So return non-zero.
 214		 */
 215		if (ipc_mux->protocol == MUX_LITE)
 216			return -EINVAL;
 217		break;
 218
 219	default:
 220		return -EINVAL;
 221	}
 222
 223	acb->wanted_response = MUX_CMD_INVALID;
 224	acb->got_response = le32_to_cpu(command_type);
 225	complete(&ipc_mux->channel->ul_sem);
 226
 227	return 0;
 228}
 229
 230static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
 231					  union mux_cmd_param *param,
 232					  __le32 command_type, u8 if_id,
 233					  __le16 cmd_len, int size)
 234{
 235	struct mux_session *session;
 236	struct hrtimer *adb_timer;
 237
 238	dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
 239		if_id, le32_to_cpu(command_type));
 240
 241	switch (le32_to_cpu(command_type)) {
 242	case MUX_LITE_CMD_FLOW_CTL:
 243	case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:
 244
 245		if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
 246			dev_err(ipc_mux->dev, "if_id [%d] not valid",
 247				if_id);
 248			return -EINVAL; /* No session interface id. */
 249		}
 250
 251		session = &ipc_mux->session[if_id];
 252		adb_timer = &ipc_mux->imem->adb_timer;
 253
 254		if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
 255			/* Backward Compatibility */
 256			if (cmd_len == cpu_to_le16(size))
 257				session->flow_ctl_mask =
 258					le32_to_cpu(param->flow_ctl.mask);
 259			else
 260				session->flow_ctl_mask = ~0;
 261			/* If CP asks for flow control enable, set our
 262			 * internal Tx flow control flag to limit uplink
 263			 * session queueing.
 264			 */
 265			session->net_tx_stop = true;
 266
 267			/* We have to finish the ADB here.
 268			 * Otherwise data already queued for some
 269			 * other session would be sent to CP anyway
 270			 * once the ADB is full.
 271			 */
 272			if (ipc_mux->protocol == MUX_AGGREGATION) {
 273				ipc_mux_ul_adb_finish(ipc_mux);
 274				ipc_imem_hrtimer_stop(adb_timer);
 275			}
 276			/* Update the stats */
 277			session->flow_ctl_en_cnt++;
 278		} else if (param->flow_ctl.mask == 0) {
 279			/* Just reset the flow control mask and let
 280			 * mux_flow_ctrl_low_thre_b take control of
 281			 * our internal Tx flag and of enabling
 282			 * kernel flow control.
 283			 */
 284			dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
 285				if_id, le32_to_cpu(param->flow_ctl.mask));
 286			/* Backward Compatibility */
 287			if (cmd_len == cpu_to_le16(size))
 288				session->flow_ctl_mask =
 289					le32_to_cpu(param->flow_ctl.mask);
 290			else
 291				session->flow_ctl_mask = 0;
 292			/* Update the stats */
 293			session->flow_ctl_dis_cnt++;
 294		} else {
 295			break;
 296		}
 297
 298		ipc_mux->acc_adb_size = 0;
 299		ipc_mux->acc_payload_size = 0;
 300
 301		dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
 302			le32_to_cpu(param->flow_ctl.mask));
 303		break;
 304
 305	case MUX_LITE_CMD_LINK_STATUS_REPORT:
 306		break;
 307
 308	default:
 309		return -EINVAL;
 310	}
 311	return 0;
 312}
 313
 314/* Decode and send the appropriate response to a command block. */
 315static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
 316{
 317	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
 318	__le32 trans_id = cmdh->transaction_id;
 319	int size;
 320
 321	if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
 322					       cmdh->command_type, cmdh->if_id,
 323					       cmdh->transaction_id)) {
 324		/* Failure to decode a command response indicates the cmd_type
 325		 * may be a command instead, so try decoding it as a command.
 326		 */
 327		size = offsetof(struct mux_lite_cmdh, param) +
 328				sizeof(cmdh->param.flow_ctl);
 329		if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
 330						    cmdh->command_type,
 331						    cmdh->if_id,
 332						    cmdh->cmd_len, size)) {
 333			/* Decoded command may need a response. Give the
 334			 * response according to the command type.
 335			 */
 336			union mux_cmd_param *mux_cmd = NULL;
 337			size_t resp_size = 0;
 338			u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;
 339
 340			if (cmdh->command_type ==
 341			    cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
 342				mux_cmd = &cmdh->param;
 343				mux_cmd->link_status_resp.response =
 344					cpu_to_le32(MUX_CMD_RESP_SUCCESS);
 345				/* response field is u32 */
 346				resp_size = sizeof(u32);
 347			} else if (cmdh->command_type ==
 348				   cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
 349				cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
 350			} else {
 351				return;
 352			}
 353
 354			if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
 355						     le32_to_cpu(trans_id),
 356						     mux_cmd, resp_size, false,
 357						     true))
 358				dev_err(ipc_mux->dev,
 359					"if_id %d: cmd send failed",
 360					cmdh->if_id);
 361		}
 362	}
 363}
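
/* Example exchange handled above: CP sends MUX_LITE_CMD_LINK_STATUS_REPORT;
 * the driver patches param.link_status_resp.response to MUX_CMD_RESP_SUCCESS
 * and answers with MUX_LITE_CMD_LINK_STATUS_REPORT_RESP, echoing the received
 * transaction_id (respond = true). A received MUX_LITE_CMD_FLOW_CTL is
 * acknowledged with MUX_LITE_CMD_FLOW_CTL_ACK and an empty parameter block
 * instead.
 */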
 364
 365/* Pass the DL packet to the netif layer. */
 366static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
 367			       struct iosm_wwan *wwan, u32 offset,
 368			       u8 service_class, struct sk_buff *skb,
 369			       u32 pkt_len)
 370{
 371	struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);
 372
 373	if (!dest_skb)
 374		return -ENOMEM;
 375
 376	skb_pull(dest_skb, offset);
 377	skb_trim(dest_skb, pkt_len);
 378	/* Pass the packet to the netif layer. */
 379	dest_skb->priority = service_class;
 380
 381	return ipc_wwan_receive(wwan, dest_skb, false, if_id);
 382}
 383
 384/* Decode Flow Credit Table in the block */
 385static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
 386				   unsigned char *block)
 387{
 388	struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
 389	struct iosm_wwan *wwan;
 390	int ul_credits;
 391	int if_id;
 392
 393	if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
 394		dev_err(ipc_mux->dev, "unexpected FCT length: %d",
 395			fct->vfl_length);
 396		return;
 397	}
 398
 399	if_id = fct->if_id;
 400	if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
 401		dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
 402		return;
 403	}
 404
 405	/* Is the session active? */
 406	if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
 407	wwan = ipc_mux->session[if_id].wwan;
 408	if (!wwan) {
 409		dev_err(ipc_mux->dev, "session Net ID is NULL");
 410		return;
 411	}
 412
 413	ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);
 414
 415	dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
 416		if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
 417
 418	/* Update the Flow Credit information from ADB */
 419	ipc_mux->session[if_id].ul_flow_credits += ul_credits;
 420
 421	/* Check whether the TX can be started */
 422	if (ipc_mux->session[if_id].ul_flow_credits > 0) {
 423		ipc_mux->session[if_id].net_tx_stop = false;
 424		ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
 425					  ipc_mux->session[if_id].if_id, false);
 426	}
 427}
 428
 429/* Decode non-aggregated datagram */
 430static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
 431				   struct sk_buff *skb)
 432{
 433	u32 pad_len, packet_offset, adgh_len;
 434	struct iosm_wwan *wwan;
 435	struct mux_adgh *adgh;
 436	u8 *block = skb->data;
 437	int rc = 0;
 438	u8 if_id;
 439
 440	adgh = (struct mux_adgh *)block;
 441
 442	if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
 443		dev_err(ipc_mux->dev, "invalid ADGH signature received");
 444		return;
 445	}
 446
 447	if_id = adgh->if_id;
 448	if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
 449		dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
 450		return;
 451	}
 452
 453	/* Is the session active? */
 454	if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
 455	wwan = ipc_mux->session[if_id].wwan;
 456	if (!wwan) {
 457		dev_err(ipc_mux->dev, "session Net ID is NULL");
 458		return;
 459	}
 460
 461	/* Store the pad length for the corresponding session. The pad
 462	 * bytes are those negotiated in the open session, less the header
 463	 * size (see the session management chapter for details). If the
 464	 * resulting padding is zero or less, the additional head padding
 465	 * is omitted: e.g. for HEAD_PAD_LEN = 16 or less this field is
 466	 * omitted, while for HEAD_PAD_LEN = 20 this field carries 4 bytes
 467	 * set to zero.
 468	 */
 469	pad_len =
 470		ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
 471	packet_offset = sizeof(*adgh) + pad_len;
 472
 473	if_id += ipc_mux->wwan_q_offset;
 474	adgh_len = le16_to_cpu(adgh->length);
 475
 476	/* Pass the packet to the netif layer */
 477	rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
 478				 adgh->service_class, skb,
 479				 adgh_len - packet_offset);
 480	if (rc) {
 481		dev_err(ipc_mux->dev, "mux adgh decoding error");
 482		return;
 483	}
 484	ipc_mux->session[if_id].flush = 1;
 485}
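
/* Worked example for the offset math above, assuming IPC_MEM_DL_ETH_OFFSET
 * is 16 (an assumption stated here for illustration; the value lives in the
 * driver headers): a session opened with dl_head_pad_len = 20 yields
 * pad_len = 4, the IP payload starts at packet_offset =
 * sizeof(struct mux_adgh) + 4, and adgh_len - packet_offset bytes are handed
 * to the netif layer.
 */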
 486
 487static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
 488				     struct mux_cmdh *cmdh, int size)
 489{
 490	u32 link_st  = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
 491	u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
 492	u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
 493	u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
 494	union mux_cmd_param *cmd_p = NULL;
 495	u32 cmd = link_st;
 496	u32 trans_id;
 497
 498	if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
 499					    cmdh->command_type, cmdh->if_id,
 500					    cmdh->cmd_len, size)) {
 501		size = 0;
 502		if (cmdh->command_type == cpu_to_le32(link_st)) {
 503			cmd_p = &cmdh->param;
 504			cmd_p->link_status_resp.response = cpu_to_le32(MUX_CMD_RESP_SUCCESS);
 505		} else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
 506			   (cmdh->command_type == cpu_to_le32(fctl_dis))) {
 507			cmd = fctl_ack;
 508		} else {
 509			return;
 510		}
 511		trans_id = le32_to_cpu(cmdh->transaction_id);
 512		ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
 513					 trans_id, cmd_p, size, false, true);
 514	}
 515}
 516
 517/* Decode an aggregated command block. */
 518static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
 519{
 520	struct mux_acbh *acbh;
 521	struct mux_cmdh *cmdh;
 522	u32 next_cmd_index;
 523	u8 *block;
 524	int size;
 525
 526	acbh = (struct mux_acbh *)(skb->data);
 527	block = (u8 *)(skb->data);
 528
 529	next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
 530	next_cmd_index = array_index_nospec(next_cmd_index,
 531					    sizeof(struct mux_cmdh));
 532
 533	while (next_cmd_index != 0) {
 534		cmdh = (struct mux_cmdh *)&block[next_cmd_index];
 535		next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
 536		if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
 537						       cmdh->command_type,
 538						       cmdh->if_id,
 539						       cmdh->transaction_id)) {
 540			size = offsetof(struct mux_cmdh, param) +
 541				sizeof(cmdh->param.flow_ctl);
 542			ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
 543		}
 544	}
 545}
 546
 547/* Process the datagrams of an aggregated datagram table. */
 548static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
 549			     struct mux_adth_dg *dg, struct sk_buff *skb,
 550			     int if_id, int nr_of_dg)
 551{
 552	u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
 553	u32 packet_offset, i, rc, dg_len;
 554
 555	for (i = 0; i < nr_of_dg; i++, dg++) {
 556		if (le32_to_cpu(dg->datagram_index)
 557				< sizeof(struct mux_adbh))
 558			goto dg_error;
 559
 560		/* Is the packet inside the ADB? */
 561		if (le32_to_cpu(dg->datagram_index) >=
 562					le32_to_cpu(adbh->block_length)) {
 563			goto dg_error;
 564		} else {
 565			packet_offset =
 566				le32_to_cpu(dg->datagram_index) +
 567				dl_head_pad_len;
 568			dg_len = le16_to_cpu(dg->datagram_length);
 569			/* Pass the packet to the netif layer. */
 570			rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
 571						 packet_offset,
 572						 dg->service_class, skb,
 573						 dg_len - dl_head_pad_len);
 574			if (rc)
 575				goto dg_error;
 576		}
 577	}
 578	return 0;
 579dg_error:
 580	return -1;
 581}
 582
 583/* Decode an aggregated data block. */
 584static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
 585			      struct sk_buff *skb)
 586{
 587	struct mux_adth_dg *dg;
 588	struct iosm_wwan *wwan;
 589	struct mux_adbh *adbh;
 590	struct mux_adth *adth;
 591	int nr_of_dg, if_id;
 592	u32 adth_index;
 593	u8 *block;
 594
 595	block = skb->data;
 596	adbh = (struct mux_adbh *)block;
 597
 598	/* Process the aggregated datagram tables. */
 599	adth_index = le32_to_cpu(adbh->first_table_index);
 600
 601	/* Has CP sent an empty ADB? */
 602	if (adth_index < 1) {
 603		dev_err(ipc_mux->dev, "unexpected empty ADB");
 604		goto adb_decode_err;
 605	}
 606
 607	/* Loop through mixed session tables. */
 608	while (adth_index) {
 609		/* Get the reference to the table header. */
 610		adth = (struct mux_adth *)(block + adth_index);
 611
 612		/* Get the interface id and map it to the netif id. */
 613		if_id = adth->if_id;
 614		if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
 615			goto adb_decode_err;
 616
 617		if_id = array_index_nospec(if_id,
 618					   IPC_MEM_MUX_IP_SESSION_ENTRIES);
 619
 620		/* Is the session active? */
 621		wwan = ipc_mux->session[if_id].wwan;
 622		if (!wwan)
 623			goto adb_decode_err;
 624
 625		/* Consistency checks for aggregated datagram table. */
 626		if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
 627			goto adb_decode_err;
 628
 629		if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) -
 630				sizeof(struct mux_adth_dg)))
 631			goto adb_decode_err;
 632
 633		/* Calculate the number of datagrams. */
 634		nr_of_dg = (le16_to_cpu(adth->table_length) -
 635					sizeof(struct mux_adth) +
 636					sizeof(struct mux_adth_dg)) /
 637					sizeof(struct mux_adth_dg);
 638
 639		/* Is the datagram table empty? */
 640		if (nr_of_dg < 1) {
 641			dev_err(ipc_mux->dev,
 642				"adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
 643				adth_index, nr_of_dg,
 644				le32_to_cpu(adth->next_table_index));
 645
 646			/* Move to the next aggregated datagram table. */
 647			adth_index = le32_to_cpu(adth->next_table_index);
 648			continue;
 649		}
 650
 651		/* New aggregated datagram table. */
 652		dg = &adth->dg;
 653		if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
 654				      nr_of_dg) < 0)
 655			goto adb_decode_err;
 656
 657		/* mark session for final flush */
 658		ipc_mux->session[if_id].flush = 1;
 659
 660		/* Move to the next aggregated datagram table. */
 661		adth_index = le32_to_cpu(adth->next_table_index);
 662	}
 663
 664adb_decode_err:
 665	return;
 666}
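
/* Worked example for the nr_of_dg computation above: struct mux_adth already
 * embeds one struct mux_adth_dg (assuming no trailing padding), so a table
 * describing n datagrams has
 *
 *	table_length = sizeof(struct mux_adth) +
 *		       (n - 1) * sizeof(struct mux_adth_dg)
 *
 * and the formula (table_length - sizeof(adth) + sizeof(dg)) / sizeof(dg)
 * recovers exactly n.
 */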
 667
 668/**
 669 * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
 670 *                     depending on the header signature.
 671 * @ipc_mux:            Pointer to MUX data-struct
 672 * @skb:                Pointer to ipc_skb.
 673 */
 674void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
 675{
 676	u32 signature;
 677
 678	if (!skb->data)
 679		return;
 680
 681	/* Decode the MUX header type. */
 682	signature = le32_to_cpup((__le32 *)skb->data);
 683
 684	switch (signature) {
 685	case IOSM_AGGR_MUX_SIG_ADBH:	/* Aggregated Data Block Header */
 686		mux_dl_adb_decode(ipc_mux, skb);
 687		break;
 688	case IOSM_AGGR_MUX_SIG_ADGH:
 689		ipc_mux_dl_adgh_decode(ipc_mux, skb);
 690		break;
 691	case MUX_SIG_FCTH:
 692		ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
 693		break;
 694	case IOSM_AGGR_MUX_SIG_ACBH:	/* Aggregated Command Block Header */
 695		ipc_mux_dl_acb_decode(ipc_mux, skb);
 696		break;
 697	case MUX_SIG_CMDH:
 698		ipc_mux_dl_cmd_decode(ipc_mux, skb);
 699		break;
 700
 701	default:
 702		dev_err(ipc_mux->dev, "invalid MUX signature");
 703	}
 704
 705	ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
 706}
 707
 708static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
 709				struct mux_adb *ul_adb, u32 type)
 710{
 711	/* Take the first element of the free list. */
 712	struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
 713	u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
 714	u32 *next_tb_id;
 715	int qlt_size;
 716	u32 if_id;
 717
 718	if (!skb)
 719		return -EBUSY; /* Wait for a free ADB skb. */
 720
 721	/* Mark it as UL ADB to select the right free operation. */
 722	IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
 723
 724	switch (type) {
 725	case IOSM_AGGR_MUX_SIG_ADBH:
 726		/* Save the ADB memory settings. */
 727		ul_adb->dest_skb = skb;
 728		ul_adb->buf = skb->data;
 729		ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;
 730
 731		/* reset statistic counter */
 732		ul_adb->if_cnt = 0;
 733		ul_adb->payload_size = 0;
 734		ul_adb->dg_cnt_total = 0;
 735
 736		/* Initialize the ADBH. */
 737		ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
 738		memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
 739		ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
 740		ul_adb->adbh->block_length =
 741					cpu_to_le32(sizeof(struct mux_adbh));
 742		next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
 743		ul_adb->next_table_index = next_tb_id;
 744
 745		/* Clear the local copy of DGs for new ADB */
 746		memset(ul_adb->dg, 0, sizeof(ul_adb->dg));
 747
 748		/* Clear the DG count and QLT updated status for new ADB */
 749		for (if_id = 0; if_id < no_if; if_id++) {
 750			ul_adb->dg_count[if_id] = 0;
 751			ul_adb->qlt_updated[if_id] = 0;
 752		}
 753		break;
 754
 755	case IOSM_AGGR_MUX_SIG_ADGH:
 756		/* Save the ADB memory settings. */
 757		ul_adb->dest_skb = skb;
 758		ul_adb->buf = skb->data;
 759		ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
 760		/* reset statistic counter */
 761		ul_adb->if_cnt = 0;
 762		ul_adb->payload_size = 0;
 763		ul_adb->dg_cnt_total = 0;
 764
 765		ul_adb->adgh = (struct mux_adgh *)skb->data;
 766		memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
 767		break;
 768
 769	case MUX_SIG_QLTH:
 770		qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
 771			   (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));
 772
 773		if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
 774			dev_err(ipc_mux->dev,
 775				"can't support. QLT size:%d SKB size: %d",
 776				qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
 777			return -ERANGE;
 778		}
 779
 780		ul_adb->qlth_skb = skb;
 781		memset((ul_adb->qlth_skb)->data, 0, qlt_size);
 782		skb_put(skb, qlt_size);
 783		break;
 784	}
 785
 786	return 0;
 787}
 788
 789static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
 790{
 791	struct mux_adb *ul_adb = &ipc_mux->ul_adb;
 792	u16 adgh_len;
 793	long long bytes;
 794	char *str;
 795
 796	if (!ul_adb->dest_skb) {
 797		dev_err(ipc_mux->dev, "no dest skb");
 798		return;
 799	}
 800
 801	adgh_len = le16_to_cpu(ul_adb->adgh->length);
 802	skb_put(ul_adb->dest_skb, adgh_len);
 803	skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
 804	ul_adb->dest_skb = NULL;
 805
 806	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
 807		struct mux_session *session;
 808
 809		session = &ipc_mux->session[ul_adb->adgh->if_id];
 810		str = "available_credits";
 811		bytes = (long long)session->ul_flow_credits;
 812
 813	} else {
 814		str = "pend_bytes";
 815		bytes = ipc_mux->ul_data_pend_bytes;
 816		ipc_mux->ul_data_pend_bytes += adgh_len;
 818	}
 819
 820	dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
 821		adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
 822		str, bytes);
 823}
 824
 825static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
 826				   struct mux_adb *ul_adb, int *out_offset)
 827{
 828	int i, qlt_size, offset = *out_offset;
 829	struct mux_qlth *p_adb_qlt;
 830	struct mux_adth_dg *dg;
 831	struct mux_adth *adth;
 832	u16 adth_dg_size;
 833	u32 *next_tb_id;
 834
 835	qlt_size = offsetof(struct mux_qlth, ql) +
 836			MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
 837
 838	for (i = 0; i < ipc_mux->nr_sessions; i++) {
 839		if (ul_adb->dg_count[i] > 0) {
 840			adth_dg_size = offsetof(struct mux_adth, dg) +
 841					ul_adb->dg_count[i] * sizeof(*dg);
 842
 843			*ul_adb->next_table_index = offset;
 844			adth = (struct mux_adth *)&ul_adb->buf[offset];
 845			next_tb_id = (unsigned int *)&adth->next_table_index;
 846			ul_adb->next_table_index = next_tb_id;
 847			offset += adth_dg_size;
 848			adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
 849			adth->if_id = i;
 850			adth->table_length = cpu_to_le16(adth_dg_size);
 851			adth_dg_size -= offsetof(struct mux_adth, dg);
 852			memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size);
 853			ul_adb->if_cnt++;
 854		}
 855
 856		if (ul_adb->qlt_updated[i]) {
 857			*ul_adb->next_table_index = offset;
 858			p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
 859			ul_adb->next_table_index =
 860				(u32 *)&p_adb_qlt->next_table_index;
 861			memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
 862			offset += qlt_size;
 863		}
 864	}
 865	*out_offset = offset;
 866}
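
/* Resulting UL ADB layout after the encoding above (sketch):
 *
 *	+------+-------------------+--------+--------+--------+-----+
 *	| ADBH | datagram payloads | ADTH 0 | QLTH 0 | ADTH 1 | ... |
 *	+------+-------------------+--------+--------+--------+-----+
 *
 * The payloads are copied first by mux_ul_dg_encode(); the per-session
 * tables are appended here, chained through next_table_index, and the last
 * pointer is zeroed in ipc_mux_ul_adb_finish().
 */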
 867
 868/**
 869 * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
 870 * @ipc_mux:               Pointer to MUX data-struct.
 871 */
 872void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
 873{
 874	bool ul_data_pend = false;
 875	struct mux_adb *ul_adb;
 876	unsigned long flags;
 877	int offset;
 878
 879	ul_adb = &ipc_mux->ul_adb;
 880	if (!ul_adb->dest_skb)
 881		return;
 882
 883	offset = *ul_adb->next_table_index;
 884	ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
 885	ul_adb->adbh->block_length = cpu_to_le32(offset);
 886
 887	if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
 888		ul_adb->dest_skb = NULL;
 889		return;
 890	}
 891
 892	*ul_adb->next_table_index = 0;
 893	ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
 894	skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));
 895
 896	spin_lock_irqsave(&ipc_mux->channel->ul_list.lock, flags);
 897	__skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
 898	spin_unlock_irqrestore(&ipc_mux->channel->ul_list.lock, flags);
 899
 900	ul_adb->dest_skb = NULL;
 901	/* Updates the TDs with ul_list */
 902	ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);
 903
 904	/* Delay the doorbell irq */
 905	if (ul_data_pend)
 906		ipc_imem_td_update_timer_start(ipc_mux->imem);
 907
 908	ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
 909	ipc_mux->acc_payload_size += ul_adb->payload_size;
 910	ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
 911}
 912
 913/* Allocates an ADB from the free list and initializes it with ADBH. */
 914static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
 915				    struct mux_adb *adb, int *size_needed,
 916				    u32 type)
 917{
 918	bool ret_val = false;
 919	int status;
 920
 921	if (!adb->dest_skb) {
 922		/* Allocate memory for the ADB including the
 923		 * datagram table header.
 924		 */
 925		status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
 926		if (status)
 927			/* No pending ADB is available. */
 928			ret_val = true;
 929
 930		/* Reset the needed size only for new ADB memory. */
 931		*size_needed = 0;
 932	}
 933
 934	return ret_val;
 935}
 936
 937/* Informs the network stack to stop sending further packets for all opened
 938 * sessions
 939 */
 940static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
 941{
 942	struct mux_session *session;
 943	int idx;
 944
 945	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
 946		session = &ipc_mux->session[idx];
 947
 948		if (!session->wwan)
 949			continue;
 950
 951		session->net_tx_stop = true;
 952	}
 953}
 954
 955/* Sends Queue Level Table of all opened sessions */
 956static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
 957{
 958	struct ipc_mem_lite_gen_tbl *qlt;
 959	struct mux_session *session;
 960	bool qlt_updated = false;
 961	int i;
 962	int qlt_size;
 963
 964	if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
 965		return qlt_updated;
 966
 967	qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
 968		   MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
 969
 970	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
 971		session = &ipc_mux->session[i];
 972
 973		if (!session->wwan || session->flow_ctl_mask)
 974			continue;
 975
 976		if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
 977					 MUX_SIG_QLTH)) {
 978			dev_err(ipc_mux->dev,
 979				"no reserved mem to send QLT of if_id: %d", i);
 980			break;
 981		}
 982
 983		/* Prepare QLT */
 984		qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
 985			      ->data;
 986		qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
 987		qlt->length = cpu_to_le16(qlt_size);
 988		qlt->if_id = i;
 989		qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
 990		qlt->reserved[0] = 0;
 991		qlt->reserved[1] = 0;
 992
 993		qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);
 994
 995		/* Add QLT to the transfer list. */
 996		skb_queue_tail(&ipc_mux->channel->ul_list,
 997			       ipc_mux->ul_adb.qlth_skb);
 998
 999		qlt_updated = true;
1000		ipc_mux->ul_adb.qlth_skb = NULL;
1001	}
1002
1003	if (qlt_updated)
1004		/* Updates the TDs with ul_list */
1005		(void)ipc_imem_ul_write_td(ipc_mux->imem);
1006
1007	return qlt_updated;
1008}
1009
1010/* Check the available credits for the specified session and return the
1011 * number of packets for which credits are available.
1012 */
1013static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
1014					  struct mux_session *session,
1015					  struct sk_buff_head *ul_list,
1016					  int max_nr_of_pkts)
1017{
1018	int pkts_to_send = 0;
1019	struct sk_buff *skb;
1020	int credits = 0;
1021
1022	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
1023		credits = session->ul_flow_credits;
1024		if (credits <= 0) {
1025			dev_dbg(ipc_mux->dev,
1026				"FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
1027				session->if_id, session->ul_flow_credits,
1028				session->ul_list.qlen); /* nr_of_bytes */
1029			return 0;
1030		}
1031	} else {
1032		credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
1033			  ipc_mux->ul_data_pend_bytes;
1034		if (credits <= 0) {
1035			ipc_mux_stop_tx_for_all_sessions(ipc_mux);
1036
1037			dev_dbg(ipc_mux->dev,
1038				"if_id[%d] encod. fail Bytes: %llu, thresh: %d",
1039				session->if_id, ipc_mux->ul_data_pend_bytes,
1040				IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
1041			return 0;
1042		}
1043	}
1044
1045	/* Check if there are enough credits/bytes available to send the
1046	 * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
1047	 * depending on available credits.
1048	 */
1049	skb_queue_walk(ul_list, skb)
1050	{
1051		if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
1052			break;
1053		credits -= skb->len;
1054		pkts_to_send++;
1055	}
1056
1057	return pkts_to_send;
1058}
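
/* Worked example for the queue walk above (hypothetical numbers): with
 * credits = 3000 and three queued skbs of 1500 bytes each, the first two
 * fit (3000 -> 1500 -> 0) while the third does not, so pkts_to_send = 2.
 */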
1059
1060/* Encode the UL IP packet according to the MUX Lite spec. */
1061static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
1062				  struct mux_session *session,
1063				  struct sk_buff_head *ul_list,
1064				  struct mux_adb *adb, int nr_of_pkts)
1065{
1066	int offset = sizeof(struct mux_adgh);
1067	int adb_updated = -EINVAL;
1068	struct sk_buff *src_skb;
1069	int aligned_size = 0;
1070	int nr_of_skb = 0;
1071	u32 pad_len = 0;
1072
1073	/* Recalculate the number of packets depending on the number of bytes
1074	 * to be processed and the available credits.
1075	 */
1076	nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
1077						    nr_of_pkts);
1078
1079	/* If calculated nr_of_pkts from available credits is <= 0
1080	 * then nothing to do.
1081	 */
1082	if (nr_of_pkts <= 0)
1083		return 0;
1084
1085	/* Read the configured UL head_pad_length for the session. */
1086	if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
1087		pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
1088
1089	/* Process all pending UL packets for this session
1090	 * depending on the allocated datagram table size.
1091	 */
1092	while (nr_of_pkts > 0) {
1093		/* get destination skb allocated */
1094		if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1095					    IOSM_AGGR_MUX_SIG_ADGH)) {
1096			dev_err(ipc_mux->dev, "no reserved memory for ADGH");
1097			return -ENOMEM;
1098		}
1099
1100		/* Peek at the head of the list. */
1101		src_skb = skb_peek(ul_list);
1102		if (!src_skb) {
1103			dev_err(ipc_mux->dev,
1104				"skb peek return NULL with count : %d",
1105				nr_of_pkts);
1106			break;
1107		}
1108
1109		/* Calculate the aligned memory size required. */
1110		aligned_size = ALIGN((pad_len + src_skb->len), 4);
1111
1112		ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;
1113
1114		if (ipc_mux->size_needed > adb->size) {
1115			dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
1116				ipc_mux->size_needed, adb->size);
1117			/* Return 1 if any IP packet is added to the transfer
1118			 * list.
1119			 */
1120			return nr_of_skb ? 1 : 0;
1121		}
1122
1123		/* Add the buffer (without head padding) to the next transfer. */
1124		memcpy(adb->buf + offset + pad_len, src_skb->data,
1125		       src_skb->len);
1126
1127		adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
1128		adb->adgh->if_id = session_id;
1129		adb->adgh->length =
1130			cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
1131				    src_skb->len);
1132		adb->adgh->service_class = src_skb->priority;
1133		adb->adgh->next_count = --nr_of_pkts;
1134		adb->dg_cnt_total++;
1135		adb->payload_size += src_skb->len;
1136
1137		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
1138			/* Decrement the credit value as we are processing the
1139			 * datagram from the UL list.
1140			 */
1141			session->ul_flow_credits -= src_skb->len;
1142
1143		/* Remove the processed element and free it. */
1144		src_skb = skb_dequeue(ul_list);
1145		dev_kfree_skb(src_skb);
1146		nr_of_skb++;
1147
1148		ipc_mux_ul_adgh_finish(ipc_mux);
1149	}
1150
1151	if (nr_of_skb) {
1152		/* Send QLT info to the modem if the pending bytes exceed
1153		 * the low watermark, or always in credit-based mode.
1154		 */
1155		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
1156		    ipc_mux->ul_data_pend_bytes >=
1157			    IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
1158			adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
1159		else
1160			adb_updated = 1;
1161
1162		/* Updates the TDs with ul_list */
1163		(void)ipc_imem_ul_write_td(ipc_mux->imem);
1164	}
1165
1166	return adb_updated;
1167}
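
/* Worked example for the size accounting above (hypothetical numbers): with
 * pad_len = 4 and a 1498-byte IP packet, aligned_size = ALIGN(4 + 1498, 4)
 * = 1504, so size_needed = sizeof(struct mux_adgh) + 1504 must fit into
 * adb->size before the datagram is copied.
 */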
1168
1169/**
1170 * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
1171 * @ipc_mux:            pointer to MUX instance data
1172 * @p_adb:              pointer to UL aggregated data block
1173 * @session_id:         session id
1174 * @qlth_n_ql_size:     Length (in bytes) of the queue level table (QLTH + QL)
1175 * @ul_list:            pointer to skb buffer head
1176 */
1177void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
1178			      int session_id, int qlth_n_ql_size,
1179			      struct sk_buff_head *ul_list)
1180{
1181	int qlevel = ul_list->qlen;
1182	struct mux_qlth *p_qlt;
1183
1184	p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];
1185
1186	/* Initialize the QLTH if it has not been done yet */
1187	if (p_adb->qlt_updated[session_id] == 0) {
1188		p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
1189		p_qlt->if_id = session_id;
1190		p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
1191		p_qlt->reserved = 0;
1192		p_qlt->reserved2 = 0;
1193	}
1194
1195	/* Always update the queue level information */
1196	p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
1197	p_adb->qlt_updated[session_id] = 1;
1198}
1199
1200/* Update the next table index. */
1201static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
1202				      int session_id,
1203				      struct sk_buff_head *ul_list,
1204				      struct mux_adth_dg *dg,
1205				      int aligned_size,
1206				      u32 qlth_n_ql_size,
1207				      struct mux_adb *adb,
1208				      struct sk_buff *src_skb)
1209{
1210	ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
1211				 qlth_n_ql_size, ul_list);
1212	ipc_mux_ul_adb_finish(ipc_mux);
1213	if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1214				    IOSM_AGGR_MUX_SIG_ADBH))
1215		return -ENOMEM;
1216
1217	ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
1218
1219	ipc_mux->size_needed += offsetof(struct mux_adth, dg);
1220	ipc_mux->size_needed += qlth_n_ql_size;
1221	ipc_mux->size_needed += sizeof(*dg) + aligned_size;
1222	return 0;
1223}
1224
1225/* Encode pending session UL data into datagram entries. */
1226static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
1227			    struct mux_adth_dg *dg,
1228			    struct sk_buff_head *ul_list,
1229			    struct sk_buff *src_skb, int session_id,
1230			    int pkt_to_send, u32 qlth_n_ql_size,
1231			    int *out_offset, int head_pad_len)
1232{
1233	int aligned_size;
1234	int offset = *out_offset;
1235	unsigned long flags;
1236	int nr_of_skb = 0;
1237
1238	while (pkt_to_send > 0) {
1239		/* Peek at the head of the list. */
1240		src_skb = skb_peek(ul_list);
1241		if (!src_skb) {
1242			dev_err(ipc_mux->dev,
1243				"skb peek return NULL with count : %d",
1244				pkt_to_send);
1245			return -1;
1246		}
1247		aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
1248		ipc_mux->size_needed += sizeof(*dg) + aligned_size;
1249
1250		if (ipc_mux->size_needed > adb->size ||
1251		    ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
1252		      IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
1253			*adb->next_table_index = offset;
1254			if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
1255						       ul_list, dg,
1256						       aligned_size,
1257						       qlth_n_ql_size, adb,
1258						       src_skb) < 0)
1259				return -ENOMEM;
1260			nr_of_skb = 0;
1261			offset = le32_to_cpu(adb->adbh->block_length);
1262			/* Load pointer to next available datagram entry */
1263			dg = adb->dg[session_id] + adb->dg_count[session_id];
1264		}
1265		/* Add buffer without head padding to next pending transfer. */
1266		memcpy(adb->buf + offset + head_pad_len,
1267		       src_skb->data, src_skb->len);
1268		/* Setup datagram entry. */
1269		dg->datagram_index = cpu_to_le32(offset);
1270		dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
1271		dg->service_class = src_skb->priority;
1272		dg->reserved = 0;
1273		adb->dg_cnt_total++;
1274		adb->payload_size += le16_to_cpu(dg->datagram_length);
1275		dg++;
1276		adb->dg_count[session_id]++;
1277		offset += aligned_size;
1278		/* Remove the processed element and free it. */
1279		spin_lock_irqsave(&ul_list->lock, flags);
1280		src_skb = __skb_dequeue(ul_list);
1281		spin_unlock_irqrestore(&ul_list->lock, flags);
1282
1283		dev_kfree_skb(src_skb);
1284		nr_of_skb++;
1285		pkt_to_send--;
1286	}
1287	*out_offset = offset;
1288	return nr_of_skb;
1289}
1290
1291/* Encode pending session UL data into an ADB. */
1292static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
1293			     struct mux_session *session,
1294			     struct sk_buff_head *ul_list, struct mux_adb *adb,
1295			     int pkt_to_send)
1296{
1297	int adb_updated = -EINVAL;
1298	int head_pad_len, offset;
1299	struct sk_buff *src_skb = NULL;
1300	struct mux_adth_dg *dg;
1301	u32 qlth_n_ql_size;
1302
1303	/* If any of the opened sessions has flow control ON, limit the UL
1304	 * data to IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B bytes.
1305	 */
1306	if (ipc_mux->ul_data_pend_bytes >=
1307		IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
1308		ipc_mux_stop_tx_for_all_sessions(ipc_mux);
1309		return adb_updated;
1310	}
1311
1312	qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
1313			 MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
1314	head_pad_len = session->ul_head_pad_len;
1315
1316	if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
1317		head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
1318
1319	if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1320				    IOSM_AGGR_MUX_SIG_ADBH))
1321		return -ENOMEM;
1322
1323	offset = le32_to_cpu(adb->adbh->block_length);
1324
1325	if (ipc_mux->size_needed == 0)
1326		ipc_mux->size_needed = offset;
1327
1328	/* Calculate the size needed for ADTH, QLTH and QL*/
1329	if (adb->dg_count[session_id] == 0) {
1330		ipc_mux->size_needed += offsetof(struct mux_adth, dg);
1331		ipc_mux->size_needed += qlth_n_ql_size;
1332	}
1333
1334	dg = adb->dg[session_id] + adb->dg_count[session_id];
1335
1336	if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
1337			     session_id, pkt_to_send, qlth_n_ql_size, &offset,
1338			     head_pad_len) > 0) {
1339		adb_updated = 1;
1340		*adb->next_table_index = offset;
1341		ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
1342					 qlth_n_ql_size, ul_list);
1343		adb->adbh->block_length = cpu_to_le32(offset);
1344	}
1345
1346	return adb_updated;
1347}
1348
1349bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
1350{
1351	struct sk_buff_head *ul_list;
1352	struct mux_session *session;
1353	int updated = 0;
1354	int session_id;
1355	int dg_n;
1356	int i;
1357
1358	if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
1359	    ipc_mux->adb_prep_ongoing)
1360		return false;
1361
1362	ipc_mux->adb_prep_ongoing = true;
1363
1364	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
1365		session_id = ipc_mux->rr_next_session;
1366		session = &ipc_mux->session[session_id];
1367
1368		/* Go to the next session; handle rr_next_session overflow. */
1369		ipc_mux->rr_next_session++;
1370		if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
1371			ipc_mux->rr_next_session = 0;
1372
1373		if (!session->wwan || session->flow_ctl_mask ||
1374		    session->net_tx_stop)
1375			continue;
1376
1377		ul_list = &session->ul_list;
1378
1379		/* Is something pending in UL while flow control is off? */
1380		dg_n = skb_queue_len(ul_list);
1381		if (dg_n > MUX_MAX_UL_DG_ENTRIES)
1382			dg_n = MUX_MAX_UL_DG_ENTRIES;
1383
1384		if (dg_n == 0)
1385			/* Nothing to do for ipc_mux session
1386			 * -> try next session id.
1387			 */
1388			continue;
1389		if (ipc_mux->protocol == MUX_LITE)
1390			updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
1391							 session, ul_list,
1392							 &ipc_mux->ul_adb,
1393							 dg_n);
1394		else
1395			updated = mux_ul_adb_encode(ipc_mux, session_id,
1396						    session, ul_list,
1397						    &ipc_mux->ul_adb,
1398						    dg_n);
1399	}
1400
1401	ipc_mux->adb_prep_ongoing = false;
1402	return updated == 1;
1403}
1404
1405/* Calculate the payload size of a given ADB. */
1406static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
1407					struct mux_adbh *p_adbh)
1408{
1409	struct mux_adth_dg *dg;
1410	struct mux_adth *adth;
1411	u32 payload_size = 0;
1412	u32 next_table_idx;
1413	int nr_of_dg, i;
1414
1415	/* Process the aggregated datagram tables. */
1416	next_table_idx = le32_to_cpu(p_adbh->first_table_index);
1417
1418	if (next_table_idx < sizeof(struct mux_adbh)) {
1419		dev_err(ipc_mux->dev, "unexpected empty ADB");
1420		return payload_size;
1421	}
1422
1423	while (next_table_idx != 0) {
1424		/* Get the reference to the table header. */
1425		adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
1426
1427		if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
1428			nr_of_dg = (le16_to_cpu(adth->table_length) -
1429					sizeof(struct mux_adth) +
1430					sizeof(struct mux_adth_dg)) /
1431					sizeof(struct mux_adth_dg);
1432
1433			if (nr_of_dg <= 0)
1434				return payload_size;
1435
1436			dg = &adth->dg;
1437
1438			for (i = 0; i < nr_of_dg; i++, dg++) {
1439				if (le32_to_cpu(dg->datagram_index) <
1440					sizeof(struct mux_adbh)) {
1441					return payload_size;
1442				}
1443				payload_size +=
1444					le16_to_cpu(dg->datagram_length);
1445			}
1446		}
1447		next_table_idx = le32_to_cpu(adth->next_table_index);
1448	}
1449
1450	return payload_size;
1451}
1452
1453void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
1454{
1455	union mux_type_header hr;
1456	u16 adgh_len;
1457	int payload;
1458
1459	if (ipc_mux->protocol == MUX_LITE) {
1460		hr.adgh = (struct mux_adgh *)skb->data;
1461		adgh_len = le16_to_cpu(hr.adgh->length);
1462		if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
1463		    ipc_mux->ul_flow == MUX_UL)
1464			ipc_mux->ul_data_pend_bytes -= adgh_len;
1466	} else {
1467		hr.adbh = (struct mux_adbh *)(skb->data);
1468		payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
1469		ipc_mux->ul_data_pend_bytes -= payload;
1470	}
1471
1472	if (ipc_mux->ul_flow == MUX_UL)
1473		dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
1474			ipc_mux->ul_data_pend_bytes);
1475
1476	/* Reset the skb settings. */
1477	skb_trim(skb, 0);
1478
1479	/* Add the consumed ADB to the free list. */
1480	skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
1481}
1482
1483/* Start the NETIF uplink send transfer in MUX mode. */
1484static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
1485					void *msg, size_t size)
1486{
1487	struct iosm_mux *ipc_mux = ipc_imem->mux;
1488	bool ul_data_pend = false;
1489
1490	/* Add session UL data to an ADB (aggregation) or an ADGH (MUX Lite) */
1491	ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
1492	if (ul_data_pend) {
1493		if (ipc_mux->protocol == MUX_AGGREGATION)
1494			ipc_imem_adb_timer_start(ipc_mux->imem);
1495
1496		/* Delay the doorbell irq */
1497		ipc_imem_td_update_timer_start(ipc_mux->imem);
1498	}
1499	/* reset the debounce flag */
1500	ipc_mux->ev_mux_net_transmit_pending = false;
1501
1502	return 0;
1503}
1504
1505int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
1506			      struct sk_buff *skb)
1507{
1508	struct mux_session *session = &ipc_mux->session[if_id];
1509	int ret = -EINVAL;
1510
1511	if (ipc_mux->channel &&
1512	    ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
1513		dev_err(ipc_mux->dev,
1514			"channel state is not IMEM_CHANNEL_ACTIVE");
1515		goto out;
1516	}
1517
1518	if (!session->wwan) {
1519		dev_err(ipc_mux->dev, "session net ID is NULL");
1520		ret = -EFAULT;
1521		goto out;
1522	}
1523
1524	/* The session may be under flow control.
1525	 * Check whether the packet can be queued in the session list;
1526	 * if not, suspend net TX.
1527	 */
1528	if (skb_queue_len(&session->ul_list) >=
1529	    (session->net_tx_stop ?
1530		     IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
1531		     (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
1532		      IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
1533		ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
1534		ret = -EBUSY;
1535		goto out;
1536	}
1537
1538	/* Add skb to the uplink skb accumulator. */
1539	skb_queue_tail(&session->ul_list, skb);
1540
1541	/* Inform the IPC kthread to pass uplink IP packets to CP. */
1542	if (!ipc_mux->ev_mux_net_transmit_pending) {
1543		ipc_mux->ev_mux_net_transmit_pending = true;
1544		ret = ipc_task_queue_send_task(ipc_mux->imem,
1545					       ipc_mux_tq_ul_trigger_encode, 0,
1546					       NULL, 0, false);
1547		if (ret)
1548			goto out;
1549	}
1550	dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
1551		if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
1552		skb->len, skb->truesize, skb->priority);
1553	ret = 0;
1554out:
1555	return ret;
1556}
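
/* Illustrative sketch, not part of the upstream file: how a net-device TX
 * path might hand an uplink skb to the helper above. The wrapper name and
 * error policy are assumptions (the real glue lives in iosm_ipc_imem_ops.c /
 * iosm_ipc_wwan.c), and the usual <linux/netdevice.h> definitions are
 * assumed to be visible in this translation unit.
 */
static netdev_tx_t __maybe_unused example_wwan_xmit(struct iosm_mux *ipc_mux,
						    int if_id,
						    struct sk_buff *skb)
{
	/* -EBUSY means the session list is full and the skb was not
	 * consumed, so ask the stack to retry; other error handling is
	 * elided in this sketch.
	 */
	if (ipc_mux_ul_trigger_encode(ipc_mux, if_id, skb) == -EBUSY)
		return NETDEV_TX_BUSY;

	return NETDEV_TX_OK;
}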