Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2020-21 Intel Corporation.
  4 */
  5
  6#include <linux/delay.h>
  7
  8#include "iosm_ipc_chnl_cfg.h"
  9#include "iosm_ipc_devlink.h"
 10#include "iosm_ipc_imem.h"
 11#include "iosm_ipc_imem_ops.h"
 12#include "iosm_ipc_port.h"
 13#include "iosm_ipc_task_queue.h"
 14
 15/* Open a packet data online channel between the network layer and CP. */
 16int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
 17{
 18	dev_dbg(ipc_imem->dev, "%s if id: %d",
 19		ipc_imem_phase_get_string(ipc_imem->phase), if_id);
 20
 21	/* The network interface is only supported in the runtime phase. */
 22	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
 23		dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
 24			ipc_imem_phase_get_string(ipc_imem->phase));
 25		return -EIO;
 26	}
 27
 28	return ipc_mux_open_session(ipc_imem->mux, if_id);
 29}
 30
 31/* Release a net link to CP. */
 32void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
 33			     int channel_id)
 34{
 35	if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
 36	    if_id <= IP_MUX_SESSION_END)
 37		ipc_mux_close_session(ipc_imem->mux, if_id);
 38}
 39
 40/* Tasklet call to do uplink transfer. */
/* Tasklet call to do uplink transfer.
 * @arg, @msg and @size are unused here; the task-queue callback
 * signature requires them. Always returns 0.
 */
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
				  void *msg, size_t size)
{
	/* Push the accumulated uplink skbs towards CP. */
	ipc_imem_ul_send(ipc_imem);

	return 0;
}
 48
 49/* Through tasklet to do sio write. */
/* Through tasklet to do sio write.
 * Schedules ipc_imem_tq_cdev_write() on the IPC task queue so the
 * uplink transfer runs in tasklet context. Returns the queueing status.
 */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
					NULL, 0, false);
}
 55
 56/* Function for transfer UL data */
 57int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
 58			       int if_id, int channel_id, struct sk_buff *skb)
 59{
 60	int ret = -EINVAL;
 61
 62	if (!ipc_imem || channel_id < 0)
 63		goto out;
 64
 65	/* Is CP Running? */
 66	if (ipc_imem->phase != IPC_P_RUN) {
 67		dev_dbg(ipc_imem->dev, "phase %s transmit",
 68			ipc_imem_phase_get_string(ipc_imem->phase));
 69		ret = -EIO;
 70		goto out;
 71	}
 72
 73	/* Route the UL packet through IP MUX Layer */
 74	ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
 75out:
 76	return ret;
 77}
 78
 79/* Initialize wwan channel */
 80int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
 81			       enum ipc_mux_protocol mux_type)
 82{
 83	struct ipc_chnl_cfg chnl_cfg = { 0 };
 84
 85	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
 86
 87	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
 88	if (ipc_imem->cp_version == -1) {
 89		dev_err(ipc_imem->dev, "invalid CP version");
 90		return -EIO;
 91	}
 92
 93	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
 94
 95	if (ipc_imem->mmio->mux_protocol == MUX_AGGREGATION &&
 96	    ipc_imem->nr_of_channels == IPC_MEM_IP_CHL_ID_0) {
 97		chnl_cfg.ul_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
 98		chnl_cfg.dl_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_DL;
 99		chnl_cfg.dl_buf_size = IPC_MEM_MAX_ADB_BUF_SIZE;
100	}
101
102	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
103			      IRQ_MOD_OFF);
104
105	/* WWAN registration. */
106	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
107	if (!ipc_imem->wwan) {
108		dev_err(ipc_imem->dev,
109			"failed to register the ipc_wwan interfaces");
110		return -ENOMEM;
111	}
112
113	return 0;
114}
115
116/* Map SKB to DMA for transfer */
117static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
118				   struct sk_buff *skb)
119{
120	struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
121	char *buf = skb->data;
122	int len = skb->len;
123	dma_addr_t mapping;
124	int ret;
125
126	ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
127
128	if (ret)
129		goto err;
130
131	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
132
133	IPC_CB(skb)->mapping = mapping;
134	IPC_CB(skb)->direction = DMA_TO_DEVICE;
135	IPC_CB(skb)->len = len;
136	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
137
138err:
139	return ret;
140}
141
142/* return true if channel is ready for use */
143static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
144				       struct ipc_mem_channel *channel)
145{
146	enum ipc_phase phase;
147
148	/* Update the current operation phase. */
149	phase = ipc_imem->phase;
150
151	/* Select the operation depending on the execution stage. */
152	switch (phase) {
153	case IPC_P_RUN:
154	case IPC_P_PSI:
155	case IPC_P_EBL:
156		break;
157
158	case IPC_P_ROM:
159		/* Prepare the PSI image for the CP ROM driver and
160		 * suspend the flash app.
161		 */
162		if (channel->state != IMEM_CHANNEL_RESERVED) {
163			dev_err(ipc_imem->dev,
164				"ch[%d]:invalid channel state %d,expected %d",
165				channel->channel_id, channel->state,
166				IMEM_CHANNEL_RESERVED);
167			goto channel_unavailable;
168		}
169		goto channel_available;
170
171	default:
172		/* Ignore uplink actions in all other phases. */
173		dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
174			channel->channel_id, phase);
175		goto channel_unavailable;
176	}
177	/* Check the full availability of the channel. */
178	if (channel->state != IMEM_CHANNEL_ACTIVE) {
179		dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
180			channel->channel_id, channel->state);
181		goto channel_unavailable;
182	}
183
184channel_available:
185	return true;
186
187channel_unavailable:
188	return false;
189}
190
191/**
192 * ipc_imem_sys_port_close - Release a sio link to CP.
193 * @ipc_imem:          Imem instance.
194 * @channel:           Channel instance.
195 */
/**
 * ipc_imem_sys_port_close - Release a sio link to CP.
 * @ipc_imem:          Imem instance.
 * @channel:           Channel instance.
 *
 * Waits (bounded by IPC_PEND_DATA_TIMEOUT per direction) for pending
 * uplink and downlink data on the channel to drain, then closes both
 * pipes and frees the channel.
 */
void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
			     struct ipc_mem_channel *channel)
{
	enum ipc_phase curr_phase;
	int status = 0;
	u32 tail = 0;

	curr_phase = ipc_imem->phase;

	/* If current phase is IPC_P_OFF or SIO ID is -ve then
	 * channel is already freed. Nothing to do.
	 */
	if (curr_phase == IPC_P_OFF) {
		dev_err(ipc_imem->dev,
			"nothing to do. Current Phase: %s",
			ipc_imem_phase_get_string(curr_phase));
		return;
	}

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel->channel_id, channel->state);
		return;
	}

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		ipc_imem->app_notify_ul_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * UL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->ul_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			/* status == 0 means the wait timed out. */
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}

		ipc_imem->app_notify_ul_pend = 0;
	}

	/* Likewise wait for pending DL data before closing the pipe; the
	 * current DL tail index is read back from the protocol layer.
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		ipc_imem->app_notify_dl_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * DL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->dl_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}

		ipc_imem->app_notify_dl_pend = 0;
	}

	/* Due to wait for completion in messages, there is a small window
	 * between closing the pipe and updating the channel is closed. In this
	 * small window there could be HP update from Host Driver. Hence update
	 * the channel state as CLOSING to avoid unnecessary interrupt
	 * towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;

	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);

	ipc_imem_channel_free(channel);
}
283
284/* Open a PORT link to CP and return the channel */
285struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
286					       int chl_id, int hp_id)
287{
288	struct ipc_mem_channel *channel;
289	int ch_id;
290
291	/* The PORT interface is only supported in the runtime phase. */
292	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
293		dev_err(ipc_imem->dev, "PORT open refused, phase %s",
294			ipc_imem_phase_get_string(ipc_imem->phase));
295		return NULL;
296	}
297
298	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
299
300	if (ch_id < 0) {
301		dev_err(ipc_imem->dev, "reservation of an PORT chnl id failed");
302		return NULL;
303	}
304
305	channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
306
307	if (!channel) {
308		dev_err(ipc_imem->dev, "PORT channel id open failed");
309		return NULL;
310	}
311
312	return channel;
313}
314
315/* transfer skb to modem */
316int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
317{
318	struct ipc_mem_channel *channel = ipc_cdev->channel;
319	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
320	int ret = -EIO;
321
322	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
323	    ipc_imem->phase == IPC_P_OFF_REQ)
324		goto out;
325
326	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
327
328	if (ret)
329		goto out;
330
331	/* Add skb to the uplink skbuf accumulator. */
332	skb_queue_tail(&channel->ul_list, skb);
333
334	ret = ipc_imem_call_cdev_write(ipc_imem);
335
336	if (ret) {
337		skb_dequeue_tail(&channel->ul_list);
338		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
339			ipc_cdev->channel->channel_id);
340	}
341out:
342	return ret;
343}
344
345/* Open a SIO link to CP and return the channel instance */
/* Open a SIO link to CP and return the channel instance.
 * Behavior depends on the current execution phase:
 * - IPC_P_OFF / IPC_P_ROM: reserve the flash channel and enqueue the
 *   chip-info read; the channel stays reserved for the later phases.
 * - IPC_P_PSI / IPC_P_EBL: validate the CP version and open the
 *   previously reserved channel.
 * Returns NULL on any failure or in an unexpected phase.
 */
struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	enum ipc_phase phase;
	int channel_id;

	phase = ipc_imem_phase_update(ipc_imem);
	switch (phase) {
	case IPC_P_OFF:
	case IPC_P_ROM:
		/* Get a channel id as flash id and reserve it. */
		channel_id = ipc_imem_channel_alloc(ipc_imem,
						    IPC_MEM_CTRL_CHL_ID_7,
						    IPC_CTYPE_CTRL);

		if (channel_id < 0) {
			dev_err(ipc_imem->dev,
				"reservation of a flash channel id failed");
			goto error;
		}

		ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
		channel = &ipc_imem->channels[channel_id];

		/* Enqueue chip info data to be read */
		if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
			dev_err(ipc_imem->dev, "Enqueue of chip info failed");
			/* Undo the reservation made above. */
			channel->state = IMEM_CHANNEL_FREE;
			goto error;
		}

		return channel;

	case IPC_P_PSI:
	case IPC_P_EBL:
		/* A cp_version of -1 (0xffffffff) marks an unreadable modem. */
		ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
		if (ipc_imem->cp_version == -1) {
			dev_err(ipc_imem->dev, "invalid CP version");
			goto error;
		}

		channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
		return ipc_imem_channel_open(ipc_imem, channel_id,
					     IPC_HP_CDEV_OPEN);

	default:
		/* CP is in the wrong state (e.g. CRASH or CD_READY) */
		dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
	}
error:
	return NULL;
}
398
399/* Release a SIO channel link to CP. */
/* Release a SIO channel link to CP.
 * Polls (bounded) until the modem reaches the RUN or PSI stage, drains
 * pending UL/DL data, then cleans up both pipes and drops the channel
 * count.
 */
void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;
	struct ipc_mem_channel *channel;
	int status = 0;
	u32 tail = 0;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	/* Poll the execution stage every 20 ms, for a total of at most
	 * boot_check_timeout ms, until CP reports RUN or PSI.
	 */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
		    exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;
		msleep(20);
		boot_check_timeout -= 20;
	} while (boot_check_timeout > 0);

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->ul_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			/* status == 0 means the wait timed out. */
			dev_dbg(ipc_imem->dev,
				"Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}
	}

	/* Read the current DL tail to detect pending downlink data. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->dl_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}
	}

	/* Due to wait for completion in messages, there is a small window
	 * between closing the pipe and updating the channel is closed. In this
	 * small window there could be HP update from Host Driver. Hence update
	 * the channel state as CLOSING to avoid unnecessary interrupt
	 * towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;
	/* Release the pipe resources */
	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
	ipc_imem->nr_of_channels--;
}
464
/* Queue a received downlink skb for the devlink reader and wake any
 * waiter blocked in ipc_imem_sys_devlink_read().
 */
void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
				    struct sk_buff *skb)
{
	/* Enqueue first so the reader woken below always finds the skb. */
	skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
	complete(&ipc_devlink->devlink_sio.read_sem);
}
471
472/* PSI transfer */
473static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
474				     struct ipc_mem_channel *channel,
475				     unsigned char *buf, int count)
476{
477	int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
478	enum ipc_mem_exec_stage exec_stage;
479
480	dma_addr_t mapping = 0;
481	int ret;
482
483	ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
484				DMA_TO_DEVICE);
485	if (ret)
486		goto pcie_addr_map_fail;
487
488	/* Save the PSI information for the CP ROM driver on the doorbell
489	 * scratchpad.
490	 */
491	ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
492	ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);
493
494	ret = wait_for_completion_interruptible_timeout
495		(&channel->ul_sem,
496		 msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
497
498	if (ret <= 0) {
499		dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
500			ret);
501		goto psi_transfer_fail;
502	}
503	/* If the PSI download fails, return the CP boot ROM exit code */
504	if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
505	    ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
506		ret = (-1) * ((int)ipc_imem->rom_exit_code);
507		goto psi_transfer_fail;
508	}
509
510	dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");
511
512	/* Wait psi_start_timeout milliseconds until the CP PSI image is
513	 * running and updates the execution_stage field with
514	 * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
515	 */
516	do {
517		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
518
519		if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
520			break;
521
522		msleep(20);
523		psi_start_timeout -= 20;
524	} while (psi_start_timeout > 0);
525
526	if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
527		goto psi_transfer_fail; /* Unknown status of CP PSI process. */
528
529	ipc_imem->phase = IPC_P_PSI;
530
531	/* Enter the PSI phase. */
532	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);
533
534	/* Request the RUNNING state from CP and wait until it was reached
535	 * or timeout.
536	 */
537	ipc_imem_ipc_init_check(ipc_imem);
538
539	ret = wait_for_completion_interruptible_timeout
540		(&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
541	if (ret <= 0) {
542		dev_err(ipc_imem->dev,
543			"Failed PSI RUNNING state on CP, Error-%d", ret);
544		goto psi_transfer_fail;
545	}
546
547	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
548			IPC_MEM_DEVICE_IPC_RUNNING) {
549		dev_err(ipc_imem->dev,
550			"ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
551			channel->channel_id,
552			ipc_imem_phase_get_string(ipc_imem->phase),
553			ipc_mmio_get_ipc_state(ipc_imem->mmio));
554
555		goto psi_transfer_fail;
556	}
557
558	/* Create the flash channel for the transfer of the images. */
559	if (!ipc_imem_sys_devlink_open(ipc_imem)) {
560		dev_err(ipc_imem->dev, "can't open flash_channel");
561		goto psi_transfer_fail;
562	}
563
564	ret = 0;
565psi_transfer_fail:
566	ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
567pcie_addr_map_fail:
568	return ret;
569}
570
/* Write a chunk of firmware data to the modem.
 * In the ROM phase @buf is handed over directly as the PSI image;
 * otherwise it is copied into an skb, queued on the devlink channel's
 * uplink list, and the call blocks for CP confirmation.
 * Returns 0 on success or a negative error code.
 */
int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
			       unsigned char *buf, int count)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	struct ipc_mem_channel *channel;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;

	/* In the ROM phase the PSI image is passed to CP about a specific
	 *  shared memory area and doorbell scratchpad directly.
	 */
	if (ipc_imem->phase == IPC_P_ROM) {
		ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
		/* A positive return means CP acknowledged the image but the
		 * PSI bring-up failed afterwards; send the crash signature.
		 */
		if (ret > 0)
			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      false);
		goto out;
	}

	/* Allocate skb memory for the uplink buffer. */
	skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
				 DMA_TO_DEVICE, 0);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	skb_put_data(skb, buf, count);

	/* Mark the skb as a blocking user-operation write. */
	IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;

	/* Add skb to the uplink skbuf accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

	/* Inform the IPC tasklet to pass uplink IP packets to CP.
	 * NOTE(review): if scheduling fails the skb stays queued and 0 is
	 * returned without waiting — confirm this is intentional.
	 */
	if (!ipc_imem_call_cdev_write(ipc_imem)) {
		ret = wait_for_completion_interruptible(&channel->ul_sem);

		if (ret < 0) {
			dev_err(ipc_imem->dev,
				"ch[%d] no CP confirmation, status = %d",
				channel->channel_id, ret);
			ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
			goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
628
/* Read one downlink skb from the devlink rx queue into @data.
 * Blocks (interruptible, IPC_READ_TIMEOUT ms per wait) until an skb is
 * available. On success *bytes_read holds the copied length.
 * Returns 0 on success, -ETIMEDOUT on wait timeout, or -EINVAL when
 * @bytes_to_read is smaller than the pending skb.
 */
int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
			      u32 bytes_to_read, u32 *bytes_read)
{
	struct sk_buff *skb = NULL;
	int rc = 0;

	/* check skb is available in rx_list or wait for skb */
	devlink->devlink_sio.devlink_read_pend = 1;
	while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
		if (!wait_for_completion_interruptible_timeout
				(&devlink->devlink_sio.read_sem,
				 msecs_to_jiffies(IPC_READ_TIMEOUT))) {
			dev_err(devlink->dev, "Read timedout");
			rc =  -ETIMEDOUT;
			/* NOTE(review): devlink_read_pend stays set on this
			 * path — confirm whether that is intentional.
			 */
			goto devlink_read_fail;
		}
	}
	devlink->devlink_sio.devlink_read_pend = 0;
	if (bytes_to_read < skb->len) {
		dev_err(devlink->dev, "Invalid size,expected len %d", skb->len);
		rc = -EINVAL;
		goto devlink_read_fail;
	}
	*bytes_read = skb->len;
	memcpy(data, skb->data, skb->len);

devlink_read_fail:
	/* dev_kfree_skb() tolerates a NULL skb (timeout path). */
	dev_kfree_skb(skb);
	return rc;
}
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2020-21 Intel Corporation.
  4 */
  5
  6#include <linux/delay.h>
  7
  8#include "iosm_ipc_chnl_cfg.h"
  9#include "iosm_ipc_devlink.h"
 10#include "iosm_ipc_imem.h"
 11#include "iosm_ipc_imem_ops.h"
 12#include "iosm_ipc_port.h"
 13#include "iosm_ipc_task_queue.h"
 14
 15/* Open a packet data online channel between the network layer and CP. */
 16int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
 17{
 18	dev_dbg(ipc_imem->dev, "%s if id: %d",
 19		ipc_imem_phase_get_string(ipc_imem->phase), if_id);
 20
 21	/* The network interface is only supported in the runtime phase. */
 22	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
 23		dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
 24			ipc_imem_phase_get_string(ipc_imem->phase));
 25		return -EIO;
 26	}
 27
 28	return ipc_mux_open_session(ipc_imem->mux, if_id);
 29}
 30
 31/* Release a net link to CP. */
 32void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
 33			     int channel_id)
 34{
 35	if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
 36	    if_id <= IP_MUX_SESSION_END)
 37		ipc_mux_close_session(ipc_imem->mux, if_id);
 38}
 39
 40/* Tasklet call to do uplink transfer. */
/* Tasklet call to do uplink transfer.
 * @arg, @msg and @size are unused here; the task-queue callback
 * signature requires them. Always returns 0.
 */
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
				  void *msg, size_t size)
{
	/* Push the accumulated uplink skbs towards CP. */
	ipc_imem_ul_send(ipc_imem);

	return 0;
}
 48
 49/* Through tasklet to do sio write. */
/* Through tasklet to do sio write.
 * Schedules ipc_imem_tq_cdev_write() on the IPC task queue so the
 * uplink transfer runs in tasklet context. Returns the queueing status.
 */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
					NULL, 0, false);
}
 55
 56/* Function for transfer UL data */
 57int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
 58			       int if_id, int channel_id, struct sk_buff *skb)
 59{
 60	int ret = -EINVAL;
 61
 62	if (!ipc_imem || channel_id < 0)
 63		goto out;
 64
 65	/* Is CP Running? */
 66	if (ipc_imem->phase != IPC_P_RUN) {
 67		dev_dbg(ipc_imem->dev, "phase %s transmit",
 68			ipc_imem_phase_get_string(ipc_imem->phase));
 69		ret = -EIO;
 70		goto out;
 71	}
 72
 73	/* Route the UL packet through IP MUX Layer */
 74	ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
 75out:
 76	return ret;
 77}
 78
 79/* Initialize wwan channel */
/* Initialize wwan channel.
 * This variant returns void: failures are logged but not propagated.
 */
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
				enum ipc_mux_protocol mux_type)
{
	struct ipc_chnl_cfg chnl_cfg = { 0 };

	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);

	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
	if (ipc_imem->cp_version == -1) {
		dev_err(ipc_imem->dev, "invalid CP version");
		return;
	}

	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);

	/* The first IP channel gets larger TD rings and DL buffers when
	 * the aggregation MUX protocol is in use.
	 */
	if (ipc_imem->mmio->mux_protocol == MUX_AGGREGATION &&
	    ipc_imem->nr_of_channels == IPC_MEM_IP_CHL_ID_0) {
		chnl_cfg.ul_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
		chnl_cfg.dl_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_DL;
		chnl_cfg.dl_buf_size = IPC_MEM_MAX_ADB_BUF_SIZE;
	}

	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
			      IRQ_MOD_OFF);

	/* WWAN registration. */
	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
	if (!ipc_imem->wwan)
		/* Registration failure leaves ipc_imem->wwan NULL. */
		dev_err(ipc_imem->dev,
			"failed to register the ipc_wwan interfaces");
}
111
112/* Map SKB to DMA for transfer */
113static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
114				   struct sk_buff *skb)
115{
116	struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
117	char *buf = skb->data;
118	int len = skb->len;
119	dma_addr_t mapping;
120	int ret;
121
122	ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
123
124	if (ret)
125		goto err;
126
127	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
128
129	IPC_CB(skb)->mapping = mapping;
130	IPC_CB(skb)->direction = DMA_TO_DEVICE;
131	IPC_CB(skb)->len = len;
132	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
133
134err:
135	return ret;
136}
137
138/* return true if channel is ready for use */
139static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
140				       struct ipc_mem_channel *channel)
141{
142	enum ipc_phase phase;
143
144	/* Update the current operation phase. */
145	phase = ipc_imem->phase;
146
147	/* Select the operation depending on the execution stage. */
148	switch (phase) {
149	case IPC_P_RUN:
150	case IPC_P_PSI:
151	case IPC_P_EBL:
152		break;
153
154	case IPC_P_ROM:
155		/* Prepare the PSI image for the CP ROM driver and
156		 * suspend the flash app.
157		 */
158		if (channel->state != IMEM_CHANNEL_RESERVED) {
159			dev_err(ipc_imem->dev,
160				"ch[%d]:invalid channel state %d,expected %d",
161				channel->channel_id, channel->state,
162				IMEM_CHANNEL_RESERVED);
163			goto channel_unavailable;
164		}
165		goto channel_available;
166
167	default:
168		/* Ignore uplink actions in all other phases. */
169		dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
170			channel->channel_id, phase);
171		goto channel_unavailable;
172	}
173	/* Check the full availability of the channel. */
174	if (channel->state != IMEM_CHANNEL_ACTIVE) {
175		dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
176			channel->channel_id, channel->state);
177		goto channel_unavailable;
178	}
179
180channel_available:
181	return true;
182
183channel_unavailable:
184	return false;
185}
186
187/**
188 * ipc_imem_sys_port_close - Release a sio link to CP.
189 * @ipc_imem:          Imem instance.
190 * @channel:           Channel instance.
191 */
/**
 * ipc_imem_sys_port_close - Release a sio link to CP.
 * @ipc_imem:          Imem instance.
 * @channel:           Channel instance.
 *
 * Waits (bounded by IPC_PEND_DATA_TIMEOUT per direction) for pending
 * uplink and downlink data on the channel to drain, then closes both
 * pipes and frees the channel.
 */
void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
			     struct ipc_mem_channel *channel)
{
	enum ipc_phase curr_phase;
	int status = 0;
	u32 tail = 0;

	curr_phase = ipc_imem->phase;

	/* If current phase is IPC_P_OFF or SIO ID is -ve then
	 * channel is already freed. Nothing to do.
	 */
	if (curr_phase == IPC_P_OFF) {
		dev_err(ipc_imem->dev,
			"nothing to do. Current Phase: %s",
			ipc_imem_phase_get_string(curr_phase));
		return;
	}

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel->channel_id, channel->state);
		return;
	}

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		ipc_imem->app_notify_ul_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * UL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->ul_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			/* status == 0 means the wait timed out. */
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}

		ipc_imem->app_notify_ul_pend = 0;
	}

	/* Likewise wait for pending DL data before closing the pipe; the
	 * current DL tail index is read back from the protocol layer.
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		ipc_imem->app_notify_dl_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * DL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->dl_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}

		ipc_imem->app_notify_dl_pend = 0;
	}

	/* Due to wait for completion in messages, there is a small window
	 * between closing the pipe and updating the channel is closed. In this
	 * small window there could be HP update from Host Driver. Hence update
	 * the channel state as CLOSING to avoid unnecessary interrupt
	 * towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;

	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);

	ipc_imem_channel_free(channel);
}
279
280/* Open a PORT link to CP and return the channel */
281struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
282					       int chl_id, int hp_id)
283{
284	struct ipc_mem_channel *channel;
285	int ch_id;
286
287	/* The PORT interface is only supported in the runtime phase. */
288	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
289		dev_err(ipc_imem->dev, "PORT open refused, phase %s",
290			ipc_imem_phase_get_string(ipc_imem->phase));
291		return NULL;
292	}
293
294	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
295
296	if (ch_id < 0) {
297		dev_err(ipc_imem->dev, "reservation of an PORT chnl id failed");
298		return NULL;
299	}
300
301	channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
302
303	if (!channel) {
304		dev_err(ipc_imem->dev, "PORT channel id open failed");
305		return NULL;
306	}
307
308	return channel;
309}
310
311/* transfer skb to modem */
312int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
313{
314	struct ipc_mem_channel *channel = ipc_cdev->channel;
315	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
316	int ret = -EIO;
317
318	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
319	    ipc_imem->phase == IPC_P_OFF_REQ)
320		goto out;
321
322	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
323
324	if (ret)
325		goto out;
326
327	/* Add skb to the uplink skbuf accumulator. */
328	skb_queue_tail(&channel->ul_list, skb);
329
330	ret = ipc_imem_call_cdev_write(ipc_imem);
331
332	if (ret) {
333		skb_dequeue_tail(&channel->ul_list);
334		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
335			ipc_cdev->channel->channel_id);
336	}
337out:
338	return ret;
339}
340
341/* Open a SIO link to CP and return the channel instance */
/* Open a SIO link to CP and return the channel instance.
 * Behavior depends on the current execution phase:
 * - IPC_P_OFF / IPC_P_ROM: reserve the flash channel and enqueue the
 *   chip-info read; the channel stays reserved for the later phases.
 * - IPC_P_PSI / IPC_P_EBL: validate the CP version and open the
 *   previously reserved channel.
 * Returns NULL on any failure or in an unexpected phase.
 */
struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	enum ipc_phase phase;
	int channel_id;

	phase = ipc_imem_phase_update(ipc_imem);
	switch (phase) {
	case IPC_P_OFF:
	case IPC_P_ROM:
		/* Get a channel id as flash id and reserve it. */
		channel_id = ipc_imem_channel_alloc(ipc_imem,
						    IPC_MEM_CTRL_CHL_ID_7,
						    IPC_CTYPE_CTRL);

		if (channel_id < 0) {
			dev_err(ipc_imem->dev,
				"reservation of a flash channel id failed");
			goto error;
		}

		ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
		channel = &ipc_imem->channels[channel_id];

		/* Enqueue chip info data to be read */
		if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
			dev_err(ipc_imem->dev, "Enqueue of chip info failed");
			/* Undo the reservation made above. */
			channel->state = IMEM_CHANNEL_FREE;
			goto error;
		}

		return channel;

	case IPC_P_PSI:
	case IPC_P_EBL:
		/* A cp_version of -1 (0xffffffff) marks an unreadable modem. */
		ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
		if (ipc_imem->cp_version == -1) {
			dev_err(ipc_imem->dev, "invalid CP version");
			goto error;
		}

		channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
		return ipc_imem_channel_open(ipc_imem, channel_id,
					     IPC_HP_CDEV_OPEN);

	default:
		/* CP is in the wrong state (e.g. CRASH or CD_READY) */
		dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
	}
error:
	return NULL;
}
394
395/* Release a SIO channel link to CP. */
/* Release a SIO channel link to CP.
 * Polls (bounded) until the modem reaches the RUN or PSI stage, drains
 * pending UL/DL data, then cleans up both pipes and drops the channel
 * count.
 */
void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;
	struct ipc_mem_channel *channel;
	int status = 0;
	u32 tail = 0;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	/* Poll the execution stage every 20 ms, for a total of at most
	 * boot_check_timeout ms, until CP reports RUN or PSI.
	 */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
		    exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;
		msleep(20);
		boot_check_timeout -= 20;
	} while (boot_check_timeout > 0);

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->ul_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			/* status == 0 means the wait timed out. */
			dev_dbg(ipc_imem->dev,
				"Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}
	}

	/* Read the current DL tail to detect pending downlink data. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->dl_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}
	}

	/* Due to wait for completion in messages, there is a small window
	 * between closing the pipe and updating the channel is closed. In this
	 * small window there could be HP update from Host Driver. Hence update
	 * the channel state as CLOSING to avoid unnecessary interrupt
	 * towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;
	/* Release the pipe resources */
	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
	ipc_imem->nr_of_channels--;
}
460
/* Hand a downlink skb received on the devlink channel to the reader:
 * queue it on rx_list and then wake the waiter blocked in
 * ipc_imem_sys_devlink_read() via read_sem. The enqueue must precede
 * the complete() so the woken reader finds the skb in the list.
 */
void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
				    struct sk_buff *skb)
{
	skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
	complete(&ipc_devlink->devlink_sio.read_sem);
}
467
/* PSI transfer:
 * Download the PSI image to CP via the doorbell scratchpad, wait for CP to
 * enter the PSI execution stage and reach the IPC RUNNING state, then open
 * the flash channel for the subsequent image transfers.
 *
 * Return: 0 on success; a negative value on DMA map failure, interrupted
 * wait or CP ROM failure (negated ROM exit code); a positive leftover
 * timeout value when CP ends up in an unexpected state -- the caller
 * (ipc_imem_sys_devlink_write) uses ret > 0 to send the inband crash
 * signature.
 * NOTE(review): a timeout of the first ul_sem wait leaves ret == 0, which
 * propagates as success to the caller -- confirm this is intended.
 */
static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
				     struct ipc_mem_channel *channel,
				     unsigned char *buf, int count)
{
	int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;

	dma_addr_t mapping = 0;
	int ret;

	/* Map the caller's buffer for device (uplink) DMA. */
	ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
				DMA_TO_DEVICE);
	if (ret)
		goto pcie_addr_map_fail;

	/* Save the PSI information for the CP ROM driver on the doorbell
	 * scratchpad.
	 */
	ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
	ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);

	/* Wait until CP acknowledges the PSI download (ul_sem completed). */
	ret = wait_for_completion_interruptible_timeout
		(&channel->ul_sem,
		 msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));

	if (ret <= 0) {
		dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
			ret);
		goto psi_transfer_fail;
	}
	/* If the PSI download fails, return the CP boot ROM exit code */
	if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
	    ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
		ret = (-1) * ((int)ipc_imem->rom_exit_code);
		goto psi_transfer_fail;
	}

	dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");

	/* Wait psi_start_timeout milliseconds until the CP PSI image is
	 * running and updates the execution_stage field with
	 * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
	 */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);

		if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;

		msleep(20);
		psi_start_timeout -= 20;
	} while (psi_start_timeout > 0);

	if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
		goto psi_transfer_fail; /* Unknown status of CP PSI process. */

	ipc_imem->phase = IPC_P_PSI;

	/* Enter the PSI phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);

	/* Request the RUNNING state from CP and wait until it was reached
	 * or timeout.
	 */
	ipc_imem_ipc_init_check(ipc_imem);

	ret = wait_for_completion_interruptible_timeout
		(&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
	if (ret <= 0) {
		dev_err(ipc_imem->dev,
			"Failed PSI RUNNING state on CP, Error-%d", ret);
		goto psi_transfer_fail;
	}

	/* Double-check the advertised IPC state in MMIO. */
	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
			IPC_MEM_DEVICE_IPC_RUNNING) {
		dev_err(ipc_imem->dev,
			"ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
			channel->channel_id,
			ipc_imem_phase_get_string(ipc_imem->phase),
			ipc_mmio_get_ipc_state(ipc_imem->mmio));

		goto psi_transfer_fail;
	}

	/* Create the flash channel for the transfer of the images. */
	if (!ipc_imem_sys_devlink_open(ipc_imem)) {
		dev_err(ipc_imem->dev, "can't open flash_channel");
		goto psi_transfer_fail;
	}

	ret = 0;
psi_transfer_fail:
	/* Common exit: unmap the PSI buffer on success and failure alike. */
	ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
pcie_addr_map_fail:
	return ret;
}
566
/* Transfer one image chunk to CP over the devlink channel; in the ROM
 * phase the chunk is the PSI image and goes through the doorbell
 * scratchpad path instead of the uplink pipe. Blocks until CP confirms
 * the transfer. Returns 0 on success or a negative error code; in the
 * ROM phase the PSI transfer status is propagated unchanged (see
 * ipc_imem_sys_psi_transfer()).
 */
int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
			       unsigned char *buf, int count)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	struct ipc_mem_channel *channel;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;

	/* In the ROM phase the PSI image is passed to CP about a specific
	 *  shared memory area and doorbell scratchpad directly.
	 */
	if (ipc_imem->phase == IPC_P_ROM) {
		ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
		/* If the PSI transfer fails then send crash
		 * Signature (ret > 0 means CP reached an unexpected state).
		 */
		if (ret > 0)
			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      false);
		goto out;
	}

	/* Allocate skb memory for the uplink buffer. */
	skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
				 DMA_TO_DEVICE, 0);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	skb_put_data(skb, buf, count);

	/* Mark the skb as a blocking user operation -- presumably this is
	 * what makes the UL send path complete ul_sem below; confirm against
	 * ipc_imem_ul_send().
	 */
	IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;

	/* Add skb to the uplink skbuf accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

	/* Inform the IPC tasklet to pass uplink IP packets to CP. */
	if (!ipc_imem_call_cdev_write(ipc_imem)) {
		/* Block until CP confirms the transfer. */
		ret = wait_for_completion_interruptible(&channel->ul_sem);

		if (ret < 0) {
			dev_err(ipc_imem->dev,
				"ch[%d] no CP confirmation, status = %d",
				channel->channel_id, ret);
			ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
			goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
624
625int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
626			      u32 bytes_to_read, u32 *bytes_read)
627{
628	struct sk_buff *skb = NULL;
629	int rc = 0;
630
631	/* check skb is available in rx_list or wait for skb */
632	devlink->devlink_sio.devlink_read_pend = 1;
633	while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
634		if (!wait_for_completion_interruptible_timeout
635				(&devlink->devlink_sio.read_sem,
636				 msecs_to_jiffies(IPC_READ_TIMEOUT))) {
637			dev_err(devlink->dev, "Read timedout");
638			rc =  -ETIMEDOUT;
639			goto devlink_read_fail;
640		}
641	}
642	devlink->devlink_sio.devlink_read_pend = 0;
643	if (bytes_to_read < skb->len) {
644		dev_err(devlink->dev, "Invalid size,expected len %d", skb->len);
645		rc = -EINVAL;
646		goto devlink_read_fail;
647	}
648	*bytes_read = skb->len;
649	memcpy(data, skb->data, skb->len);
650
651devlink_read_fail:
652	dev_kfree_skb(skb);
653	return rc;
654}