// SPDX-License-Identifier: GPL-2.0-only
/*
 *  sst_ipc.c - Intel SST Driver for audio engine
 *
 *  Copyright (C) 2008-14 Intel Corporation
 *  Authors:	Vinod Koul <vinod.koul@intel.com>
 *		Harsha Priya <priya.harsha@intel.com>
 *		Dharageswari R <dharageswari.r@intel.com>
 *		KP Jeeja <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>

#include <asm/platform_sst_audio.h>

#include "../sst-mfld-platform.h"
#include "sst.h"

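/**
 * sst_create_block - allocate a wait block and queue it on the block list
 * @ctx: SST driver context
 * @msg_id: IPC message id the caller will wait on
 * @drv_id: private driver id of the caller
 *
 * The returned block is later matched and completed by sst_wake_up_block()
 * when the firmware response arrives. Returns NULL on allocation failure.
 */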
struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
					u32 msg_id, u32 drv_id)
{
	struct sst_block *msg;

	dev_dbg(ctx->dev, "Enter\n");
	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return NULL;
	msg->condition = false;
	msg->on = true;
	msg->msg_id = msg_id;
	msg->drv_id = drv_id;
	spin_lock_bh(&ctx->block_lock);
	list_add_tail(&msg->node, &ctx->block_list);
	spin_unlock_bh(&ctx->block_lock);

	return msg;
}

/*
 * While handling an interrupt we need to check the message status and then
 * find out whether a thread is blocked waiting for that message.
 *
 * Here we unblock any waiter whose block matches the msg_id/drv_id pair
 * passed in. No matching block is found in two cases:
 * a) the response is for a short message that never had a block allocated,
 *    which can be silently ignored
 * b) the block genuinely cannot be found (likely a bug)
 *
 * Because short messages are frequent, printing an error for case a) would
 * spam the kernel log, so keep these as debug prints that can be enabled
 * via dynamic debug while debugging IPC issues.
 */
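/**
 * sst_wake_up_block - complete the block matching a firmware response
 * @ctx: SST driver context
 * @result: result code reported by the firmware
 * @drv_id: private driver id carried in the response header
 * @ipc: IPC message id carried in the response header
 * @data: optional response payload handed over to the waiter (may be NULL)
 * @size: size of @data in bytes
 *
 * Returns 0 if a matching block was found and woken, -EINVAL otherwise.
 */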
int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
		u32 drv_id, u32 ipc, void *data, u32 size)
{
	struct sst_block *block;

	dev_dbg(ctx->dev, "Enter\n");

	spin_lock_bh(&ctx->block_lock);
	list_for_each_entry(block, &ctx->block_list, node) {
		dev_dbg(ctx->dev, "Block ipc %d, drv_id %d\n", block->msg_id,
							block->drv_id);
		if (block->msg_id == ipc && block->drv_id == drv_id) {
			dev_dbg(ctx->dev, "free up the block\n");
			block->ret_code = result;
			block->data = data;
			block->size = size;
			block->condition = true;
			spin_unlock_bh(&ctx->block_lock);
			wake_up(&ctx->wait_queue);
			return 0;
		}
	}
	spin_unlock_bh(&ctx->block_lock);
	dev_dbg(ctx->dev,
		"Block not found or a response received for a short msg for ipc %d, drv_id %d\n",
		ipc, drv_id);
	return -EINVAL;
}

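/**
 * sst_free_block - remove a block from the block list and free it
 * @ctx: SST driver context
 * @freed: block previously obtained from sst_create_block()
 *
 * Any response payload still attached to the block is freed as well.
 * Returns 0 on success, -EINVAL if the block is not on the list.
 */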
int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)
{
	struct sst_block *block, *__block;

	dev_dbg(ctx->dev, "Enter\n");
	spin_lock_bh(&ctx->block_lock);
	list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
		if (block == freed) {
			pr_debug("pvt_id freed --> %d\n", freed->drv_id);
			/* toggle the index position of pvt_id */
			list_del(&freed->node);
			spin_unlock_bh(&ctx->block_lock);
			kfree(freed->data);
			freed->data = NULL;
			kfree(freed);
			return 0;
		}
	}
	spin_unlock_bh(&ctx->block_lock);
	dev_err(ctx->dev, "block is already freed!!!\n");
	return -EINVAL;
}

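/**
 * sst_post_message_mrfld - post an IPC message to the Merrifield DSP
 * @sst_drv_ctx: SST driver context
 * @ipc_msg: message to send in the synchronous case
 * @sync: true to busy-wait on the IPCX busy bit and send @ipc_msg directly,
 *	  false to send the next message queued on the dispatch list
 *
 * For large messages the mailbox payload is copied to the outbound mailbox
 * before the header is written to the SST_IPCX shim register.
 * Returns 0 on success or -EBUSY if the DSP stays busy in the sync path.
 */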
int sst_post_message_mrfld(struct intel_sst_drv *sst_drv_ctx,
		struct ipc_post *ipc_msg, bool sync)
{
	struct ipc_post *msg = ipc_msg;
	union ipc_header_mrfld header;
	unsigned int loop_count = 0;
	int retval = 0;
	unsigned long irq_flags;

	dev_dbg(sst_drv_ctx->dev, "Enter: sync: %d\n", sync);
	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
	if (sync) {
		while (header.p.header_high.part.busy) {
			if (loop_count > 25) {
				dev_err(sst_drv_ctx->dev,
					"sst: Busy wait failed, can't send this msg\n");
				retval = -EBUSY;
				goto out;
			}
			cpu_relax();
			loop_count++;
			header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
		}
	} else {
		if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
			/* queue is empty, nothing to send */
			spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
			dev_dbg(sst_drv_ctx->dev,
					"Empty msg queue... NO Action\n");
			return 0;
		}

		if (header.p.header_high.part.busy) {
			spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
			dev_dbg(sst_drv_ctx->dev, "Busy not free... post later\n");
			return 0;
		}

		/* copy msg from list */
		msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
				struct ipc_post, node);
		list_del(&msg->node);
	}
	dev_dbg(sst_drv_ctx->dev, "sst: Post message: header = %x\n",
		msg->mrfld_header.p.header_high.full);
	dev_dbg(sst_drv_ctx->dev, "sst: size = 0x%x\n",
			msg->mrfld_header.p.header_low_payload);

	if (msg->mrfld_header.p.header_high.part.large)
		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
			    msg->mailbox_data,
			    msg->mrfld_header.p.header_low_payload);

	sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);

out:
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	kfree(msg->mailbox_data);
	kfree(msg);
	return retval;
}

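/**
 * intel_sst_clear_intr_mrfld - acknowledge a busy interrupt from the DSP
 * @sst_drv_ctx: SST driver context
 *
 * Clears the busy bit in SST_ISRX, writes the IA "done" acknowledgement to
 * SST_IPCD and then unmasks the busy interrupt again in SST_IMRX.
 */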
void intel_sst_clear_intr_mrfld(struct intel_sst_drv *sst_drv_ctx)
{
	union interrupt_reg_mrfld isr;
	union interrupt_reg_mrfld imr;
	union ipc_header_mrfld clear_ipc;
	unsigned long irq_flags;

	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	imr.full = sst_shim_read64(sst_drv_ctx->shim, SST_IMRX);
	isr.full = sst_shim_read64(sst_drv_ctx->shim, SST_ISRX);

	/* write 1 to clear */
	isr.part.busy_interrupt = 1;
	sst_shim_write64(sst_drv_ctx->shim, SST_ISRX, isr.full);

	/* Set IA done bit */
	clear_ipc.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCD);

	clear_ipc.p.header_high.part.busy = 0;
	clear_ipc.p.header_high.part.done = 1;
	clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS;
	sst_shim_write64(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
	/* unmask busy interrupt */
	imr.part.busy_interrupt = 0;
	sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, imr.full);
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
}

/**
 * process_fw_init - process the FW init message
 * @sst_drv_ctx: SST driver context
 * @msg: IPC mailbox data sent by the FW
 *
 * Handles the FW init message: on failure it puts the driver back into the
 * reset state, otherwise it saves and prints the loaded FW version, then
 * wakes up the thread waiting for FW download completion.
 */
static void process_fw_init(struct intel_sst_drv *sst_drv_ctx,
		void *msg)
{
	struct ipc_header_fw_init *init =
		(struct ipc_header_fw_init *)msg;
	int retval = 0;

	dev_dbg(sst_drv_ctx->dev, "*** FW Init msg came***\n");
	if (init->result) {
		sst_set_fw_state_locked(sst_drv_ctx, SST_RESET);
		dev_err(sst_drv_ctx->dev, "FW Init failed, Error %x\n",
				init->result);
		retval = init->result;
		goto ret;
	}
	if (memcmp(&sst_drv_ctx->fw_version, &init->fw_version,
		   sizeof(init->fw_version)))
		dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n",
			init->fw_version.type, init->fw_version.major,
			init->fw_version.minor, init->fw_version.build);
	dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n",
			init->build_info.date, init->build_info.time);

	/* Save FW version */
	sst_drv_ctx->fw_version.type = init->fw_version.type;
	sst_drv_ctx->fw_version.major = init->fw_version.major;
	sst_drv_ctx->fw_version.minor = init->fw_version.minor;
	sst_drv_ctx->fw_version.build = init->fw_version.build;

ret:
	sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0, NULL, 0);
}

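/**
 * process_fw_async_msg - dispatch an asynchronous message from the FW
 * @sst_drv_ctx: SST driver context
 * @msg: IPC message whose mailbox data starts with a struct ipc_dsp_hdr
 *
 * Handles period-elapsed and drain notifications for the affected stream,
 * FW init completion, buffer under-run reports and async FW errors.
 */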
static void process_fw_async_msg(struct intel_sst_drv *sst_drv_ctx,
			struct ipc_post *msg)
{
	u32 msg_id;
	int str_id;
	u32 data_size;
	void *data_offset;
	struct stream_info *stream;
	u32 msg_low, pipe_id;

	msg_low = msg->mrfld_header.p.header_low_payload;
	msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id;
	data_offset = (msg->mailbox_data + sizeof(struct ipc_dsp_hdr));
	data_size = msg_low - (sizeof(struct ipc_dsp_hdr));

	switch (msg_id) {
	case IPC_SST_PERIOD_ELAPSED_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0) {
			dev_dbg(sst_drv_ctx->dev,
				"Period elapsed rcvd for pipe id 0x%x\n",
				pipe_id);
			stream = &sst_drv_ctx->streams[str_id];
			/* If stream is dropped, skip processing this message */
			if (stream->status == STREAM_INIT)
				break;
			if (stream->period_elapsed)
				stream->period_elapsed(stream->pcm_substream);
			if (stream->compr_cb)
				stream->compr_cb(stream->compr_cb_param);
		}
		break;

	case IPC_IA_DRAIN_STREAM_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0) {
			stream = &sst_drv_ctx->streams[str_id];
			if (stream->drain_notify)
				stream->drain_notify(stream->drain_cb_param);
		}
		break;

	case IPC_IA_FW_ASYNC_ERR_MRFLD:
		dev_err(sst_drv_ctx->dev, "FW sent async error msg:\n");
		/* dump the whole error payload once; print_hex_dump() walks the buffer itself */
		print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
				16, 4, data_offset, data_size, false);
		break;

	case IPC_IA_FW_INIT_CMPLT_MRFLD:
		process_fw_init(sst_drv_ctx, data_offset);
		break;

	case IPC_IA_BUF_UNDER_RUN_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0)
			dev_err(sst_drv_ctx->dev,
				"Buffer under-run for pipe:%#x str_id:%d\n",
				pipe_id, str_id);
		break;

	default:
		dev_err(sst_drv_ctx->dev,
			"Unrecognized async msg from FW msg_id %#x\n", msg_id);
	}
}

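/**
 * sst_process_reply_mrfld - process a reply received from the DSP
 * @sst_drv_ctx: SST driver context
 * @msg: IPC message read from the shim and mailbox
 *
 * Async messages are forwarded to process_fw_async_msg(). Short error
 * replies and normal replies wake up the thread blocked on the matching
 * drv_id/msg_id pair; for large replies the mailbox payload is duplicated
 * and handed over to the waiter.
 */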
void sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx,
		struct ipc_post *msg)
{
	unsigned int drv_id;
	void *data;
	union ipc_header_high msg_high;
	u32 msg_low;
	struct ipc_dsp_hdr *dsp_hdr;

	msg_high = msg->mrfld_header.p.header_high;
	msg_low = msg->mrfld_header.p.header_low_payload;

	dev_dbg(sst_drv_ctx->dev, "IPC process message header %x payload %x\n",
		msg->mrfld_header.p.header_high.full,
		msg->mrfld_header.p.header_low_payload);

	drv_id = msg_high.part.drv_id;

	/* Check for async messages first */
	if (drv_id == SST_ASYNC_DRV_ID) {
		/* FW sent an async large message */
		process_fw_async_msg(sst_drv_ctx, msg);
		return;
	}

	/* FW sent a short error response for an IPC */
	if (msg_high.part.result && !msg_high.part.large) {
		/* 32-bit FW error code in msg_low */
		dev_err(sst_drv_ctx->dev, "FW sent error response 0x%x", msg_low);
		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
			msg_high.part.drv_id,
			msg_high.part.msg_id, NULL, 0);
		return;
	}

	/*
	 * Process all valid responses.
	 * If it is a large message, the payload contains the size to
	 * copy from the mailbox.
	 */
	if (msg_high.part.large) {
		data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL);
		if (!data)
			return;
		/* Copy the command id so that it can be used to put the SST into reset */
		dsp_hdr = (struct ipc_dsp_hdr *)data;
		dev_dbg(sst_drv_ctx->dev, "cmd_id %d\n", dsp_hdr->cmd_id);
		if (sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
				msg_high.part.drv_id,
				msg_high.part.msg_id, data, msg_low))
			kfree(data);
	} else {
		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
				msg_high.part.drv_id,
				msg_high.part.msg_id, NULL, 0);
	}
}