// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

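/* Begin a BMI session: clear the "done" flag and, if the hardware
 * provides a hook for it, enable the PLL clock to speed up the firmware
 * download that follows.
 */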
void ath10k_bmi_start(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;

	/* Enable hardware clock to speed up firmware download */
	if (ar->hw_params.hw_ops->enable_pll_clk) {
		ret = ar->hw_params.hw_ops->enable_pll_clk(ar);
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret);
	}
}

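/* Send BMI_DONE to end the BMI phase. The command is issued at most once;
 * if done_sent is already set the call is a no-op.
 */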
int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

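/* Query the target version and type. Allowed only while BMI is still
 * active, i.e. before BMI_DONE has been sent.
 */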
int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

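/* Marker word some SDIO targets send ahead of the real target info. */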
#define TARGET_VERSION_SENTINAL 0xffffffffu

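/* SDIO variant of the target info query. The response arrives in pieces:
 * an optional sentinel word, the response length, and then the remainder
 * of the target info structure.
 */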
int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel word before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

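/* Read 'length' bytes of target memory at 'address' into 'buffer',
 * in chunks of at most BMI_MAX_DATA_SIZE bytes.
 */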
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id            = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len  = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer  += rxlen;
		length  -= rxlen;
	}

	return 0;
}

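/* Write a 32-bit value to a SoC register on the target. */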
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

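/* Read a 32-bit SoC register from the target into *reg_val. */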
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}

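/* Write 'length' bytes from 'buffer' to target memory at 'address'.
 * Each chunk is rounded up to a 4-byte boundary before transmission.
 */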
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id             = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len  = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer  += txlen;
		length  -= txlen;
	}

	return 0;
}

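/* Ask the target to execute code at 'address' with argument 'param' and
 * return the 32-bit result in *result.
 */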
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr  = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

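/* Variant of ath10k_bmi_lz_data() for targets that support large BMI
 * transfers (hw_params.bmi_large_size_download). Uses a heap-allocated
 * command so each chunk can be up to BMI_MAX_LARGE_DATA_SIZE bytes minus
 * the header.
 */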
static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd *cmd;
	u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
	u32 txlen;
	int ret;
	size_t buf_len;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
	cmd = kzalloc(buf_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	while (length) {
		txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd->id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd->lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd->lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			kfree(cmd);
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	kfree(cmd);

	return 0;
}

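/* Feed LZ-compressed data into the currently open LZ stream, in chunks
 * that fit the on-stack command (BMI_MAX_DATA_SIZE minus the header).
 */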
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

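/* Open an LZ stream on the target at 'address'; subsequent
 * ath10k_bmi_lz_data() calls feed data into this stream.
 */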
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to start LZ stream to the device\n");
		return ret;
	}

	return 0;
}

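/* Download an image through the LZ stream interface: send the 4-byte
 * aligned head of the buffer, then any remaining bytes from a zero-padded
 * trailer word, and finally restart the stream at address 0 to flush the
 * target caches.
 */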
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	if (ar->hw_params.bmi_large_size_download)
		ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
	else
		ret = ath10k_bmi_lz_data(ar, buffer, head_len);

	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}

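/* Set the address at which the target application starts executing. */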
int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi set start command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_SET_APP_START);
	cmd.set_app_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to set start to the device: %d\n", ret);
		return ret;
	}

	return 0;
}