v6.8
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;
}
EXPORT_SYMBOL(ath10k_bmi_start);

int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

#define TARGET_VERSION_SENTINAL 0xffffffffu

int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel byte before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id            = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len  = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer  += rxlen;
		length  -= rxlen;
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_bmi_read_memory);

int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id             = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len  = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);
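		/* Worked example of the fixup above (illustrative only): for a
		 * final 10-byte chunk, min() picks txlen = 10 and 10 bytes are
		 * copied; roundup(10, 4) bumps txlen to 12 so a whole number
		 * of 32-bit words is sent on the wire; min(12, 10) then clamps
		 * txlen back to 10 so that length reaches exactly zero and the
		 * loop terminates.
		 */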

		address += txlen;
		buffer  += txlen;
		length  -= txlen;
	}

	return 0;
}

int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr  = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd *cmd;
	u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
	u32 txlen;
	int ret;
	size_t buf_len;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
	cmd = kzalloc(buf_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	while (length) {
		txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd->id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd->lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd->lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			kfree(cmd);
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	kfree(cmd);

	return 0;
}
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
		return ret;
	}

	return 0;
}

int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	if (ar->hw_params.bmi_large_size_download)
		ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
	else
		ret = ath10k_bmi_lz_data(ar, buffer, head_len);

	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}

int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi set start command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_SET_APP_START);
	cmd.set_app_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to set start to the device:%d\n", ret);
		return ret;
	}

	return 0;
}
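For orientation, the following is a minimal, hypothetical sketch of how a caller might sequence the helpers above to push a firmware image over BMI. It is not the actual ath10k core boot path; the function name example_bmi_boot and the parameters fw_data, fw_len and fw_addr are illustrative placeholders, while all BMI calls and debug macros come from the listing above.

/* Sketch only: sequence the v6.8 BMI helpers for a firmware download.
 * 'ar', 'fw_data', 'fw_len' and 'fw_addr' are assumed to be provided
 * by the surrounding driver code.
 */
static int example_bmi_boot(struct ath10k *ar, const void *fw_data,
			    u32 fw_len, u32 fw_addr)
{
	struct bmi_target_info target_info = {};
	int ret;

	/* BMI commands are only accepted before BMI_DONE is sent */
	ath10k_bmi_start(ar);

	ret = ath10k_bmi_get_target_info(ar, &target_info);
	if (ret)
		return ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "target version 0x%x type 0x%x\n",
		   target_info.version, target_info.type);

	/* stream the (compressed) image to its load address */
	ret = ath10k_bmi_fast_download(ar, fw_addr, fw_data, fw_len);
	if (ret)
		return ret;

	/* tell the target where execution should begin */
	ret = ath10k_bmi_set_start(ar, fw_addr);
	if (ret)
		return ret;

	/* leave BMI; no further BMI commands are accepted after this */
	return ath10k_bmi_done(ar);
}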
v4.17
 
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

void ath10k_bmi_start(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;

	/* Enable hardware clock to speed up firmware download */
	if (ar->hw_params.hw_ops->enable_pll_clk) {
		ret = ar->hw_params.hw_ops->enable_pll_clk(ar);
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret);
	}
}

int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

#define TARGET_VERSION_SENTINAL 0xffffffffu

int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel byte before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id            = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len  = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer  += rxlen;
		length  -= rxlen;
	}

	return 0;
}

int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}

int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id             = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len  = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer  += txlen;
		length  -= txlen;
	}

	return 0;
}

int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr  = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
		return ret;
	}

	return 0;
}

int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	ret = ath10k_bmi_lz_data(ar, buffer, head_len);
	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}