// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

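/* Bootloader Messaging Interface (BMI)
 *
 * BMI is the simple command/response protocol used to talk to the target's
 * bootloader before the firmware is running.  Every command goes through
 * ath10k_hif_exchange_bmi_msg() and is only accepted until BMI_DONE has been
 * sent; the ar->bmi.done_sent flag guards this.
 *
 * Illustrative call order during boot (a sketch only, not taken from this
 * file; load_addr, app_start_addr, fw_data and fw_len are placeholders):
 *
 *	ath10k_bmi_start(ar);
 *	ath10k_bmi_get_target_info(ar, &target_info);
 *	ath10k_bmi_fast_download(ar, load_addr, fw_data, fw_len);
 *	ath10k_bmi_set_start(ar, app_start_addr);
 *	ath10k_bmi_done(ar);
 */

/* Re-arm BMI after a (re)boot: clear the done_sent flag set by
 * ath10k_bmi_done() so that BMI commands are accepted again.
 */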
void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;
}
EXPORT_SYMBOL(ath10k_bmi_start);

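/* Send BMI_DONE to tell the target that the BMI phase is over.  All further
 * BMI commands are refused until ath10k_bmi_start() is called again; repeated
 * calls are skipped.
 */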
int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

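/* Query the target version and type.  Only allowed while the BMI phase is
 * still open, i.e. before BMI_DONE has been sent.
 */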
int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

#define TARGET_VERSION_SENTINAL 0xffffffffu

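/* SDIO variant of the target info query.  Some SDIO targets prepend a
 * sentinel word before the length word of the response, so the reply is
 * read piecewise instead of in a single exchange.
 */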
int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel word before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

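/* Read target memory at @address into @buffer, splitting the transfer into
 * chunks of at most BMI_MAX_DATA_SIZE bytes.
 */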
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id            = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len  = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer  += rxlen;
		length  -= rxlen;
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_bmi_read_memory);

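/* Write a 32-bit value to a target SoC register. */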
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

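/* Read a 32-bit value from a target SoC register. */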
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}

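/* Write @buffer into target memory at @address.  The transfer is chunked and
 * each chunk is padded up to a 4-byte boundary on the wire.
 */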
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id             = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len  = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer  += txlen;
		length  -= txlen;
	}

	return 0;
}

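/* Execute code on the target at @address with argument @param and return the
 * 32-bit result reported by the target in @result.
 */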
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr  = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

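/* Variant of ath10k_bmi_lz_data() using the larger BMI_MAX_LARGE_DATA_SIZE
 * chunk size; the command buffer is allocated from the heap to fit the
 * bigger payload.
 */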
static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd *cmd;
	u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
	u32 txlen;
	int ret;
	size_t buf_len;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
	cmd = kzalloc(buf_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	while (length) {
		txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd->id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd->lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd->lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			kfree(cmd);
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	kfree(cmd);

	return 0;
}

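/* Push LZ-compressed data to the target in chunks of at most
 * BMI_MAX_DATA_SIZE bytes.  Chunk lengths are expected to be 4-byte aligned.
 */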
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

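/* Start an LZ stream on the target at @address; data pushed with BMI_LZ_DATA
 * afterwards is decompressed starting there.
 */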
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to start LZ stream to the device\n");
		return ret;
	}

	return 0;
}

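/* Download an LZ-compressed image to @address: start an LZ stream, push the
 * 4-byte-aligned head of @buffer, send any trailing bytes in a zero-padded
 * word, then restart the stream at address 0 to flush the target caches.
 */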
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	if (ar->hw_params.bmi_large_size_download)
		ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
	else
		ret = ath10k_bmi_lz_data(ar, buffer, head_len);

	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}

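/* Set the address at which the target application starts executing once the
 * BMI phase is finished.
 */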
int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi set start command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_SET_APP_START);
	cmd.set_app_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to set start to the device: %d\n", ret);
		return ret;
	}

	return 0;
}