// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;
}
EXPORT_SYMBOL(ath10k_bmi_start);

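/* Send the BMI_DONE command to conclude the BMI phase. Once it has been
 * sent, subsequent BMI commands are rejected with -EBUSY (a repeated
 * BMI_DONE is silently skipped).
 */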
int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

#define TARGET_VERSION_SENTINAL 0xffffffffu

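/* SDIO variant of the target info query. Some targets first return a
 * sentinel word (TARGET_VERSION_SENTINAL) followed by a length word
 * before the actual target info structure, so the response is read in
 * several steps.
 */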
int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel byte before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

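/* Read target memory into a host buffer, transferring at most
 * BMI_MAX_DATA_SIZE bytes per BMI_READ_MEMORY command.
 */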
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer += rxlen;
		length -= rxlen;
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_bmi_read_memory);

int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}

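/* Write a host buffer to target memory. The data is sent in chunks that
 * fit in BMI_MAX_DATA_SIZE together with the command header, and each
 * chunk's payload length is rounded up to a 4-byte boundary before
 * transmission.
 */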
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

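/* Variant of ath10k_bmi_lz_data() for targets that support large BMI
 * transfers: the command buffer is heap-allocated so each chunk can be
 * sized against BMI_MAX_LARGE_DATA_SIZE rather than BMI_MAX_DATA_SIZE.
 */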
static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd *cmd;
	u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
	u32 txlen;
	int ret;
	size_t buf_len;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
	cmd = kzalloc(buf_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	while (length) {
		txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd->id = __cpu_to_le32(BMI_LZ_DATA);
		cmd->lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd->lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			kfree(cmd);
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	kfree(cmd);

	return 0;
}

int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
		return ret;
	}

	return 0;
}

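/* Download an image via the LZ stream interface: open a stream at the
 * given address, push the 4-byte-aligned head of the buffer, send any
 * remaining bytes in a zero-padded trailer word, then restart the stream
 * at address 0 to flush the target's caches.
 */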
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	if (ar->hw_params.bmi_large_size_download)
		ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
	else
		ret = ath10k_bmi_lz_data(ar, buffer, head_len);

	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}

int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi set start command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_SET_APP_START);
	cmd.set_app_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to set start to the device:%d\n", ret);
		return ret;
	}

	return 0;
}