// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/platform_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/soc/qcom/smem.h>
#include <linux/string.h>
#include <net/sock.h>

#include "debug.h"
#include "snoc.h"

#define ATH10K_QMI_CLIENT_ID		0x4b4e454c
#define ATH10K_QMI_TIMEOUT		30
#define SMEM_IMAGE_VERSION_TABLE	469
#define SMEM_IMAGE_TABLE_CNSS_INDEX	13
#define SMEM_IMAGE_VERSION_ENTRY_SIZE	128
#define SMEM_IMAGE_VERSION_NAME_SIZE	75

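/* Reassign ownership of an MSA memory region from HLOS to the modem (MSS MSA)
 * and WLAN VMIDs via a TrustZone SCM call; non-secure regions are additionally
 * shared with the WLAN CE VMID.
 */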
static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
					 struct ath10k_msa_mem_info *mem_info)
{
	struct qcom_scm_vmperm dst_perms[3];
	struct ath10k *ar = qmi->ar;
	u64 src_perms;
	u32 perm_count;
	int ret;

	src_perms = BIT(QCOM_SCM_VMID_HLOS);

	dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
	dst_perms[0].perm = QCOM_SCM_PERM_RW;
	dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
	dst_perms[1].perm = QCOM_SCM_PERM_RW;

	if (mem_info->secure) {
		perm_count = 2;
	} else {
		dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
		dst_perms[2].perm = QCOM_SCM_PERM_RW;
		perm_count = 3;
	}

	ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
				  &src_perms, dst_perms, perm_count);
	if (ret < 0)
		ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);

	return ret;
}

static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
					   struct ath10k_msa_mem_info *mem_info)
{
	struct qcom_scm_vmperm dst_perms;
	struct ath10k *ar = qmi->ar;
	u64 src_perms;
	int ret;

	src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);

	if (!mem_info->secure)
		src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);

	dst_perms.vmid = QCOM_SCM_VMID_HLOS;
	dst_perms.perm = QCOM_SCM_PERM_RW;

	ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
				  &src_perms, &dst_perms, 1);
	if (ret < 0)
		ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);

	return ret;
}

static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
{
	int ret;
	int i;

	if (qmi->msa_fixed_perm)
		return 0;

	for (i = 0; i < qmi->nr_mem_region; i++) {
		ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
		if (ret)
			goto err_unmap;
	}

	return 0;

err_unmap:
	for (i--; i >= 0; i--)
		ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
	return ret;
}

static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
{
	int i;

	if (qmi->msa_fixed_perm)
		return;

	for (i = 0; i < qmi->nr_mem_region; i++)
		ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
}

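/* Query the firmware for its MSA sub-region layout and validate that every
 * reported region lies within the host's MSA carveout before caching it.
 */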
static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_msa_info_resp_msg_v01 resp = {};
	struct wlfw_msa_info_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	phys_addr_t max_mapped_addr;
	struct qmi_txn txn;
	int ret;
	int i;

	req.msa_addr = ar->msa.paddr;
	req.size = ar->msa.mem_size;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_msa_info_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_MSA_INFO_REQ_V01,
			       WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_msa_info_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
		ath10k_err(ar, "invalid memory region length received: %d\n",
			   resp.mem_region_info_len);
		ret = -EINVAL;
		goto out;
	}

	max_mapped_addr = ar->msa.paddr + ar->msa.mem_size;
	qmi->nr_mem_region = resp.mem_region_info_len;
	for (i = 0; i < resp.mem_region_info_len; i++) {
		if (resp.mem_region_info[i].size > ar->msa.mem_size ||
		    resp.mem_region_info[i].region_addr > max_mapped_addr ||
		    resp.mem_region_info[i].region_addr < ar->msa.paddr ||
		    resp.mem_region_info[i].size +
		    resp.mem_region_info[i].region_addr > max_mapped_addr) {
			ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
				   resp.mem_region_info[i].region_addr,
				   resp.mem_region_info[i].size);
			ret = -EINVAL;
			goto fail_unwind;
		}
		qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
		qmi->mem_region[i].size = resp.mem_region_info[i].size;
		qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
		ath10k_dbg(ar, ATH10K_DBG_QMI,
			   "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
			   i, &qmi->mem_region[i].addr,
			   qmi->mem_region[i].size,
			   qmi->mem_region[i].secure);
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
	return 0;

fail_unwind:
	memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i);
out:
	return ret;
}

static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_msa_ready_resp_msg_v01 resp = {};
	struct wlfw_msa_ready_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_msa_ready_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_MSA_READY_REQ_V01,
			       WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_msa_ready_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
	return 0;

out:
	return ret;
}

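/* Send the board data file to the firmware in segments of at most
 * QMI_WLFW_MAX_DATA_SIZE_V01 bytes, marking the final segment with end = 1.
 */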
static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
{
	struct wlfw_bdf_download_resp_msg_v01 resp = {};
	struct wlfw_bdf_download_req_msg_v01 *req;
	struct ath10k *ar = qmi->ar;
	unsigned int remaining;
	struct qmi_txn txn;
	const u8 *temp;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	temp = ar->normal_mode_fw.board_data;
	remaining = ar->normal_mode_fw.board_len;

	while (remaining) {
		req->valid = 1;
		req->file_id_valid = 1;
		req->file_id = 0;
		req->total_size_valid = 1;
		req->total_size = ar->normal_mode_fw.board_len;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->end_valid = 1;

		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
		} else {
			req->data_len = remaining;
			req->end = 1;
		}

		memcpy(req->data, temp, req->data_len);

		ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
				   wlfw_bdf_download_resp_msg_v01_ei,
				   &resp);
		if (ret < 0)
			goto out;

		ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
				       QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
				       WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
				       wlfw_bdf_download_req_msg_v01_ei, req);
		if (ret < 0) {
			qmi_txn_cancel(&txn);
			goto out;
		}

		ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);

		if (ret < 0)
			goto out;

		/* end = 1 triggers a CRC check on the BDF. If this fails, we
		 * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
		 * willing to use the BDF. For some platforms, all the valid
		 * released BDFs fail this CRC check, so attempt to detect this
		 * scenario and treat it as non-fatal.
		 */
		if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
		    !(req->end == 1 &&
		      resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
			ath10k_err(ar, "failed to download board data file: %d\n",
				   resp.resp.error);
			ret = -EINVAL;
			goto out;
		}

		remaining -= req->data_len;
		temp += req->data_len;
		req->seg_id++;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");

	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}

static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
{
	struct wlfw_cal_report_resp_msg_v01 resp = {};
	struct wlfw_cal_report_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int i, j = 0;
	int ret;

	if (ar_snoc->xo_cal_supported) {
		req.xo_cal_data_valid = 1;
		req.xo_cal_data = ar_snoc->xo_cal_data;
	}

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
		if (qmi->cal_data[i].total_size &&
		    qmi->cal_data[i].data) {
			req.meta_data[j] = qmi->cal_data[i].cal_id;
			j++;
		}
	}
	req.meta_data_len = j;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_CAL_REPORT_REQ_V01,
			       WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cal_report_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send calibration request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
	return 0;

out:
	return ret;
}

static int
ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_wlan_mode_resp_msg_v01 resp = {};
	struct wlfw_wlan_mode_req_msg_v01 req = {};
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_wlan_mode_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	req.mode = mode;
	req.hw_debug_valid = 1;
	req.hw_debug = 0;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_WLAN_MODE_REQ_V01,
			       WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_mode_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "mode request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
	return 0;

out:
	return ret;
}

static int
ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
			     struct ath10k_qmi_wlan_enable_cfg *config,
			     const char *version)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
	struct wlfw_wlan_cfg_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;
	u32 i;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_wlan_cfg_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	req->host_version_valid = 0;

	req->tgt_cfg_valid = 1;
	if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
		req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
	else
		req->tgt_cfg_len = config->num_ce_tgt_cfg;
	for (i = 0; i < req->tgt_cfg_len; i++) {
		req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
		req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
		req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
		req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
		req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
	}

	req->svc_cfg_valid = 1;
	if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
		req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
	else
		req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
	for (i = 0; i < req->svc_cfg_len; i++) {
		req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
		req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
		req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
	}

	req->shadow_reg_valid = 1;
	if (config->num_shadow_reg_cfg >
	    QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
		req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
	else
		req->shadow_reg_len = config->num_shadow_reg_cfg;

	memcpy(req->shadow_reg, config->shadow_reg_cfg,
	       sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_WLAN_CFG_REQ_V01,
			       WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_cfg_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send config request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}

int ath10k_qmi_wlan_enable(struct ath10k *ar,
			   struct ath10k_qmi_wlan_enable_cfg *config,
			   enum wlfw_driver_mode_enum_v01 mode,
			   const char *version)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
		   mode, config);

	ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
	if (ret) {
		ath10k_err(ar, "failed to send qmi config: %d\n", ret);
		return ret;
	}

	ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
	if (ret) {
		ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
		return ret;
	}

	return 0;
}

int ath10k_qmi_wlan_disable(struct ath10k *ar)
{
	return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
}

static void ath10k_qmi_add_wlan_ver_smem(struct ath10k *ar, const char *fw_build_id)
{
	u8 *table_ptr;
	size_t smem_item_size;
	const u32 smem_img_idx_wlan = SMEM_IMAGE_TABLE_CNSS_INDEX *
				      SMEM_IMAGE_VERSION_ENTRY_SIZE;

	table_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
				  SMEM_IMAGE_VERSION_TABLE,
				  &smem_item_size);

	if (IS_ERR(table_ptr)) {
		ath10k_err(ar, "smem image version table not found\n");
		return;
	}

	if (smem_img_idx_wlan + SMEM_IMAGE_VERSION_ENTRY_SIZE >
	    smem_item_size) {
		ath10k_err(ar, "smem block size too small: %zu\n",
			   smem_item_size);
		return;
	}

	strscpy(table_ptr + smem_img_idx_wlan, fw_build_id,
		SMEM_IMAGE_VERSION_NAME_SIZE);
}

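/* Fetch chip, board, SoC and firmware version information from the firmware,
 * cache it for board file selection and informational logging, and publish
 * the firmware build ID to the SMEM image version table.
 */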
static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_cap_resp_msg_v01 *resp;
	struct wlfw_cap_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_CAP_REQ_V01,
			       WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cap_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send capability request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp->chip_info_valid) {
		qmi->chip_info.chip_id = resp->chip_info.chip_id;
		qmi->chip_info.chip_family = resp->chip_info.chip_family;
	} else {
		qmi->chip_info.chip_id = 0xFF;
	}

	if (resp->board_info_valid)
		qmi->board_info.board_id = resp->board_info.board_id;
	else
		qmi->board_info.board_id = 0xFF;

	if (resp->soc_info_valid)
		qmi->soc_info.soc_id = resp->soc_info.soc_id;

	if (resp->fw_version_info_valid) {
		qmi->fw_version = resp->fw_version_info.fw_version;
		strscpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
			sizeof(qmi->fw_build_timestamp));
	}

	if (resp->fw_build_id_valid)
		strscpy(qmi->fw_build_id, resp->fw_build_id,
			MAX_BUILD_ID_LEN + 1);

	if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
		ath10k_info(ar, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
			    qmi->chip_info.chip_id, qmi->chip_info.chip_family,
			    qmi->board_info.board_id, qmi->soc_info.soc_id);
		ath10k_info(ar, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
			    qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
	}

	if (resp->fw_build_id_valid)
		ath10k_qmi_add_wlan_ver_smem(ar, qmi->fw_build_id);

	kfree(resp);
	return 0;

out:
	kfree(resp);
	return ret;
}

static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
{
	struct wlfw_host_cap_resp_msg_v01 resp = {};
	struct wlfw_host_cap_req_msg_v01 req = {};
	const struct qmi_elem_info *req_ei;
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	req.daemon_support_valid = 1;
	req.daemon_support = 0;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
		req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
	else
		req_ei = wlfw_host_cap_req_msg_v01_ei;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_HOST_CAP_REQ_V01,
			       WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send host capability request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	/* older FW didn't support this request, which is not fatal */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
	    resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
		ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
	return 0;

out:
	return ret;
}

int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct wlfw_ini_resp_msg_v01 resp = {};
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_ini_req_msg_v01 req = {};
	struct qmi_txn txn;
	int ret;

	req.enablefwlog_valid = 1;
	req.enablefwlog = fw_log_mode;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_ini_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_INI_REQ_V01,
			       WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_ini_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send fw log request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "fw log request rejected: %d\n",
			   resp.resp.error);
		ret = -EINVAL;
		goto out;
	}
	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi fw log request completed, mode: %d\n",
		   fw_log_mode);
	return 0;

out:
	return ret;
}

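/* Register for the fw-ready and msa-ready indications (and XO calibration
 * indications where supported). The response also reports whether the
 * firmware is already up, which is cached in qmi->fw_ready.
 */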
static int
ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_ind_register_resp_msg_v01 resp = {};
	struct wlfw_ind_register_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	req.client_id_valid = 1;
	req.client_id = ATH10K_QMI_CLIENT_ID;
	req.fw_ready_enable_valid = 1;
	req.fw_ready_enable = 1;
	req.msa_ready_enable_valid = 1;
	req.msa_ready_enable = 1;

	if (ar_snoc->xo_cal_supported) {
		req.xo_cal_enable_valid = 1;
		req.xo_cal_enable = 1;
	}

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_ind_register_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_IND_REGISTER_REQ_V01,
			       WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_ind_register_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send indication register request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp.fw_status_valid) {
		if (resp.fw_status & QMI_WLFW_FW_READY_V01)
			qmi->fw_ready = true;
	}
	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
	return 0;

out:
	return ret;
}

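/* Bring-up sequence run when the wlfw QMI service appears: register for
 * indications, exchange host capabilities, query and map the MSA regions,
 * signal MSA readiness and finally read the firmware capabilities.
 */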
static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	int ret;

	ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
	if (ret)
		return;

	if (qmi->fw_ready) {
		ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
		return;
	}

	ret = ath10k_qmi_host_cap_send_sync(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
	if (ret)
		return;

	/*
	 * HACK: sleep for a while between receiving the msa info response
	 * and the XPU update to prevent SDM845 from crashing due to a security
	 * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
	 */
	msleep(20);

	ret = ath10k_qmi_setup_msa_permissions(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
	if (ret)
		goto err_setup_msa;

	ret = ath10k_qmi_cap_send_sync_msg(qmi);
	if (ret)
		goto err_setup_msa;

	return;

err_setup_msa:
	ath10k_qmi_remove_msa_permission(qmi);
}

static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	int ret;

	ar->hif.bus = ATH10K_BUS_SNOC;
	ar->id.qmi_ids_valid = true;
	ar->id.qmi_board_id = qmi->board_info.board_id;
	ar->id.qmi_chip_id = qmi->chip_info.chip_id;
	ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;

	ret = ath10k_core_check_dt(ar);
	if (ret)
		ath10k_dbg(ar, ATH10K_DBG_QMI, "DT bdf variant name not set.\n");

	return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
}

static int
ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
			     enum ath10k_qmi_driver_event_type type,
			     void *data)
{
	struct ath10k_qmi_driver_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

	event->type = type;
	event->data = data;

	spin_lock(&qmi->event_lock);
	list_add_tail(&event->list, &qmi->event_list);
	spin_unlock(&qmi->event_lock);

	queue_work(qmi->event_wq, &qmi->event_work);

	return 0;
}

static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_qmi_remove_msa_permission(qmi);
	ath10k_core_free_board_files(ar);
	if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags) &&
	    !test_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags))
		ath10k_snoc_fw_crashed_dump(ar);

	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
}

static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
{
	int ret;

	ret = ath10k_qmi_fetch_board_file(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_send_cal_report_req(qmi);

out:
	return;
}

static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);

	return 0;
}

static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
				    struct sockaddr_qrtr *sq,
				    struct qmi_txn *txn, const void *data)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
}

static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
				     struct sockaddr_qrtr *sq,
				     struct qmi_txn *txn, const void *data)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
}

static const struct qmi_msg_handler qmi_msg_handler[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_READY_IND_V01,
		.ei = wlfw_fw_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
		.fn = ath10k_qmi_fw_ready_ind,
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_MSA_READY_IND_V01,
		.ei = wlfw_msa_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
		.fn = ath10k_qmi_msa_ready_ind,
	},
	{}
};

static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
				 struct qmi_service *service)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
	struct sockaddr_qrtr *sq = &qmi->sq;
	struct ath10k *ar = qmi->ar;
	int ret;

	sq->sq_family = AF_QIPCRTR;
	sq->sq_node = service->node;
	sq->sq_port = service->port;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");

	ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
			     sizeof(qmi->sq), 0);
	if (ret) {
		ath10k_err(ar, "failed to connect to a remote QMI service port\n");
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);

	return ret;
}

static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
				  struct qmi_service *service)
{
	struct ath10k_qmi *qmi =
		container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	qmi->fw_ready = false;

	/*
	 * The del_server event is to be processed only if coming from
	 * the qmi server. The qmi infrastructure sends del_server, when
	 * any client releases the qmi handle. In this case do not process
	 * this del_server event.
	 */
	if (qmi->state == ATH10K_QMI_STATE_INIT_DONE)
		ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT,
					     NULL);
}

static const struct qmi_ops ath10k_qmi_ops = {
	.new_server = ath10k_qmi_new_server,
	.del_server = ath10k_qmi_del_server,
};

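/* Drain the driver event list on the ordered workqueue; the event lock is
 * dropped around each handler so new events can still be posted concurrently.
 */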
static void ath10k_qmi_driver_event_work(struct work_struct *work)
{
	struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
					      event_work);
	struct ath10k_qmi_driver_event *event;
	struct ath10k *ar = qmi->ar;

	spin_lock(&qmi->event_lock);
	while (!list_empty(&qmi->event_list)) {
		event = list_first_entry(&qmi->event_list,
					 struct ath10k_qmi_driver_event, list);
		list_del(&event->list);
		spin_unlock(&qmi->event_lock);

		switch (event->type) {
		case ATH10K_QMI_EVENT_SERVER_ARRIVE:
			ath10k_qmi_event_server_arrive(qmi);
			break;
		case ATH10K_QMI_EVENT_SERVER_EXIT:
			ath10k_qmi_event_server_exit(qmi);
			break;
		case ATH10K_QMI_EVENT_FW_READY_IND:
			ath10k_qmi_event_fw_ready_ind(qmi);
			break;
		case ATH10K_QMI_EVENT_MSA_READY_IND:
			ath10k_qmi_event_msa_ready(qmi);
			break;
		default:
			ath10k_warn(ar, "invalid event type: %d", event->type);
			break;
		}
		kfree(event);
		spin_lock(&qmi->event_lock);
	}
	spin_unlock(&qmi->event_lock);
}

int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct device *dev = ar->dev;
	struct ath10k_qmi *qmi;
	int ret;

	qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
	if (!qmi)
		return -ENOMEM;

	qmi->ar = ar;
	ar_snoc->qmi = qmi;

	if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
		qmi->msa_fixed_perm = true;

	ret = qmi_handle_init(&qmi->qmi_hdl,
			      WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
			      &ath10k_qmi_ops, qmi_msg_handler);
	if (ret)
		goto err;

	qmi->event_wq = alloc_ordered_workqueue("ath10k_qmi_driver_event", 0);
	if (!qmi->event_wq) {
		ath10k_err(ar, "failed to allocate workqueue\n");
		ret = -EFAULT;
		goto err_release_qmi_handle;
	}

	INIT_LIST_HEAD(&qmi->event_list);
	spin_lock_init(&qmi->event_lock);
	INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);

	ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
			     WLFW_SERVICE_VERS_V01, 0);
	if (ret)
		goto err_qmi_lookup;

	qmi->state = ATH10K_QMI_STATE_INIT_DONE;
	return 0;

err_qmi_lookup:
	destroy_workqueue(qmi->event_wq);

err_release_qmi_handle:
	qmi_handle_release(&qmi->qmi_hdl);

err:
	kfree(qmi);
	return ret;
}

int ath10k_qmi_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;

	qmi->state = ATH10K_QMI_STATE_DEINIT;
	qmi_handle_release(&qmi->qmi_hdl);
	cancel_work_sync(&qmi->event_work);
	destroy_workqueue(qmi->event_wq);
	kfree(qmi);
	ar_snoc->qmi = NULL;

	return 0;
}