Loading...
Note: File does not exist in v3.1.
1// SPDX-License-Identifier: ISC
2/*
3 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
4 */
5
6#include <linux/clk.h>
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/of.h>
10#include <linux/of_device.h>
11#include <linux/platform_device.h>
12#include <linux/property.h>
13#include <linux/regulator/consumer.h>
14#include <linux/of_address.h>
15#include <linux/iommu.h>
16
17#include "ce.h"
18#include "coredump.h"
19#include "debug.h"
20#include "hif.h"
21#include "htc.h"
22#include "snoc.h"
23
24#define ATH10K_SNOC_RX_POST_RETRY_MS 50
25#define CE_POLL_PIPE 4
26#define ATH10K_SNOC_WAKE_IRQ 2
27
/* IRQ names registered with request_irq(), one per copy engine.
 * Fully const-qualified: neither the pointers nor the strings are
 * ever modified, and request_irq() only needs a const char *.
 */
static const char * const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
42
/* Names of the regulator supplies the WCN3990 needs powered on. */
static const char * const ath10k_regulators[] = {
	"vdd-0.8-cx-mx",
	"vdd-1.8-xo",
	"vdd-1.3-rfa",
	"vdd-3.3-ch0",
};
49
/* Names of the clocks the WCN3990 needs enabled. */
static const char * const ath10k_clocks[] = {
	"cxo_ref_clk_pin", "qdss",
};
53
54static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
55static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
56static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
57static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
58static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
59static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
60
/* Per-chip driver data for the WCN3990: hardware revision, the DMA
 * addressing capability (35-bit mask) and the MSA region size (1 MiB).
 */
static const struct ath10k_snoc_drv_priv drv_priv = {
	.hw_rev = ATH10K_HW_WCN3990,
	.dma_mask = DMA_BIT_MASK(35),
	.msa_size = 0x100000,
};
66
67#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
68#define WCN3990_DST_WR_IDX_OFFSET 0x40
69
/* Shadow register configuration passed to the firmware via QMI in
 * ath10k_snoc_wlan_enable(): for each CE, which write-index register
 * (source vs destination ring) is shadowed.  All fields are
 * little-endian as the table is consumed by the target.
 *
 * NOTE(review): CE 7 appears twice (SRC and DST offsets) — presumably
 * because CE7 is the bidirectional diagnostic pipe (it has both src
 * and dest entries in host_ce_config_wlan); confirm against firmware
 * interface docs before changing.
 */
static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{
		.ce_id = __cpu_to_le16(0),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(3),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(4),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(5),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(7),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(1),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(2),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(7),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(8),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(9),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(10),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(11),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},
};
131
/* Host-side copy-engine attributes, indexed by CE id.  src_nentries /
 * dest_nentries size the host rings; src_sz_max bounds per-buffer
 * transfer size; send_cb/recv_cb are the per-CE completion handlers.
 * CE4 uses CE_ATTR_DIS_INTR (interrupts disabled), so HTT tx
 * completions are reaped by polling — see CE_POLL_PIPE users below.
 */
static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = ath10k_snoc_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htt_tx_cb,
	},

	/* CE5: target->host HTT (ipa_uc->target ) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = 2048,
		.dest_nentries = 2,
	},

	/* CE8: Target to uMC */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9 target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE11: target -> host PKTLOG */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_pktlog_rx_cb,
	},
};
238
/* Target-side copy-engine pipe configuration, sent to the firmware via
 * QMI in ath10k_snoc_wlan_enable().  All fields are little-endian as
 * the table crosses the host/target boundary.  Pipe directions are
 * from the target's point of view.
 */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(1024),
		.nbytes_max = __cpu_to_le32(64),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(16384),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		/* NOTE(review): raw value 4, not a named PIPEDIR_* constant
		 * here — confirm its meaning against the firmware interface
		 * before touching it.
		 */
		.pipedir = __cpu_to_le32(4),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 Target to uMC */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target->host HTT */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE10 target->host HTT */
	{
		.pipenum = __cpu_to_le32(10),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE11 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(11),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},
};
360
/* HTC service id -> CE pipe routing table, shared with the firmware via
 * QMI and walked by ath10k_snoc_hif_map_service_to_pipe().  Each entry
 * is { service_id, pipedir, pipenum } in little-endian.  The all-zero
 * entry must remain last: it terminates the table for the target.
 */
static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(5),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(9),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(10),
	},
	{ /* in = DL = target -> host pktlog */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(11),
	},
	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
470
/* Write a 32-bit value to a device register at @offset from the
 * ioremapped register base.
 */
static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	iowrite32(value, ar_snoc->mem + offset);
}
477
478static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
479{
480 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
481 u32 val;
482
483 val = ioread32(ar_snoc->mem + offset);
484
485 return val;
486}
487
/* Allocate one rx skb, DMA-map it and hand it to the copy engine's
 * destination ring under ce_lock.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails, -EIO if
 * DMA mapping fails, or the error from ce_rx_post_buf (e.g. -ENOSPC
 * when the ring is full).  On any failure the skb is unmapped/freed
 * here, so the caller owns nothing.
 */
static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	/* CE DMA expects 4-byte aligned buffers; warn once if not. */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	/* Map the whole usable buffer (current len plus tailroom) so the
	 * device may fill it completely.
	 */
	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	/* Stash the DMA address in the skb cb for unmap on completion. */
	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
526
/* Refill one rx pipe's destination ring with as many buffers as there
 * are free slots.  Pipes with no rx buffers (buf_sz == 0) or no dest
 * ring are skipped.  On a transient failure (other than the ring
 * filling up, -ENOSPC) the rx_post_retry timer is armed to try again
 * later.
 *
 * NOTE(review): the timer is armed at jiffies + ATH10K_SNOC_RX_POST_RETRY_MS
 * without msecs_to_jiffies() — units look mixed, but this matches the
 * pattern used elsewhere in ath10k; confirm before changing.
 */
static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);
	while (num--) {
		ret = __ath10k_snoc_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
			mod_timer(&ar_snoc->rx_post_retry, jiffies +
				  ATH10K_SNOC_RX_POST_RETRY_MS);
			break;
		}
	}
}
556
557static void ath10k_snoc_rx_post(struct ath10k *ar)
558{
559 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
560 int i;
561
562 for (i = 0; i < CE_COUNT; i++)
563 ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
564}
565
/* Common rx completion path: drain all completed buffers from a CE,
 * unmap them, deliver each via @callback, then replenish the pipe.
 *
 * Buffers whose reported length exceeds the mapped size are dropped
 * with a warning.  Delivery is done in a second pass from a local list
 * so the CE completion queue is drained before handlers run.
 */
static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				      void (*callback)(struct ath10k *ar,
						       struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		/* max_nbytes is the size that was DMA-mapped at post time */
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);

		callback(ar, skb);
	}

	ath10k_snoc_rx_post_pipe(pipe_info);
}
606
/* rx completion for HTC control pipes: deliver to the HTC layer. */
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
611
/* rx completion for pipes carrying both HTT and HTC traffic. */
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
621
/* Called by lower (CE) layer when data is received from the Target.
 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
 */
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
629
/* Deliver an HTT rx frame: strip the HTC header and pass the payload
 * straight to the HTT target-to-host message handler.
 */
static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
635
/* rx completion for the HTT-only pipe; also polls CE4 (interrupts are
 * disabled on that pipe) before delivering the frames.
 */
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}
641
/* Timer callback armed by ath10k_snoc_rx_post_pipe() when posting rx
 * buffers failed transiently: retry replenishing all pipes.
 */
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
	struct ath10k *ar = ar_snoc->ar;

	ath10k_snoc_rx_post(ar);
}
649
/* tx completion for HTC pipes (CE0/CE3): collect all completed sends,
 * then report them to the HTC layer.  NULL transfer contexts (e.g.
 * gather fragments) are skipped.
 */
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}
667
/* tx completion for the HTT pipe (CE4): unmap each completed frame and
 * hand it back to HTT.  NULL transfer contexts are skipped.
 */
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}
682
/* HIF tx entry point: send a scatter-gather list of @n_items descriptors
 * on @pipe_id.  All but the last item are queued with CE_SEND_FLAG_GATHER;
 * the final item (flags 0) commits the whole transfer.  The CE lock is
 * held across all sends so the gather sequence stays contiguous in the
 * ring.
 *
 * Returns 0 on success or a negative error; on failure the i descriptors
 * already queued are reverted before the lock is dropped.
 */
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	int err, i = 0;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	spin_lock_bh(&ce->ce_lock);

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* Last item: no GATHER flag, completes the transfer. */
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);

	return 0;

err:
	/* Undo the i descriptors that were queued before the failure. */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}
735
736static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
737 struct bmi_target_info *target_info)
738{
739 target_info->version = ATH10K_HW_WCN3990;
740 target_info->type = ATH10K_HW_WCN3990;
741
742 return 0;
743}
744
/* Return the number of free source-ring entries on @pipe, i.e. how many
 * more sends the HIF can currently accept.
 */
static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
}
753
/* Reap tx completions on @pipe.  Unless @force is set, skip the reap
 * while more than half the source ring is still free — completions
 * will be collected later, cheaper in bulk.
 */
static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
						int force)
{
	int resources;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");

	if (!force) {
		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);

		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
769
/* Resolve @service_id to its uplink (*ul_pipe) and downlink (*dl_pipe)
 * CE numbers by scanning target_service_to_ce_map_wlan.  A PIPEDIR_INOUT
 * entry sets both.  The WARN_ONs flag duplicate table entries for the
 * same direction.
 *
 * Returns 0 when both directions were resolved, -ENOENT otherwise
 * (in which case *ul_pipe/*dl_pipe may be partially written).
 */
static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct ce_service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (!ul_set || !dl_set)
		return -ENOENT;

	return 0;
}
815
/* Report the default (HTC control) pipe pair.  The lookup cannot fail
 * for RSVD_CTRL since it is always present in the service map, hence
 * the ignored return value.
 */
static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
						  ATH10K_HTC_SVC_ID_RSVD_CTRL,
						  ul_pipe, dl_pipe);
}
825
/* Mask all copy-engine interrupts. */
static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}
830
/* Unmask all copy-engine interrupts. */
static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}
835
/* Release every rx buffer still posted on a pipe's destination ring:
 * clear the per-transfer context, unmap the DMA buffer and free the
 * skb.  No-op for pipes without a dest ring or without rx buffers.
 */
static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
867
/* Flush every tx buffer still queued on a pipe's source ring by
 * reporting it to the HTC completion handler (which owns freeing).
 * No-op for pipes without a source ring or without buffers.
 */
static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}
896
/* Stop the rx replenish timer and drop all rx/tx buffers on every
 * pipe.  Called on shutdown after interrupts/NAPI are quiesced.
 */
static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info;
	int pipe_num;

	del_timer_sync(&ar_snoc->rx_post_retry);
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_snoc->pipe_info[pipe_num];
		ath10k_snoc_rx_pipe_cleanup(pipe_info);
		ath10k_snoc_tx_pipe_cleanup(pipe_info);
	}
}
910
/* HIF stop: mask CE interrupts (unless a crash flush already did),
 * quiesce and disable NAPI, then release all pipe buffers.  Ordering
 * matters: interrupts off before NAPI sync, NAPI off before freeing.
 */
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		ath10k_snoc_irq_disable(ar);

	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
	ath10k_snoc_buffer_cleanup(ar);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
921
/* HIF start: enable NAPI and CE interrupts, prime the rx rings, and
 * clear the recovery flag now that the restart completed.
 * Always returns 0.
 */
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	napi_enable(&ar->napi);
	ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);

	clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	return 0;
}
936
937static int ath10k_snoc_init_pipes(struct ath10k *ar)
938{
939 int i, ret;
940
941 for (i = 0; i < CE_COUNT; i++) {
942 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
943 if (ret) {
944 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
945 i, ret);
946 return ret;
947 }
948 }
949
950 return 0;
951}
952
/* Build the QMI wlan-enable configuration (target CE pipes, service
 * map, shadow registers) and send it to the firmware together with the
 * requested driver mode.
 *
 * Returns 0 on success, -EINVAL for an unsupported @fw_mode, or the
 * error from ath10k_qmi_wlan_enable().
 */
static int ath10k_snoc_wlan_enable(struct ath10k *ar,
				   enum ath10k_firmware_mode fw_mode)
{
	struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
	struct ath10k_qmi_wlan_enable_cfg cfg;
	enum wlfw_driver_mode_enum_v01 mode;
	int pipe_num;

	/* Repack target_ce_config_wlan into the QMI wire structure. */
	for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
		tgt_cfg[pipe_num].pipe_num =
				target_ce_config_wlan[pipe_num].pipenum;
		tgt_cfg[pipe_num].pipe_dir =
				target_ce_config_wlan[pipe_num].pipedir;
		tgt_cfg[pipe_num].nentries =
				target_ce_config_wlan[pipe_num].nentries;
		tgt_cfg[pipe_num].nbytes_max =
				target_ce_config_wlan[pipe_num].nbytes_max;
		tgt_cfg[pipe_num].flags =
				target_ce_config_wlan[pipe_num].flags;
		tgt_cfg[pipe_num].reserved = 0;
	}

	/* NOTE(review): the element counts below divide the byte size of
	 * one array by the element size of the QMI-facing struct, not by
	 * the array's own element type — this assumes the two layouts
	 * have equal size; confirm before changing to ARRAY_SIZE().
	 */
	cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
				sizeof(struct ath10k_tgt_pipe_cfg);
	cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
		&tgt_cfg;
	cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
				  sizeof(struct ath10k_svc_pipe_cfg);
	cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
		&target_service_to_ce_map_wlan;
	cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
	cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
		&target_shadow_reg_cfg_map;

	switch (fw_mode) {
	case ATH10K_FIRMWARE_MODE_NORMAL:
		mode = QMI_WLFW_MISSION_V01;
		break;
	case ATH10K_FIRMWARE_MODE_UTF:
		mode = QMI_WLFW_FTM_V01;
		break;
	default:
		ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
		return -EINVAL;
	}

	return ath10k_qmi_wlan_enable(ar, &cfg, mode,
				      NULL);
}
1002
/* Send the QMI wlan-disable request unless both crash-flush and
 * recovery flags are set (i.e. a firmware-crash recovery is in
 * progress and the firmware will be restarted anyway).
 */
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	/* If both ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY
	 * flags are not set, it means that the driver has restarted
	 * due to a crash inject via debugfs. In this case, the driver
	 * needs to restart the firmware and hence send qmi wlan disable,
	 * during the driver restart sequence.
	 */
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
	    !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		ath10k_qmi_wlan_disable(ar);
}
1017
/* HIF power down: disable wlan via QMI and free the CE retention
 * register index (RRI) allocation — the reverse of hif_power_up.
 */
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_snoc_wlan_disable(ar);
	ath10k_ce_free_rri(ar);
}
1025
/* HIF power up: enable wlan firmware via QMI, allocate the CE RRI
 * area, then initialize all CE pipes.  On pipe-init failure the QMI
 * enable is rolled back.
 *
 * Returns 0 on success or a negative error.
 */
static int ath10k_snoc_hif_power_up(struct ath10k *ar,
				    enum ath10k_firmware_mode fw_mode)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	ret = ath10k_snoc_wlan_enable(ar, fw_mode);
	if (ret) {
		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
		return ret;
	}

	ath10k_ce_alloc_rri(ar);

	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_wlan_enable;
	}

	return 0;

err_wlan_enable:
	ath10k_snoc_wlan_disable(ar);

	return ret;
}
1055
1056static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar,
1057 u8 fw_log_mode)
1058{
1059 u8 fw_dbg_mode;
1060
1061 if (fw_log_mode)
1062 fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE;
1063 else
1064 fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG;
1065
1066 return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode);
1067}
1068
1069#ifdef CONFIG_PM
/* System suspend hook: arm the designated CE IRQ (ATH10K_SNOC_WAKE_IRQ)
 * as a wakeup source.  Fails with -EPERM when the device is not
 * configured for wakeup.
 */
static int ath10k_snoc_hif_suspend(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ret;

	if (!device_may_wakeup(ar->dev))
		return -EPERM;

	ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
	if (ret) {
		ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");

	return ret;
}
1088
/* System resume hook: disarm the wakeup IRQ armed in hif_suspend.
 * Fails with -EPERM when the device is not configured for wakeup.
 */
static int ath10k_snoc_hif_resume(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ret;

	if (!device_may_wakeup(ar->dev))
		return -EPERM;

	ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
	if (ret) {
		ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");

	return ret;
}
1107#endif
1108
/* HIF operations vtable exposed to the ath10k core for the SNoC bus. */
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
	.start		= ath10k_snoc_hif_start,
	.stop		= ath10k_snoc_hif_stop,
	.map_service_to_pipe	= ath10k_snoc_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_snoc_hif_get_default_pipe,
	.power_up		= ath10k_snoc_hif_power_up,
	.power_down		= ath10k_snoc_hif_power_down,
	.tx_sg			= ath10k_snoc_hif_tx_sg,
	.send_complete_check	= ath10k_snoc_hif_send_complete_check,
	.get_free_queue_number	= ath10k_snoc_hif_get_free_queue_number,
	.get_target_info	= ath10k_snoc_hif_get_target_info,
	.set_target_log_mode    = ath10k_snoc_hif_set_target_log_mode,

#ifdef CONFIG_PM
	.suspend                = ath10k_snoc_hif_suspend,
	.resume                 = ath10k_snoc_hif_resume,
#endif
};
1129
/* Low-level register accessors used by the shared CE code. */
static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
};
1134
1135static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
1136{
1137 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1138 int i;
1139
1140 for (i = 0; i < CE_COUNT_MAX; i++) {
1141 if (ar_snoc->ce_irqs[i].irq_line == irq)
1142 return i;
1143 }
1144 ath10k_err(ar, "No matching CE id for irq %d\n", irq);
1145
1146 return -EINVAL;
1147}
1148
/* Per-CE hard IRQ handler: validate the CE id, mask CE interrupts and
 * hand processing to NAPI.  Always returns IRQ_HANDLED (even for an
 * unexpected IRQ, which is only logged).
 */
static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	/* Interrupts stay masked until the NAPI poll completes. */
	ath10k_snoc_irq_disable(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}
1166
/* NAPI poll: service all CEs and process HTT tx/rx completions up to
 * @budget.  When a crash flush is in progress, complete immediately
 * without touching the hardware.  Interrupts (masked in the IRQ
 * handler) are re-enabled only when the budget was not exhausted.
 *
 * Returns the number of completions processed.
 */
static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);
	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete(ctx);
		ath10k_snoc_irq_enable(ar);
	}

	return done;
}
1187
/* Register the NAPI poll handler with the driver's dummy netdev. */
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
		       ATH10K_NAPI_BUDGET);
}
1193
/* Request one IRQ per copy engine (names from ce_name[]).  On failure,
 * free the IRQs already requested and return the error.
 */
static int ath10k_snoc_request_irq(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int irqflags = IRQF_TRIGGER_RISING;
	int ret, id;

	for (id = 0; id < CE_COUNT_MAX; id++) {
		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
				  ath10k_snoc_per_engine_handler,
				  irqflags, ce_name[id], ar);
		if (ret) {
			ath10k_err(ar,
				   "failed to register IRQ handler for CE %d: %d\n",
				   id, ret);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Unwind only the IRQs requested before the failing one. */
	for (id -= 1; id >= 0; id--)
		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);

	return ret;
}
1220
1221static void ath10k_snoc_free_irq(struct ath10k *ar)
1222{
1223 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1224 int id;
1225
1226 for (id = 0; id < CE_COUNT_MAX; id++)
1227 free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1228}
1229
1230static int ath10k_snoc_resource_init(struct ath10k *ar)
1231{
1232 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1233 struct platform_device *pdev;
1234 struct resource *res;
1235 int i, ret = 0;
1236
1237 pdev = ar_snoc->dev;
1238 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
1239 if (!res) {
1240 ath10k_err(ar, "Memory base not found in DT\n");
1241 return -EINVAL;
1242 }
1243
1244 ar_snoc->mem_pa = res->start;
1245 ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
1246 resource_size(res));
1247 if (!ar_snoc->mem) {
1248 ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
1249 &ar_snoc->mem_pa);
1250 return -EINVAL;
1251 }
1252
1253 for (i = 0; i < CE_COUNT; i++) {
1254 res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
1255 if (!res) {
1256 ath10k_err(ar, "failed to get IRQ%d\n", i);
1257 ret = -ENODEV;
1258 goto out;
1259 }
1260 ar_snoc->ce_irqs[i].irq_line = res->start;
1261 }
1262
1263 ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
1264 &ar_snoc->xo_cal_data);
1265 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret);
1266 if (ret == 0) {
1267 ar_snoc->xo_cal_supported = true;
1268 ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
1269 ar_snoc->xo_cal_data);
1270 }
1271 ret = 0;
1272
1273out:
1274 return ret;
1275}
1276
1277static void ath10k_snoc_quirks_init(struct ath10k *ar)
1278{
1279 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1280 struct device *dev = &ar_snoc->dev->dev;
1281
1282 if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
1283 set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
1284}
1285
1286int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
1287{
1288 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1289 struct ath10k_bus_params bus_params = {};
1290 int ret;
1291
1292 if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
1293 return 0;
1294
1295 switch (type) {
1296 case ATH10K_QMI_EVENT_FW_READY_IND:
1297 if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
1298 queue_work(ar->workqueue, &ar->restart_work);
1299 break;
1300 }
1301
1302 bus_params.dev_type = ATH10K_DEV_TYPE_LL;
1303 bus_params.chip_id = ar_snoc->target_info.soc_version;
1304 ret = ath10k_core_register(ar, &bus_params);
1305 if (ret) {
1306 ath10k_err(ar, "Failed to register driver core: %d\n",
1307 ret);
1308 return ret;
1309 }
1310 set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
1311 break;
1312 case ATH10K_QMI_EVENT_FW_DOWN_IND:
1313 set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
1314 set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
1315 break;
1316 default:
1317 ath10k_err(ar, "invalid fw indication: %llx\n", type);
1318 return -EINVAL;
1319 }
1320
1321 return 0;
1322}
1323
1324static int ath10k_snoc_setup_resource(struct ath10k *ar)
1325{
1326 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1327 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1328 struct ath10k_snoc_pipe *pipe;
1329 int i, ret;
1330
1331 timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
1332 spin_lock_init(&ce->ce_lock);
1333 for (i = 0; i < CE_COUNT; i++) {
1334 pipe = &ar_snoc->pipe_info[i];
1335 pipe->ce_hdl = &ce->ce_states[i];
1336 pipe->pipe_num = i;
1337 pipe->hif_ce_state = ar;
1338
1339 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1340 if (ret) {
1341 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1342 i, ret);
1343 return ret;
1344 }
1345
1346 pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
1347 }
1348 ath10k_snoc_init_napi(ar);
1349
1350 return 0;
1351}
1352
1353static void ath10k_snoc_release_resource(struct ath10k *ar)
1354{
1355 int i;
1356
1357 netif_napi_del(&ar->napi);
1358 for (i = 0; i < CE_COUNT; i++)
1359 ath10k_ce_free_pipe(ar, i);
1360}
1361
1362static int ath10k_hw_power_on(struct ath10k *ar)
1363{
1364 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1365 int ret;
1366
1367 ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1368
1369 ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
1370 if (ret)
1371 return ret;
1372
1373 ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
1374 if (ret)
1375 goto vreg_off;
1376
1377 return ret;
1378
1379vreg_off:
1380 regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
1381 return ret;
1382}
1383
/* Power down the SoC in reverse order of ath10k_hw_power_on():
 * clocks first, then the voltage regulators.
 *
 * Returns the regulator_bulk_disable() result (0 or negative errno).
 */
static int ath10k_hw_power_off(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");

	clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);

	return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
}
1394
/* Copy the MSA (firmware shared memory) region into the coredump
 * buffer prepared by ath10k_coredump_new(), prefixed with a RAM dump
 * header.  Silently does nothing when no dump buffer or memory layout
 * is available.
 */
static void ath10k_msa_dump_memory(struct ath10k *ar,
				   struct ath10k_fw_crash_data *crash_data)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	size_t buf_len;
	u8 *buf;

	if (!crash_data || !crash_data->ramdump_buf)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	/* Only the first region of the layout table is dumped here. */
	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;
	memset(buf, 0, buf_len);

	/* Reserve space for the header. */
	hdr = (void *)buf;
	buf += sizeof(*hdr);
	buf_len -= sizeof(*hdr);

	hdr->region_type = cpu_to_le32(current_region->type);
	hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
	hdr->length = cpu_to_le32(ar->msa.mem_size);

	/* NOTE(review): buf_len is not re-checked before the memcpy below;
	 * this assumes ramdump_buf was sized for header + full MSA by the
	 * coredump allocation path — confirm against ath10k_coredump_new().
	 */
	if (current_region->len < ar->msa.mem_size) {
		/* Region table caps the dump below the MSA size. */
		memcpy(buf, ar->msa.vaddr, current_region->len);
		ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
			    current_region->len, ar->msa.mem_size);
	} else {
		memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
	}
}
1434
/* Firmware crash path: bump the crash counter, create a coredump entry,
 * log the crash GUID plus driver info, and dump the MSA region.  The
 * whole sequence is serialized by ar->dump_mutex.
 */
void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char guid[UUID_STRING_LEN + 1];

	mutex_lock(&ar->dump_mutex);

	/* data_lock protects the stats block against concurrent readers. */
	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_crash_counter++;
	spin_unlock_bh(&ar->data_lock);

	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	/* crash_data may be NULL; ath10k_msa_dump_memory() handles that. */
	ath10k_msa_dump_memory(ar, crash_data);
	mutex_unlock(&ar->dump_mutex);
}
1458
1459static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
1460{
1461 struct device *dev = ar->dev;
1462 struct device_node *node;
1463 struct resource r;
1464 int ret;
1465
1466 node = of_parse_phandle(dev->of_node, "memory-region", 0);
1467 if (node) {
1468 ret = of_address_to_resource(node, 0, &r);
1469 if (ret) {
1470 dev_err(dev, "failed to resolve msa fixed region\n");
1471 return ret;
1472 }
1473 of_node_put(node);
1474
1475 ar->msa.paddr = r.start;
1476 ar->msa.mem_size = resource_size(&r);
1477 ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
1478 ar->msa.mem_size,
1479 MEMREMAP_WT);
1480 if (IS_ERR(ar->msa.vaddr)) {
1481 dev_err(dev, "failed to map memory region: %pa\n",
1482 &r.start);
1483 return PTR_ERR(ar->msa.vaddr);
1484 }
1485 } else {
1486 ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
1487 &ar->msa.paddr,
1488 GFP_KERNEL);
1489 if (!ar->msa.vaddr) {
1490 ath10k_err(ar, "failed to allocate dma memory for msa region\n");
1491 return -ENOMEM;
1492 }
1493 ar->msa.mem_size = msa_size;
1494 }
1495
1496 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad , msa.vaddr: 0x%p\n",
1497 &ar->msa.paddr,
1498 ar->msa.vaddr);
1499
1500 return 0;
1501}
1502
/* Host-managed firmware setup.
 *
 * If the DT has no "wifi-firmware" child node, the firmware is managed
 * by TrustZone and nothing is done here (use_tz is set).  Otherwise a
 * child platform device is registered for the firmware, placed in its
 * own IOMMU domain, and the MSA region is mapped into that domain so
 * the firmware can access it.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released in reverse order.
 */
static int ath10k_fw_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct device *host_dev = &ar_snoc->dev->dev;
	struct platform_device_info info;
	struct iommu_domain *iommu_dom;
	struct platform_device *pdev;
	struct device_node *node;
	int ret;

	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
	if (!node) {
		/* No child node: firmware is handled by TrustZone. */
		ar_snoc->use_tz = true;
		return 0;
	}

	memset(&info, 0, sizeof(info));
	info.fwnode = &node->fwnode;
	info.parent = host_dev;
	info.name = node->name;
	info.dma_mask = DMA_BIT_MASK(32);

	pdev = platform_device_register_full(&info);
	if (IS_ERR(pdev)) {
		of_node_put(node);
		return PTR_ERR(pdev);
	}

	/* Attach the DT node so of_dma_configure()/IOMMU setup below can
	 * read the firmware node's properties.
	 */
	pdev->dev.of_node = node;

	ret = of_dma_configure(&pdev->dev, node, true);
	if (ret) {
		ath10k_err(ar, "dma configure fail: %d\n", ret);
		goto err_unregister;
	}

	ar_snoc->fw.dev = &pdev->dev;

	iommu_dom = iommu_domain_alloc(&platform_bus_type);
	if (!iommu_dom) {
		ath10k_err(ar, "failed to allocate iommu domain\n");
		ret = -ENOMEM;
		goto err_unregister;
	}

	ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
	if (ret) {
		ath10k_err(ar, "could not attach device: %d\n", ret);
		goto err_iommu_free;
	}

	ar_snoc->fw.iommu_domain = iommu_dom;
	/* Identity mapping: IOVA of the firmware region equals msa.paddr. */
	ar_snoc->fw.fw_start_addr = ar->msa.paddr;

	ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
			ar->msa.paddr, ar->msa.mem_size,
			IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		ath10k_err(ar, "failed to map firmware region: %d\n", ret);
		goto err_iommu_detach;
	}

	/* NOTE(review): ath10k_fw_deinit() unmaps fw.mapped_mem_size bytes,
	 * which is not updated here — confirm it is maintained elsewhere
	 * (e.g. by the QMI memory-request path).
	 */

	of_node_put(node);

	return 0;

err_iommu_detach:
	iommu_detach_device(iommu_dom, ar_snoc->fw.dev);

err_iommu_free:
	iommu_domain_free(iommu_dom);

err_unregister:
	/* NOTE(review): platform_device_unregister() may itself drop the
	 * dev.of_node reference assigned above; combined with the explicit
	 * of_node_put() below this looks like a possible double put —
	 * confirm against the driver-core of_node refcount rules.
	 */
	platform_device_unregister(pdev);
	of_node_put(node);

	return ret;
}
1581
1582static int ath10k_fw_deinit(struct ath10k *ar)
1583{
1584 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1585 const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
1586 struct iommu_domain *iommu;
1587 size_t unmapped_size;
1588
1589 if (ar_snoc->use_tz)
1590 return 0;
1591
1592 iommu = ar_snoc->fw.iommu_domain;
1593
1594 unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
1595 mapped_size);
1596 if (unmapped_size != mapped_size)
1597 ath10k_err(ar, "failed to unmap firmware: %zu\n",
1598 unmapped_size);
1599
1600 iommu_detach_device(iommu, ar_snoc->fw.dev);
1601 iommu_domain_free(iommu);
1602
1603 platform_device_unregister(to_platform_device(ar_snoc->fw.dev));
1604
1605 return 0;
1606}
1607
/* Device-tree match table; WCN3990 is the only supported SNOC target. */
static const struct of_device_id ath10k_snoc_dt_match[] = {
	{ .compatible = "qcom,wcn3990-wifi",
	 .data = &drv_priv,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1615
/* Platform probe: create the ath10k core, resolve platform resources
 * (registers, IRQs), acquire regulators and clocks, power the SoC on,
 * set up the MSA region and host-managed firmware, and finally start
 * the QMI client.  Each failure label unwinds exactly the steps that
 * completed before it.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_snoc_probe(struct platform_device *pdev)
{
	const struct ath10k_snoc_drv_priv *drv_data;
	struct ath10k_snoc *ar_snoc;
	struct device *dev;
	struct ath10k *ar;
	u32 msa_size;
	int ret;
	u32 i;

	dev = &pdev->dev;
	drv_data = device_get_match_data(dev);
	if (!drv_data) {
		dev_err(dev, "failed to find matching device tree id\n");
		return -EINVAL;
	}

	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
	if (ret) {
		dev_err(dev, "failed to set dma mask: %d\n", ret);
		return ret;
	}

	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
				drv_data->hw_rev, &ath10k_snoc_hif_ops);
	if (!ar) {
		dev_err(dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	/* Cross-link core and bus-private state. */
	ar_snoc = ath10k_snoc_priv(ar);
	ar_snoc->dev = pdev;
	platform_set_drvdata(pdev, ar);
	ar_snoc->ar = ar;
	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
	ar->ce_priv = &ar_snoc->ce;
	msa_size = drv_data->msa_size;

	ath10k_snoc_quirks_init(ar);

	ret = ath10k_snoc_resource_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_snoc_setup_resource(ar);
	if (ret) {
		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}
	ret = ath10k_snoc_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_release_resource;
	}

	/* Regulators are acquired via devm, so no explicit release path. */
	ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
	ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
				      sizeof(*ar_snoc->vregs), GFP_KERNEL);
	if (!ar_snoc->vregs) {
		ret = -ENOMEM;
		goto err_free_irq;
	}
	for (i = 0; i < ar_snoc->num_vregs; i++)
		ar_snoc->vregs[i].supply = ath10k_regulators[i];

	ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
				      ar_snoc->vregs);
	if (ret < 0)
		goto err_free_irq;

	/* Clocks are optional: absent clocks yield NULL handles. */
	ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
	ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
				     sizeof(*ar_snoc->clks), GFP_KERNEL);
	if (!ar_snoc->clks) {
		ret = -ENOMEM;
		goto err_free_irq;
	}

	for (i = 0; i < ar_snoc->num_clks; i++)
		ar_snoc->clks[i].id = ath10k_clocks[i];

	ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
					 ar_snoc->clks);
	if (ret)
		goto err_free_irq;

	ret = ath10k_hw_power_on(ar);
	if (ret) {
		ath10k_err(ar, "failed to power on device: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath10k_setup_msa_resources(ar, msa_size);
	if (ret) {
		ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
		goto err_power_off;
	}

	ret = ath10k_fw_init(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
		goto err_power_off;
	}

	ret = ath10k_qmi_init(ar, msa_size);
	if (ret) {
		ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
		goto err_fw_deinit;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");

	return 0;

err_fw_deinit:
	ath10k_fw_deinit(ar);

err_power_off:
	ath10k_hw_power_off(ar);

err_free_irq:
	ath10k_snoc_free_irq(ar);

err_release_resource:
	ath10k_snoc_release_resource(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}
1749
/* Platform remove: wait out an in-flight recovery (bounded), block
 * further firmware indications, then unwind everything probe set up
 * in reverse order.
 */
static int ath10k_snoc_remove(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");

	reinit_completion(&ar->driver_recovery);

	/* Give an in-progress firmware recovery up to 3 s to settle. */
	if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);

	/* Makes ath10k_snoc_fw_indication() ignore further events. */
	set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);

	ath10k_core_unregister(ar);
	ath10k_hw_power_off(ar);
	ath10k_fw_deinit(ar);
	ath10k_snoc_free_irq(ar);
	ath10k_snoc_release_resource(ar);
	ath10k_qmi_deinit(ar);
	ath10k_core_destroy(ar);

	return 0;
}
1774
/* Platform driver glue; bound via the OF match table above. */
static struct platform_driver ath10k_snoc_driver = {
	.probe  = ath10k_snoc_probe,
	.remove = ath10k_snoc_remove,
	.driver = {
		.name = "ath10k_snoc",
		.of_match_table = ath10k_snoc_dt_match,
	},
};
module_platform_driver(ath10k_snoc_driver);

MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");