// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"
#include "coredump.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_reset_mode {
        ATH10K_PCI_RESET_AUTO = 0,
        ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

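/* Illustrative usage of the parameters above (an editorial note, not part
 * of the driver): forcing MSI interrupts and warm-only resets at module
 * load would look like
 *
 *   modprobe ath10k_pci irq_mode=2 reset_mode=1
 */
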
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

/* Maximum number of bytes that can be handled atomically by
 * diag read and write.
 */
#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000

#define QCA99X0_PCIE_BAR0_START_REG    0x81030
#define QCA99X0_CPU_MEM_ADDR_REG       0x4d00c
#define QCA99X0_CPU_MEM_DATA_REG       0x4d010

static const struct pci_device_id ath10k_pci_id_table[] = {
        /* PCI-E QCA988X V2 (Ubiquiti branded) */
        { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },

        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
        { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
        { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
        { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
        { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
        { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
        { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
        {0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
        /* QCA988X pre 2.0 chips are not supported because they need some nasty
         * hacks. ath10k doesn't have them and these devices crash horribly
         * because of that.
         */
        { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
        { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

        { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

        { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },

        { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },

        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },

        { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k *ar,
                               struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);

static const struct ce_attr pci_host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htc_tx_cb,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath10k_pci_htt_htc_rx_cb,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 128,
                .recv_cb = ath10k_pci_htc_rx_cb,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htc_tx_cb,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htt_tx_cb,
        },

        /* CE5: target->host HTT (HIF->HTT) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
                .recv_cb = ath10k_pci_htt_rx_cb,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },

        /* CE8: target->host pktlog */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 128,
                .recv_cb = ath10k_pci_pktlog_rx_cb,
        },

        /* CE9: target autonomous qcache memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE10: target autonomous hif memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE11: target autonomous hif memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = __cpu_to_le32(0),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = __cpu_to_le32(1),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE2: target->host WMI */
        {
                .pipenum = __cpu_to_le32(2),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(64),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE3: host->target WMI */
        {
                .pipenum = __cpu_to_le32(3),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE4: host->target HTT */
        {
                .pipenum = __cpu_to_le32(4),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(256),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* NB: 50% of src nentries, since tx has 2 frags */
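        /* Worked example of the note above (editorial, not from the driver
         * source): with the 256 source entries configured for CE4 and two
         * fragments per tx (fragment descriptor + MSDU), at most
         * 256 / 2 = 128 frames can be in flight on the HTT tx pipe at once.
         */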

        /* CE5: target->host HTT (HIF->HTT) */
        {
                .pipenum = __cpu_to_le32(5),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(512),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = __cpu_to_le32(6),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(4096),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE7 used only by Host */
        {
                .pipenum = __cpu_to_le32(7),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(0),
                .nbytes_max = __cpu_to_le32(0),
                .flags = __cpu_to_le32(0),
                .reserved = __cpu_to_le32(0),
        },

        /* CE8: target->host pktlog */
        {
                .pipenum = __cpu_to_le32(8),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(64),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
                .reserved = __cpu_to_le32(0),
        },

        /* CE9: target autonomous qcache memcpy */
        {
                .pipenum = __cpu_to_le32(9),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
                .reserved = __cpu_to_le32(0),
        },

        /* It is not necessary to send the target wlan configuration for
         * CE10 & CE11, as these CEs are not actively used in the target.
         */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(4),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(5),
        },

        /* (Additions here) */

        { /* must be last */
                __cpu_to_le32(0),
                __cpu_to_le32(0),
                __cpu_to_le32(0),
        },
};

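/* Reading the map above (editorial note): for example, the WMI control
 * service (ATH10K_HTC_SVC_ID_WMI_CONTROL) sends host->target traffic on
 * CE3 and receives target->host traffic on CE2, matching the CE
 * configuration tables earlier in this file.
 */
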
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                           RTC_STATE_ADDRESS);

        return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        lockdep_assert_held(&ar_pci->ps_lock);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        iowrite32(PCIE_SOC_WAKE_V_MASK,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        lockdep_assert_held(&ar_pci->ps_lock);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
        ar_pci->ps_awake = false;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
        int tot_delay = 0;
        int curr_delay = 5;

        while (tot_delay < PCIE_WAKE_TIMEOUT) {
                if (ath10k_pci_is_awake(ar)) {
                        if (tot_delay > PCIE_WAKE_LATE_US)
                                ath10k_warn(ar, "device wakeup took %d ms which is unusually long; otherwise it works normally.\n",
                                            tot_delay / 1000);
                        return 0;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }

        return -ETIMEDOUT;
}

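/* Note on the wait loop above (editorial, not from the driver source):
 * the poll interval starts at 5 us and grows by 5 us per iteration up to
 * a 50 us cap, so the first ten polls cover 5 + 10 + ... + 50 = 275 us
 * in total, after which the device is polled every 50 us until
 * PCIE_WAKE_TIMEOUT expires.
 */
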
static int ath10k_pci_force_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
        int ret = 0;

        if (ar_pci->pci_ps)
                return ret;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        if (!ar_pci->ps_awake) {
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);

                ret = ath10k_pci_wake_wait(ar);
                if (ret == 0)
                        ar_pci->ps_awake = true;
        }

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

        return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
        ar_pci->ps_awake = false;

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static int ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
        int ret = 0;

        if (ar_pci->pci_ps == 0)
                return ret;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        /* This function can be called very frequently. To avoid excessive
         * CPU stalls for MMIO reads use a cache var to hold the device state.
         */
        if (!ar_pci->ps_awake) {
                __ath10k_pci_wake(ar);

                ret = ath10k_pci_wake_wait(ar);
                if (ret == 0)
                        ar_pci->ps_awake = true;
        }

        if (ret == 0) {
                ar_pci->ps_wake_refcount++;
                WARN_ON(ar_pci->ps_wake_refcount == 0);
        }

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

        return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        if (ar_pci->pci_ps == 0)
                return;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        if (WARN_ON(ar_pci->ps_wake_refcount == 0))
                goto skip;

        ar_pci->ps_wake_refcount--;

        mod_timer(&ar_pci->ps_timer, jiffies +
                  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

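/* Typical pairing of the two helpers above (an editorial sketch, mirroring
 * ath10k_bus_pci_read32()/ath10k_bus_pci_write32() below):
 *
 *   ret = ath10k_pci_wake(ar);
 *   if (ret)
 *           return;
 *   iowrite32(value, ar_pci->mem + offset);
 *   ath10k_pci_sleep(ar);
 *
 * ath10k_pci_sleep() only drops the refcount and (re)arms ps_timer; the
 * hardware is actually put to sleep in ath10k_pci_ps_timer() once the
 * grace period expires with a zero refcount.
 */
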
static void ath10k_pci_ps_timer(struct timer_list *t)
{
        struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
        struct ath10k *ar = ar_pci->ar;
        unsigned long flags;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        if (ar_pci->ps_wake_refcount > 0)
                goto skip;

        __ath10k_pci_sleep(ar);

skip:
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        if (ar_pci->pci_ps == 0) {
                ath10k_pci_force_sleep(ar);
                return;
        }

        del_timer_sync(&ar_pci->ps_timer);

        spin_lock_irqsave(&ar_pci->ps_lock, flags);
        WARN_ON(ar_pci->ps_wake_refcount > 0);
        __ath10k_pci_sleep(ar);
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
                ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
                            offset, offset + sizeof(value), ar_pci->mem_len);
                return;
        }

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
                            value, offset, ret);
                return;
        }

        iowrite32(value, ar_pci->mem + offset);
        ath10k_pci_sleep(ar);
}

static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 val;
        int ret;

        if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
                ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
                            offset, offset + sizeof(val), ar_pci->mem_len);
                return 0;
        }

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
                            offset, ret);
                return 0xffffffff;
        }

        val = ioread32(ar_pci->mem + offset);
        ath10k_pci_sleep(ar);

        return val;
}

inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
        struct ath10k_ce *ce = ath10k_ce_priv(ar);

        ce->bus_ops->write32(ar, offset, value);
}

inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_ce *ce = ath10k_ce_priv(ar);

        return ce->bus_ops->read32(ar, offset);
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
        return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
        return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
        ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: the INTR_CLR register has to be set after
         * INTR_ENABLE is set to 0; otherwise the interrupt cannot
         * really be cleared.
         */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer.
         */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer.
         */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
                return "msi";

        return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret;

        skb = dev_alloc_skb(pipe->buf_sz);
        if (!skb)
                return -ENOMEM;

        WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

        paddr = dma_map_single(ar->dev, skb->data,
                               skb->len + skb_tailroom(skb),
                               DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ar->dev, paddr))) {
                ath10k_warn(ar, "failed to dma map pci rx buf\n");
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        ATH10K_SKB_RXCB(skb)->paddr = paddr;

        spin_lock_bh(&ce->ce_lock);
        ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
        spin_unlock_bh(&ce->ce_lock);
        if (ret) {
                dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        int ret, num;

        if (pipe->buf_sz == 0)
                return;

        if (!ce_pipe->dest_ring)
                return;

        spin_lock_bh(&ce->ce_lock);
        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
        spin_unlock_bh(&ce->ce_lock);

        while (num >= 0) {
                ret = __ath10k_pci_rx_post_buf(pipe);
                if (ret) {
                        if (ret == -ENOSPC)
                                break;
                        ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                        mod_timer(&ar_pci->rx_post_retry, jiffies +
                                  ATH10K_PCI_RX_POST_RETRY_MS);
                        break;
                }
                num--;
        }
}

void ath10k_pci_rx_post(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < CE_COUNT; i++)
                ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}

void ath10k_pci_rx_replenish_retry(struct timer_list *t)
{
        struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
        struct ath10k *ar = ar_pci->ar;

        ath10k_pci_rx_post(ar);
}

static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        u32 val = 0, region = addr & 0xfffff;

        val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
               & 0x7ff) << 21;
        val |= 0x100000 | region;
        return val;
}

/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
 * Supports access to target space below 1M for qca6174 and qca9377.
 * If the target space is below 1M, bit[20] of the converted CE addr is 0.
 * Otherwise bit[20] of the converted CE addr is 1.
 */
static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        u32 val = 0, region = addr & 0xfffff;

        val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
               & 0x7ff) << 21;
        val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
        return val;
}

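/* Worked example for the conversion above (illustrative addresses, not
 * taken from the driver): for a target CPU address of 0x00042000 (below
 * 1M), region = 0x42000 and bit 20 stays clear, so the CE address is the
 * CORE_CTRL window bits ORed with 0x42000. For 0x00402000 (above 1M),
 * region = 0x02000 and the result is the window bits | 0x100000 | 0x02000.
 */
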
static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        u32 val = 0, region = addr & 0xfffff;

        val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
        val |= 0x100000 | region;
        return val;
}

static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
                return -ENOTSUPP;

        return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 *buf;
        unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf;
        int i;

        mutex_lock(&ar_pci->ce_diag_mutex);
        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

        data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
                                      GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from Target CPU virtual address space
         * to CE address space
         */
        address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

        remaining_bytes = nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
                        udelay(DIAG_ACCESS_CE_WAIT_US);
                        i += DIAG_ACCESS_CE_WAIT_US;

                        if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
                                                     &completed_nbytes) != 0) {
                        udelay(DIAG_ACCESS_CE_WAIT_US);
                        i += DIAG_ACCESS_CE_WAIT_US;

                        if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (*buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                memcpy(data, data_buf, nbytes);

                address += nbytes;
                data += nbytes;
        }

done:

        if (data_buf)
                dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
                                  ce_data_base);

        mutex_unlock(&ar_pci->ce_diag_mutex);

        return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
        __le32 val = 0;
        int ret;

        ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
        *value = __le32_to_cpu(val);

        return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
                                     u32 src, u32 len)
{
        u32 host_addr, addr;
        int ret;

        host_addr = host_interest_item_address(src);

        ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
        if (ret != 0) {
                ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
                            src, ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
        if (ret != 0) {
                ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
                            addr, len, ret);
                return ret;
        }

        return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
        __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)

int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                              const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 *buf;
        unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf;
        dma_addr_t ce_data_base = 0;
        int i;

        mutex_lock(&ar_pci->ce_diag_mutex);
        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

        data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
                                      GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

        remaining_bytes = nbytes;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Copy caller's data to allocated DMA buf */
                memcpy(data_buf, data, nbytes);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
                        udelay(DIAG_ACCESS_CE_WAIT_US);
                        i += DIAG_ACCESS_CE_WAIT_US;

                        if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
                                                     &completed_nbytes) != 0) {
                        udelay(DIAG_ACCESS_CE_WAIT_US);
                        i += DIAG_ACCESS_CE_WAIT_US;

                        if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (*buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                data += nbytes;
        }

done:
        if (data_buf) {
                dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
                                  ce_data_base);
        }

        if (ret != 0)
                ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
                            address, ret);

        mutex_unlock(&ar_pci->ce_diag_mutex);

        return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
        __le32 val = __cpu_to_le32(value);

        return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

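/* Illustrative use of the diag helpers above (an editorial sketch;
 * 0x00400000 is an arbitrary example address, not one taken from the
 * driver):
 *
 *   u32 val;
 *
 *   ret = ath10k_pci_diag_read32(ar, 0x00400000, &val);
 *   if (ret == 0)
 *           ret = ath10k_pci_diag_write32(ar, 0x00400000, val | BIT(0));
 *
 * Both helpers funnel through the bounce-buffer paths above, which take
 * ce_diag_mutex, so they may sleep and must not be called from atomic
 * context.
 */
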
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct sk_buff_head list;
        struct sk_buff *skb;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (skb == NULL)
                        continue;

                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list)))
                ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
                                     void (*callback)(struct ath10k *ar,
                                                      struct sk_buff *skb))
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
        unsigned int nbytes, max_nbytes;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &nbytes) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list))) {
                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
                           ce_state->id, skb->len);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
                                skb->data, skb->len);

                callback(ar, skb);
        }

        ath10k_pci_rx_post_pipe(pipe_info);
}

static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
                                         void (*callback)(struct ath10k *ar,
                                                          struct sk_buff *skb))
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
        unsigned int nbytes, max_nbytes, nentries;
        int orig_len;

        /* No need to acquire ce_lock for CE5, since this is the only place CE5
         * is processed other than init and deinit. Before releasing CE5
         * buffers, interrupts are disabled. Thus CE5 access is serialized.
         */
        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
                                                    &nbytes) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        continue;
                }

                dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                        max_nbytes, DMA_FROM_DEVICE);
                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        nentries = skb_queue_len(&list);
        while ((skb = __skb_dequeue(&list))) {
                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
                           ce_state->id, skb->len);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
                                skb->data, skb->len);

                orig_len = skb->len;
                callback(ar, skb);
                skb_push(skb, orig_len - skb->len);
                skb_reset_tail_pointer(skb);
                skb_trim(skb, 0);

                /* let the device gain the buffer again */
                dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                           skb->len + skb_tailroom(skb),
                                           DMA_FROM_DEVICE);
        }
        ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        /* CE4 polling needs to be done whenever CE pipe which transports
         * HTT Rx (target->host) is processed.
         */
        ath10k_ce_per_engine_service(ce_state->ar, 4);

        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        ath10k_pci_process_rx_cb(ce_state,
                                 ath10k_htt_rx_pktlog_completion_handler);
}

/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct sk_buff *skb;

        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (!skb)
                        continue;

                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len, DMA_TO_DEVICE);
                ath10k_htt_hif_tx_complete(ar, skb);
        }
}

static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
        skb_pull(skb, sizeof(struct ath10k_htc_hdr));
        ath10k_htt_t2h_msg_handler(ar, skb);
}

/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        /* CE4 polling needs to be done whenever CE pipe which transports
         * HTT Rx (target->host) is processed.
         */
        ath10k_ce_per_engine_service(ce_state->ar, 4);

        ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}

int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                         struct ath10k_hif_sg_item *items, int n_items)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int err, i = 0;

        spin_lock_bh(&ce->ce_lock);

        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) < n_items)) {
                err = -ENOBUFS;
                goto err;
        }

        for (i = 0; i < n_items - 1; i++) {
                ath10k_dbg(ar, ATH10K_DBG_PCI,
                           "pci tx item %d paddr %pad len %d n_items %d\n",
                           i, &items[i].paddr, items[i].len, n_items);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                                items[i].vaddr, items[i].len);

                err = ath10k_ce_send_nolock(ce_pipe,
                                            items[i].transfer_context,
                                            items[i].paddr,
                                            items[i].len,
                                            items[i].transfer_id,
                                            CE_SEND_FLAG_GATHER);
                if (err)
                        goto err;
        }

        /* `i` is equal to `n_items - 1` after the for loop */

        ath10k_dbg(ar, ATH10K_DBG_PCI,
                   "pci tx item %d paddr %pad len %d n_items %d\n",
                   i, &items[i].paddr, items[i].len, n_items);
        ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                        items[i].vaddr, items[i].len);

        err = ath10k_ce_send_nolock(ce_pipe,
                                    items[i].transfer_context,
                                    items[i].paddr,
                                    items[i].len,
                                    items[i].transfer_id,
                                    0);
        if (err)
                goto err;

        spin_unlock_bh(&ce->ce_lock);
        return 0;

err:
        for (; i > 0; i--)
                __ath10k_ce_send_revert(ce_pipe);

        spin_unlock_bh(&ce->ce_lock);
        return err;
}

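/* Note on the send loop above (editorial): every item except the last is
 * posted with CE_SEND_FLAG_GATHER, which tells the copy engine that more
 * descriptors of the same transfer follow; the final, flag-less send
 * closes the chain and makes the whole scatter-gather list visible to
 * the target as one transfer. For a typical two-item HTT tx (fragment
 * descriptor + MSDU), item 0 carries the gather flag and item 1
 * completes the burst.
 */
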
int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
                             size_t buf_len)
{
        return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
                                      struct ath10k_fw_crash_data *crash_data)
{
        __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        int i, ret;

        lockdep_assert_held(&ar->dump_mutex);

        ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
                                      hi_failure_state,
                                      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
        if (ret) {
                ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err(ar, "firmware register dump:\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           __le32_to_cpu(reg_dump_values[i]),
                           __le32_to_cpu(reg_dump_values[i + 1]),
                           __le32_to_cpu(reg_dump_values[i + 2]),
                           __le32_to_cpu(reg_dump_values[i + 3]));

        if (!crash_data)
                return;

        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
                crash_data->registers[i] = reg_dump_values[i];
}

static int ath10k_pci_dump_memory_section(struct ath10k *ar,
                                          const struct ath10k_mem_region *mem_region,
                                          u8 *buf, size_t buf_len)
{
        const struct ath10k_mem_section *cur_section, *next_section;
        unsigned int count, section_size, skip_size;
        int ret, i, j;

        if (!mem_region || !buf)
                return 0;

        cur_section = &mem_region->section_table.sections[0];

        if (mem_region->start > cur_section->start) {
                ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
                            mem_region->start, cur_section->start);
                return 0;
        }

        skip_size = cur_section->start - mem_region->start;

        /* fill the gap between the first register section and register
         * start address
         */
        for (i = 0; i < skip_size; i++) {
                *buf = ATH10K_MAGIC_NOT_COPIED;
                buf++;
        }

        count = 0;

        for (i = 0; cur_section != NULL; i++) {
                section_size = cur_section->end - cur_section->start;

                if (section_size <= 0) {
                        ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
                                    cur_section->start,
                                    cur_section->end);
                        break;
                }

                if ((i + 1) == mem_region->section_table.size) {
                        /* last section */
                        next_section = NULL;
                        skip_size = 0;
                } else {
                        next_section = cur_section + 1;

                        if (cur_section->end > next_section->start) {
                                ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
                                            next_section->start,
                                            cur_section->end);
                                break;
                        }

                        skip_size = next_section->start - cur_section->end;
                }

                if (buf_len < (skip_size + section_size)) {
                        ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
                        break;
                }

                buf_len -= skip_size + section_size;

                /* read section to dest memory */
                ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
                                               buf, section_size);
                if (ret) {
                        ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
                                    cur_section->start, ret);
                        break;
                }

                buf += section_size;
                count += section_size;

                /* fill in the gap between this section and the next */
                for (j = 0; j < skip_size; j++) {
                        *buf = ATH10K_MAGIC_NOT_COPIED;
                        buf++;
                }

                count += skip_size;

                if (!next_section)
                        /* this was the last section */
                        break;

                cur_section = next_section;
        }

        return count;
}

static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
{
        u32 val;

        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           FW_RAM_CONFIG_ADDRESS, config);

        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                FW_RAM_CONFIG_ADDRESS);
        if (val != config) {
                ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
                            val, config);
                return -EIO;
        }

        return 0;
}

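/* Example of the config value used above (illustrative): the caller in
 * ath10k_pci_dump_memory() derives it as region->start >> 20, so an IRAM
 * region starting at 0x00980000 selects RAM config 0x9.
 */
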
/* Always returns the length */
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
                                       const struct ath10k_mem_region *region,
                                       u8 *buf)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 base_addr, i;

        base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
        base_addr += region->start;

        for (i = 0; i < region->len; i += 4) {
                iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
                *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
        }

        return region->len;
}

/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
                                      const struct ath10k_mem_region *region,
                                      u8 *buf)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 i;
        int ret;

        mutex_lock(&ar->conf_mutex);
        if (ar->state != ATH10K_STATE_ON) {
                ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
                ret = -EIO;
                goto done;
        }

        for (i = 0; i < region->len; i += 4)
                *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);

        ret = region->len;
done:
        mutex_unlock(&ar->conf_mutex);
        return ret;
}

/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
                                          const struct ath10k_mem_region *current_region,
                                          u8 *buf)
{
        int ret;

        if (current_region->section_table.size > 0)
                /* Copy each section individually. */
                return ath10k_pci_dump_memory_section(ar,
                                                      current_region,
                                                      buf,
                                                      current_region->len);

        /* No individual memory sections defined so we can
         * copy the entire memory region.
         */
        ret = ath10k_pci_diag_read_mem(ar,
                                       current_region->start,
                                       buf,
                                       current_region->len);
        if (ret) {
                ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
                            current_region->name, ret);
                return ret;
        }

        return current_region->len;
}

static void ath10k_pci_dump_memory(struct ath10k *ar,
                                   struct ath10k_fw_crash_data *crash_data)
{
        const struct ath10k_hw_mem_layout *mem_layout;
        const struct ath10k_mem_region *current_region;
        struct ath10k_dump_ram_data_hdr *hdr;
        u32 count, shift;
        size_t buf_len;
        int ret, i;
        u8 *buf;

        lockdep_assert_held(&ar->dump_mutex);

        if (!crash_data)
                return;

        mem_layout = ath10k_coredump_get_mem_layout(ar);
        if (!mem_layout)
                return;

        current_region = &mem_layout->region_table.regions[0];

        buf = crash_data->ramdump_buf;
        buf_len = crash_data->ramdump_buf_len;

        memset(buf, 0, buf_len);

        for (i = 0; i < mem_layout->region_table.size; i++) {
                count = 0;

                if (current_region->len > buf_len) {
                        ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
                                    current_region->name,
                                    current_region->len,
                                    buf_len);
                        break;
                }

                /* To get IRAM dump, the host driver needs to switch target
                 * ram config from DRAM to IRAM.
                 */
                if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
                    current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
                        shift = current_region->start >> 20;

                        ret = ath10k_pci_set_ram_config(ar, shift);
                        if (ret) {
                                ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
                                            current_region->name, ret);
                                break;
                        }
                }

                /* Reserve space for the header. */
                hdr = (void *)buf;
                buf += sizeof(*hdr);
                buf_len -= sizeof(*hdr);

                switch (current_region->type) {
                case ATH10K_MEM_REGION_TYPE_IOSRAM:
                        count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
                        break;
                case ATH10K_MEM_REGION_TYPE_IOREG:
                        ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
                        if (ret < 0)
                                break;

                        count = ret;
                        break;
                default:
                        ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
                        if (ret < 0)
                                break;

                        count = ret;
                        break;
                }

                hdr->region_type = cpu_to_le32(current_region->type);
                hdr->start = cpu_to_le32(current_region->start);
                hdr->length = cpu_to_le32(count);

                if (count == 0)
                        /* Note: the header remains, just with zero length. */
                        break;

                buf += count;
                buf_len -= count;

                current_region++;
        }
}

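/* Resulting ramdump buffer layout (editorial sketch of the loop above):
 *
 *   [hdr: region 0][data: region 0][hdr: region 1][data: region 1]...
 *
 * Each ath10k_dump_ram_data_hdr records the region type, start address
 * and the number of bytes actually captured; a zero-length header marks
 * the point where dumping stopped early.
 */
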
static void ath10k_pci_fw_dump_work(struct work_struct *work)
{
        struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
                                                 dump_work);
        struct ath10k_fw_crash_data *crash_data;
        struct ath10k *ar = ar_pci->ar;
        char guid[UUID_STRING_LEN + 1];

        mutex_lock(&ar->dump_mutex);

        spin_lock_bh(&ar->data_lock);
        ar->stats.fw_crash_counter++;
        spin_unlock_bh(&ar->data_lock);

        crash_data = ath10k_coredump_new(ar);

        if (crash_data)
                scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
        else
                scnprintf(guid, sizeof(guid), "n/a");

        ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
        ath10k_print_driver_info(ar);
        ath10k_pci_dump_registers(ar, crash_data);
        ath10k_ce_dump_registers(ar, crash_data);
        ath10k_pci_dump_memory(ar, crash_data);

        mutex_unlock(&ar->dump_mutex);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        queue_work(ar->workqueue, &ar_pci->dump_work);
}

void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                        int force)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just
                 * wait, since checking involves reading a CE register, which
                 * is a relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

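/* Example of the 50% heuristic above (illustrative, assuming ar_pci->attr
 * mirrors the host CE table earlier in this file): CE3 (host->target WMI)
 * has 32 source entries, so the poll is skipped as long as more than 16
 * of them are still free.
 */
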
1815static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1816{
1817 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1818
1819 del_timer_sync(&ar_pci->rx_post_retry);
1820}
1821
1822int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1823 u8 *ul_pipe, u8 *dl_pipe)
1824{
1825 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1826 const struct ce_service_to_pipe *entry;
1827 bool ul_set = false, dl_set = false;
1828 int i;
1829
1830 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1831
1832 for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
1833 entry = &ar_pci->serv_to_pipe[i];
1834
1835 if (__le32_to_cpu(entry->service_id) != service_id)
1836 continue;
1837
1838 switch (__le32_to_cpu(entry->pipedir)) {
1839 case PIPEDIR_NONE:
1840 break;
1841 case PIPEDIR_IN:
1842 WARN_ON(dl_set);
1843 *dl_pipe = __le32_to_cpu(entry->pipenum);
1844 dl_set = true;
1845 break;
1846 case PIPEDIR_OUT:
1847 WARN_ON(ul_set);
1848 *ul_pipe = __le32_to_cpu(entry->pipenum);
1849 ul_set = true;
1850 break;
1851 case PIPEDIR_INOUT:
1852 WARN_ON(dl_set);
1853 WARN_ON(ul_set);
1854 *dl_pipe = __le32_to_cpu(entry->pipenum);
1855 *ul_pipe = __le32_to_cpu(entry->pipenum);
1856 dl_set = true;
1857 ul_set = true;
1858 break;
1859 }
1860 }
1861
1862 if (!ul_set || !dl_set)
1863 return -ENOENT;
1864
1865 return 0;
1866}
1867
1868void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1869 u8 *ul_pipe, u8 *dl_pipe)
1870{
1871 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1872
1873 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1874 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1875 ul_pipe, dl_pipe);
1876}
1877
1878void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1879{
1880 u32 val;
1881
1882 switch (ar->hw_rev) {
1883 case ATH10K_HW_QCA988X:
1884 case ATH10K_HW_QCA9887:
1885 case ATH10K_HW_QCA6174:
1886 case ATH10K_HW_QCA9377:
1887 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1888 CORE_CTRL_ADDRESS);
1889 val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1890 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1891 CORE_CTRL_ADDRESS, val);
1892 break;
1893 case ATH10K_HW_QCA99X0:
1894 case ATH10K_HW_QCA9984:
1895 case ATH10K_HW_QCA9888:
1896 case ATH10K_HW_QCA4019:
1897 /* TODO: Find appropriate register configuration for QCA99X0
1898 * to mask irq/MSI.
1899 */
1900 break;
1901 case ATH10K_HW_WCN3990:
1902 break;
1903 }
1904}
1905
1906static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1907{
1908 u32 val;
1909
1910 switch (ar->hw_rev) {
1911 case ATH10K_HW_QCA988X:
1912 case ATH10K_HW_QCA9887:
1913 case ATH10K_HW_QCA6174:
1914 case ATH10K_HW_QCA9377:
1915 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1916 CORE_CTRL_ADDRESS);
1917 val |= CORE_CTRL_PCIE_REG_31_MASK;
1918 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1919 CORE_CTRL_ADDRESS, val);
1920 break;
1921 case ATH10K_HW_QCA99X0:
1922 case ATH10K_HW_QCA9984:
1923 case ATH10K_HW_QCA9888:
1924 case ATH10K_HW_QCA4019:
1925 /* TODO: Find appropriate register configuration for QCA99X0
1926 * to unmask irq/MSI.
1927 */
1928 break;
1929 case ATH10K_HW_WCN3990:
1930 break;
1931 }
1932}
1933
1934static void ath10k_pci_irq_disable(struct ath10k *ar)
1935{
1936 ath10k_ce_disable_interrupts(ar);
1937 ath10k_pci_disable_and_clear_legacy_irq(ar);
1938 ath10k_pci_irq_msi_fw_mask(ar);
1939}
1940
1941static void ath10k_pci_irq_sync(struct ath10k *ar)
1942{
1943 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1944
1945 synchronize_irq(ar_pci->pdev->irq);
1946}
1947
1948static void ath10k_pci_irq_enable(struct ath10k *ar)
1949{
1950 ath10k_ce_enable_interrupts(ar);
1951 ath10k_pci_enable_legacy_irq(ar);
1952 ath10k_pci_irq_msi_fw_unmask(ar);
1953}
1954
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	napi_enable(&ar->napi);

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}

/*
 * Clean up residual buffers for device shutdown:
 *   - buffers that were enqueued for receive
 *   - buffers that were to be sent
 * Note: buffers that had completed but were not yet processed are on a
 * completion queue. They are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
	cancel_work_sync(&ar_pci->dump_work);

	/* Most likely the device has the HTT Rx ring configured. The only way
	 * to prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However,
	 * regardless of how many MSI interrupts are assigned, the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt, reset
	 * it before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_flush(ar);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

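/* Exchange a single BMI (Boot-loader Messaging Interface) request/response
 * pair with the target. The request is copied into a DMA-mapped bounce
 * buffer and sent on the BMI TX copy engine; when a response is expected a
 * second bounce buffer is posted on the BMI RX copy engine first.
 * Completion is detected by polling in ath10k_pci_bmi_wait() below.
 */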
int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
				    void *req, u32 req_len,
				    void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
	if (ret) {
		dma_addr_t unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* the wait succeeded; make sure we report success */
		ret = 0;
	}

err_resp:
	if (resp) {
		dma_addr_t unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		/* copy at most the caller's buffer size */
		memcpy(resp, tresp, *resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	unsigned int nbytes;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
					  &nbytes))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}

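/* Busy-poll both BMI copy engines until the send (and, if requested, the
 * receive) has completed or BMI_COMMUNICATION_TIMEOUT_HZ elapses. BMI
 * exchanges happen this early in boot that completion is polled with
 * schedule() rather than being interrupt driven.
 */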
static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	unsigned long started = jiffies;
	unsigned long dur;
	int ret;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
			ret = 0;
			goto out;
		}

		schedule();
	}

	ret = -ETIMEDOUT;

out:
	dur = jiffies - started;
	if (dur > HZ)
		ath10k_dbg(ar, ATH10K_DBG_BMI,
			   "bmi cmd took %lu jiffies hz %d ret %d\n",
			   dur, HZ, ret);
	return ret;
}

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	return 0;
}

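/* Report how many IRAM banks the target firmware may claim for early
 * allocation; the value is written into hi_early_alloc by
 * ath10k_pci_init_config(). The count depends on the exact chip revision.
 */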
static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID_UBNT:
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
	case QCA9888_2_0_DEVICE_ID:
	case QCA9984_1_0_DEVICE_ID:
	case QCA9887_1_0_DEVICE_ID:
		return 1;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		break;
	case QCA9377_1_0_DEVICE_ID:
		return 9;
	}

	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}

static int ath10k_bus_get_num_banks(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->get_num_banks(ar);
}

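/* Push the host's view of the interconnect configuration to the target via
 * diagnostic-window reads/writes: the CE pipe configuration, the
 * service-to-pipe map, PCIe config flags (L1 disabled), the
 * early-allocation setup and finally HI_OPTION_EARLY_CFG_DONE to let the
 * target proceed with its initialization.
 */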
int ath10k_pci_init_config(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					ar_pci->pipe_config,
					sizeof(struct ce_pipe_config) *
					NUM_TARGET_CE_CONFIG_WLAN);

	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					ar_pci->serv_to_pipe,
					sizeof(pci_target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for another feature.
	 */

	/* Override Host's Copy Engine 5 configuration */
	attr = &ar_pci->attr[5];
	attr->src_sz_max = 0;
	attr->dest_nentries = 0;

	/* Override Target firmware's Copy Engine configuration */
	config = &ar_pci->pipe_config[5];
	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
	config->nbytes_max = __cpu_to_le32(2048);

	/* Map from service/endpoint to Copy Engine */
	ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
}

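/* Allocate host-side state for every copy engine pipe. The last CE is
 * reserved as the diagnostic window (ar_pci->ce_diag) and gets no buffer
 * size assigned.
 */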
int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ce->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_DIAG_PIPE) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
	}

	return 0;
}

void ath10k_pci_free_pipes(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

int ath10k_pci_init_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
{
	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
	       FW_IND_EVENT_PENDING;
}

static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	val &= ~FW_IND_EVENT_PENDING;
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}

static bool ath10k_pci_has_device_gone(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	return (val == 0xffffffff);
}

/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}

static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
	u32 val;

	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}

static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}

static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
			       val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}

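/* Warm reset sequence, as performed below: quiesce the memory controller
 * and target CPU and wait once, then disable the LF timer, reset the copy
 * engines, reset the CPU a second time and wait for the firmware indicator
 * again, re-initializing host CE state after each CPU reset.
 */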
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if
	 * it were to access the copy engine while the host performs a copy
	 * engine reset, the device could confuse the PCIe controller to the
	 * point of bringing the host system to a complete stop (i.e. hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}

static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
{
	ath10k_pci_irq_disable(ar);
	return ath10k_pci_qca99x0_chip_reset(ar);
}

static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->pci_soft_reset)
		return -ENOTSUPP;

	return ar_pci->pci_soft_reset(ar);
}

static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) have issues with cold
	 * reset. It is thus preferred to use warm reset, which is safer but
	 * may not be able to recover the device from all possible fail
	 * scenarios.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of
		 * these cases the device is in such a state that a cold
		 * reset may lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work. */

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");

	return 0;
}

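/* Full ("hard") chip reset; dispatches to the hardware-specific handler
 * selected in ath10k_pci_probe() based on the PCI device id.
 */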
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON(!ar_pci->pci_hard_reset))
		return -ENOTSUPP;

	return ar_pci->pci_hard_reset(ar);
}

static int ath10k_pci_hif_power_up(struct ath10k *ar,
				   enum ath10k_firmware_mode fw_mode)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}

void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no point in resetting here.
	 */
}

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* Nothing to do; the important stuff is in the driver suspend. */
	return 0;
}

static int ath10k_pci_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake may
	 * still be true. It is known that the device may be asleep after
	 * resuming regardless of the SoC powersave state before suspending.
	 * Hence make sure the device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	/* Nothing to do; the important stuff is in the driver resume. */
	return 0;
}

static int ath10k_pci_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;
	int ret = 0;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_err(ar, "failed to wake up target: %d\n", ret);
		return ret;
	}

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	return ret;
}

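/* Calibration data is considered valid when the XOR of all 16-bit
 * little-endian words equals 0xffff; an odd-sized blob is rejected
 * outright.
 */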
static bool ath10k_pci_validate_cal(void *data, size_t size)
{
	__le16 *cal_words = data;
	u16 checksum = 0;
	size_t i;

	if (size % 2 != 0)
		return false;

	for (i = 0; i < size / 2; i++)
		checksum ^= le16_to_cpu(cal_words[i]);

	return checksum == 0xffff;
}

static void ath10k_pci_enable_eeprom(struct ath10k *ar)
{
	/* Enable SI clock */
	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);

	/* Configure GPIOs for I2C operation */
	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
			   SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
			      GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
			   SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS +
			   QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
			   1u << QCA9887_1_0_SI_CLK_GPIO_PIN);

	/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
	ath10k_pci_write32(ar,
			   SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
			   SM(1, SI_CONFIG_ERR_INT) |
			   SM(1, SI_CONFIG_BIDIR_OD_DATA) |
			   SM(1, SI_CONFIG_I2C) |
			   SM(1, SI_CONFIG_POS_SAMPLE) |
			   SM(1, SI_CONFIG_INACTIVE_DATA) |
			   SM(1, SI_CONFIG_INACTIVE_CLK) |
			   SM(8, SI_CONFIG_DIVIDER));
}

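/* Read a single byte from the QCA9887 EEPROM over the SI (I2C) controller:
 * program the device-select/address words, kick off the transfer, then poll
 * SI_CS_DONE_INT for up to roughly one second (100000 * 10us).
 */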
static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
{
	u32 reg;
	int wait_limit;

	/* set device select byte and select the read operation */
	reg = QCA9887_EEPROM_SELECT_READ |
	      SM(addr, QCA9887_EEPROM_ADDR_LO) |
	      SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);

	/* write transmit data, transfer length, and START bit */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
			   SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
			   SM(4, SI_CS_TX_CNT));

	/* wait max 1 sec */
	wait_limit = 100000;

	/* wait for SI_CS_DONE_INT */
	do {
		reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
		if (MS(reg, SI_CS_DONE_INT))
			break;

		wait_limit--;
		udelay(10);
	} while (wait_limit > 0);

	if (!MS(reg, SI_CS_DONE_INT)) {
		ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
			   addr);
		return -ETIMEDOUT;
	}

	/* clear SI_CS_DONE_INT */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);

	if (MS(reg, SI_CS_DONE_ERR)) {
		ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
		return -EIO;
	}

	/* extract receive data */
	reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
	*out = reg;

	return 0;
}

static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
					   size_t *data_len)
{
	u8 *caldata = NULL;
	size_t calsize, i;
	int ret;

	if (!QCA_REV_9887(ar))
		return -EOPNOTSUPP;

	calsize = ar->hw_params.cal_data_len;
	caldata = kmalloc(calsize, GFP_KERNEL);
	if (!caldata)
		return -ENOMEM;

	ath10k_pci_enable_eeprom(ar);

	for (i = 0; i < calsize; i++) {
		ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
		if (ret)
			goto err_free;
	}

	if (!ath10k_pci_validate_cal(caldata, calsize))
		goto err_free;

	*data = caldata;
	*data_len = calsize;

	return 0;

err_free:
	kfree(caldata);

	return -EINVAL;
}

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg = ath10k_pci_hif_tx_sg,
	.diag_read = ath10k_pci_hif_diag_read,
	.diag_write = ath10k_pci_diag_write_mem,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
	.read32 = ath10k_pci_read32,
	.write32 = ath10k_pci_write32,
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
	.fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
};

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (ath10k_pci_has_device_gone(ar))
		return IRQ_NONE;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
	    !ath10k_pci_irq_pending(ar))
		return IRQ_NONE;

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}

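/* NAPI poll callback: service any pending copy engine interrupts, then
 * complete HTT TX/RX work up to the given budget. Returning a value below
 * the budget signals that polling may stop and interrupts be re-enabled.
 */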
static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);

	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete_done(ctx, done);
		/* In case of MSI, it is possible that interrupts are received
		 * while the NAPI poll is in progress. Pending interrupts that
		 * arrive after all copy engine pipes have been processed by
		 * the NAPI poll would then never be handled again. This was
		 * causing the boot sequence to fail on x86 platforms. So
		 * before re-enabling interrupts it is safer to check for
		 * pending interrupts and service them immediately.
		 */
		if (ath10k_ce_interrupt_summary(ar)) {
			napi_reschedule(ctx);
			goto out;
		}
		ath10k_pci_enable_legacy_irq(ar);
		ath10k_pci_irq_msi_fw_unmask(ar);
	}

out:
	return done;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		return ath10k_pci_request_irq_legacy(ar);
	case ATH10K_PCI_IRQ_MSI:
		return ath10k_pci_request_irq_msi(ar);
	default:
		return -EINVAL;
	}
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	free_irq(ar_pci->pdev->irq, ar);
}

void ath10k_pci_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
		       ATH10K_NAPI_BUDGET);
}

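/* Pick the operational interrupt mode: MSI is tried first unless the
 * ath10k_pci_irq_mode module parameter restricts the choice, with legacy
 * (INTx) interrupts as the fallback.
 */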
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_napi(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall through to legacy interrupts */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: the CORE_BASE write depends on the
	 * target correctly decoding the AXI address, but the host won't know
	 * when the target has written its BAR to CORE_CTRL. This write might
	 * get lost if the target has NOT yet written the BAR. For now, work
	 * around the race by repeating the write during the synchronization
	 * check in ath10k_pci_wait_for_target_init().
	 */
	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}

static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		ath10k_pci_deinit_irq_legacy(ar);
		break;
	default:
		pci_disable_msi(ar_pci->pdev);
		break;
	}

	return 0;
}

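/* Poll FW_INDICATOR_ADDRESS for up to ATH10K_PCI_TARGET_WAIT ms until the
 * firmware reports FW_IND_INITIALIZED. A read of 0xffffffff means the
 * device has dropped off the bus; FW_IND_EVENT_PENDING means it crashed
 * during init.
 */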
int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
	return 0;
}

static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	/* After writing SOC_GLOBAL_RESET to put the device into reset (and
	 * again when pulling it out of reset) PCIe may not be stable: an
	 * immediate PCIe register access can cause a bus error. Delay before
	 * any PCIe access to avoid this.
	 */
	msleep(20);

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	msleep(20);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}

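/* Claim the PCI device: enable it, request the register BAR, enforce the
 * 32-bit DMA mask the target expects, enable bus mastering and iomap the
 * SoC register space.
 */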
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}

static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
{
	const struct ath10k_pci_supp_chip *supp_chip;
	int i;
	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);

	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
		supp_chip = &ath10k_pci_supp_chips[i];

		if (supp_chip->dev_id == dev_id &&
		    supp_chip->rev_id == rev_id)
			return true;
	}

	return false;
}

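/* Allocate per-device copies of the const CE configuration tables so that
 * ath10k_pci_override_ce_config() can patch them for QCA6174/QCA9377
 * without touching the shared templates, then allocate the CE pipes.
 */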
int ath10k_pci_setup_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_init(&ce->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);
	mutex_init(&ar_pci->ce_diag_mutex);

	INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);

	timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);

	ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
			       sizeof(pci_host_ce_config_wlan),
			       GFP_KERNEL);
	if (!ar_pci->attr)
		return -ENOMEM;

	ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
				      sizeof(pci_target_ce_config_wlan),
				      GFP_KERNEL);
	if (!ar_pci->pipe_config) {
		ret = -ENOMEM;
		goto err_free_attr;
	}

	ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
				       sizeof(pci_target_service_to_ce_map_wlan),
				       GFP_KERNEL);
	if (!ar_pci->serv_to_pipe) {
		ret = -ENOMEM;
		goto err_free_pipe_config;
	}

	if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
		ath10k_pci_override_ce_config(ar);

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		goto err_free_serv_to_pipe;
	}

	return 0;

err_free_serv_to_pipe:
	kfree(ar_pci->serv_to_pipe);
err_free_pipe_config:
	kfree(ar_pci->pipe_config);
err_free_attr:
	kfree(ar_pci->attr);
	return ret;
}

void ath10k_pci_release_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_rx_retry_sync(ar);
	netif_napi_del(&ar->napi);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
	kfree(ar_pci->attr);
	kfree(ar_pci->pipe_config);
	kfree(ar_pci->serv_to_pipe);
}

static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
	.read32 = ath10k_bus_pci_read32,
	.write32 = ath10k_bus_pci_write32,
	.get_num_banks = ath10k_pci_get_num_banks,
};

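/* Probe flow: select the hardware revision and reset/address helpers from
 * the PCI device id, create the core, claim PCI resources, set up
 * interrupts, verify the chip id against the supported list and finally
 * register with the ath10k core.
 */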
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	enum ath10k_hw_rev hw_rev;
	struct ath10k_bus_params bus_params = {};
	bool pci_ps, is_qca988x = false;
	int (*pci_soft_reset)(struct ath10k *ar);
	int (*pci_hard_reset)(struct ath10k *ar);
	u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID_UBNT:
	case QCA988X_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA988X;
		pci_ps = false;
		is_qca988x = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA9887_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9887;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA6174;
		pci_ps = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
		break;
	case QCA99X0_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA99X0;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9984_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9984;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9888_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9888;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9377_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9377;
		pci_ps = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
				hw_rev, &ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
		   pdev->vendor, pdev->device,
		   pdev->subsystem_vendor, pdev->subsystem_device);

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;
	ar->dev_id = pci_dev->device;
	ar_pci->pci_ps = pci_ps;
	ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
	ar_pci->pci_soft_reset = pci_soft_reset;
	ar_pci->pci_hard_reset = pci_hard_reset;
	ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
	ar->ce_priv = &ar_pci->ce;

	ar->id.vendor = pdev->vendor;
	ar->id.device = pdev->device;
	ar->id.subsystem_vendor = pdev->subsystem_vendor;
	ar->id.subsystem_device = pdev->subsystem_device;

	timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);

	ret = ath10k_pci_setup_resource(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_free_pipes;
	}

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake up device: %d\n", ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_sleep;
	}

	ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	bus_params.dev_type = ATH10K_DEV_TYPE_LL;
	bus_params.link_can_suspend = true;
	/* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
	 * fall off the bus during chip_reset. These chips have the same pci
	 * device id as the QCA9880 BR4A or 2R4E, hence the check.
	 */
	if (is_qca988x) {
		bus_params.chip_id =
			ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
		if (bus_params.chip_id != 0xffffffff) {
			if (!ath10k_pci_chip_is_supported(pdev->device,
							  bus_params.chip_id))
				goto err_unsupported;
		}
	}

	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_free_irq;
	}

	bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (bus_params.chip_id == 0xffffffff)
		goto err_unsupported;

	if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
		goto err_free_irq;

	ret = ath10k_core_register(ar, &bus_params);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_unsupported:
	ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
		   pdev->device, bus_params.chip_id);

err_free_irq:
	ath10k_pci_free_irq(ar);

err_deinit_irq:
	ath10k_pci_release_resource(ar);

err_sleep:
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	if (!ar)
		return;

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_release_resource(ar);
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);
	int ret;

	ret = ath10k_pci_suspend(ar);
	if (ret)
		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);

	return ret;
}

static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);
	int ret;

	ret = ath10k_pci_resume(ar);
	if (ret)
		ath10k_warn(ar, "failed to resume hif: %d\n", ret);

	return ret;
}

static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
			 ath10k_pci_pm_suspend,
			 ath10k_pci_pm_resume);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
#ifdef CONFIG_PM
	.driver.pm = &ath10k_pci_pm_ops,
#endif
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	ret = ath10k_ahb_init();
	if (ret)
		printk(KERN_ERR "ahb init failed: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
	ath10k_ahb_exit();
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");

/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9887 1.0 firmware files */
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);