/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/export.h>
#include "ath9k.h"
#include "reg.h"
#include "reg_wow.h"
#include "hw-ops.h"

static void ath9k_hw_set_sta_powersave(struct ath_hw *ah)
{
        if (!ath9k_hw_mci_is_enabled(ah))
                goto set;
        /*
         * If MCI is being used, set PWR_SAV only when MCI's
         * PS state is disabled.
         */
        if (ar9003_mci_state(ah, MCI_STATE_GET_WLAN_PS_STATE) != MCI_PS_DISABLE)
                return;
set:
        REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
}

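/*
 * Put the MAC into WoW sleep: enable station power-save, stop Rx DMA,
 * drop the TSF2 reference when no generic timer is using it, keep the
 * RTC awake while MCI/BT coexistence is active, and arm the RTC to
 * force a wakeup on the next interrupt.
 */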
static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);

        ath9k_hw_set_sta_powersave(ah);

        /* set rx disable bit */
        REG_WRITE(ah, AR_CR, AR_CR_RXD);

        if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE(ah), 0, AH_WAIT_TIMEOUT)) {
                ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
                        REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
                return;
        }

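        /*
         * If none of the generic timers is using TSF2 on these chips,
         * disable the TSF2 reference before entering WoW sleep.
         */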
        if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                if (!REG_READ(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL))
                        REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
        } else if (AR_SREV_9485(ah)) {
                if (!(REG_READ(ah, AR_NDP2_TIMER_MODE) &
                      AR_GEN_TIMERS2_MODE_ENABLE_MASK))
                        REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
        }

        if (ath9k_hw_mci_is_enabled(ah))
                REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);

        REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_ON_INT);
}

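/*
 * Program the keep-alive frame the hardware sends on its own while the
 * host sleeps: a set of TX descriptor control words followed by the
 * 802.11 header of a keep-alive frame (type/subtype taken from the
 * KAL_FRAME_* constants) addressed to the current BSS.
 */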
static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);
        u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
        u32 ctl[13] = {0};
        u32 data_word[KAL_NUM_DATA_WORDS];
        u8 i;
        u32 wow_ka_data_word0;

        memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
        memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);

        /* set the transmit buffer */
        ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
        ctl[1] = 0;
        ctl[4] = 0;
        ctl[7] = (ah->txchainmask) << 2;
        ctl[2] = 0xf << 16; /* tx_tries 0 */

        if (IS_CHAN_2GHZ(ah->curchan))
                ctl[3] = 0x1b; /* CCK_1M */
        else
                ctl[3] = 0xb; /* OFDM_6M */

        for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);

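        /*
         * 802.11 header of the keep-alive frame, packed into little-endian
         * 32-bit words: frame control and duration/ID first, then
         * addr1 (BSSID), addr2 (our station MAC) and addr3 (BSSID).
         */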
        data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
                       (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
        data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
                       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
        data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
                       (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
        data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
                       (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
        data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
                       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
        data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);

        if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
                /*
                 * AR9462 2.0 and AR9565 have an extra descriptor word
                 * (time based discard) compared to other chips.
                 */
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
                wow_ka_data_word0 = AR_WOW_TXBUF(13);
        } else {
                wow_ka_data_word0 = AR_WOW_TXBUF(12);
        }

        for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
                REG_WRITE(ah, (wow_ka_data_word0 + i * 4), data_word[i]);
}

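/*
 * Install one wake-on-wireless pattern/mask pair. Patterns 0-7 are enabled
 * via AR_WOW_PATTERN and patterns 8 and above via AR_MAC_PCU_WOW4; the
 * pattern length goes into the AR_WOW_LENGTH1..4 register covering this
 * slot, and the matching "pattern found" bit is recorded in the event mask
 * checked by ath9k_hw_wow_wakeup(). Returns -ENOSPC if the hardware has no
 * room for another pattern.
 */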
int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
                               u8 *user_mask, int pattern_count,
                               int pattern_len)
{
        int i;
        u32 pattern_val, mask_val;
        u32 set, clr;

        if (pattern_count >= ah->wow.max_patterns)
                return -ENOSPC;

        if (pattern_count < MAX_NUM_PATTERN_LEGACY)
                REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
        else
                REG_SET_BIT(ah, AR_MAC_PCU_WOW4, BIT(pattern_count - 8));

        for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
                memcpy(&pattern_val, user_pattern, 4);
                REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
                          pattern_val);
                user_pattern += 4;
        }

        for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
                memcpy(&mask_val, user_mask, 4);
                REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
                user_mask += 4;
        }

        if (pattern_count < MAX_NUM_PATTERN_LEGACY)
                ah->wow.wow_event_mask |=
                        BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
        else
                ah->wow.wow_event_mask2 |=
                        BIT((pattern_count - 8) + AR_WOW_PAT_FOUND_SHIFT);

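        /*
         * Pattern lengths are packed four per register, eight bits each:
         * AR_WOW_LENGTH1 holds patterns 0-3 (pattern 0 in bits 31:24,
         * pattern 3 in bits 7:0), AR_WOW_LENGTH2 patterns 4-7, and so on.
         * Mask out the old length field and write the new one for this
         * pattern slot.
         */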
        if (pattern_count < 4) {
                set = (pattern_len & AR_WOW_LENGTH_MAX) <<
                      AR_WOW_LEN1_SHIFT(pattern_count);
                clr = AR_WOW_LENGTH1_MASK(pattern_count);
                REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
        } else if (pattern_count < 8) {
                set = (pattern_len & AR_WOW_LENGTH_MAX) <<
                      AR_WOW_LEN2_SHIFT(pattern_count);
                clr = AR_WOW_LENGTH2_MASK(pattern_count);
                REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
        } else if (pattern_count < 12) {
                set = (pattern_len & AR_WOW_LENGTH_MAX) <<
                      AR_WOW_LEN3_SHIFT(pattern_count);
                clr = AR_WOW_LENGTH3_MASK(pattern_count);
                REG_RMW(ah, AR_WOW_LENGTH3, set, clr);
        } else if (pattern_count < MAX_NUM_PATTERN) {
                set = (pattern_len & AR_WOW_LENGTH_MAX) <<
                      AR_WOW_LEN4_SHIFT(pattern_count);
                clr = AR_WOW_LENGTH4_MASK(pattern_count);
                REG_RMW(ah, AR_WOW_LENGTH4, set, clr);
        }

        return 0;
}
EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);

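/*
 * Called on resume to determine why the chip woke the host. The status
 * bits in AR_WOW_PATTERN and AR_MAC_PCU_WOW4 are filtered against the
 * events that were actually armed, translated into AH_WOW_* flags and
 * cleared; the beacon RSSI threshold and the PCI-E reset wiring are then
 * restored to their normal (non-WoW) values.
 */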
u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
{
        u32 wow_status = 0;
        u32 val = 0, rval;

        /*
         * Read the WoW status register to know
         * the wakeup reason.
         */
        rval = REG_READ(ah, AR_WOW_PATTERN);
        val = AR_WOW_STATUS(rval);

        /*
         * Mask only the WoW events that we have enabled. Sometimes
         * we have spurious WoW events from the AR_WOW_PATTERN
         * register. This mask will clean it up.
         */
        val &= ah->wow.wow_event_mask;

        if (val) {
                if (val & AR_WOW_MAGIC_PAT_FOUND)
                        wow_status |= AH_WOW_MAGIC_PATTERN_EN;
                if (AR_WOW_PATTERN_FOUND(val))
                        wow_status |= AH_WOW_USER_PATTERN_EN;
                if (val & AR_WOW_KEEP_ALIVE_FAIL)
                        wow_status |= AH_WOW_LINK_CHANGE;
                if (val & AR_WOW_BEACON_FAIL)
                        wow_status |= AH_WOW_BEACON_MISS;
        }

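        /* Patterns 8 and above report their match status in AR_MAC_PCU_WOW4. */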
        rval = REG_READ(ah, AR_MAC_PCU_WOW4);
        val = AR_WOW_STATUS2(rval);
        val &= ah->wow.wow_event_mask2;

        if (val) {
                if (AR_WOW2_PATTERN_FOUND(val))
                        wow_status |= AH_WOW_USER_PATTERN_EN;
        }

        /*
         * Set and clear WOW_PME_CLEAR so that the chip can generate the
         * next WoW signal.
         * Disable D3 before accessing other registers?
         */

        /* do we need to check the bit value 0x01000000 (7-10) ?? */
        REG_RMW(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_WOW_PME_CLR,
                AR_PMCTRL_PWR_STATE_D1D3);

        /*
         * Clear all events.
         */
        REG_WRITE(ah, AR_WOW_PATTERN,
                  AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
        REG_WRITE(ah, AR_MAC_PCU_WOW4,
                  AR_WOW_CLEAR_EVENTS2(REG_READ(ah, AR_MAC_PCU_WOW4)));

        /*
         * Restore the beacon threshold to its initial value.
         */
        REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

        /*
         * Restore the original wiring of the PCI-E reset, Power-On-Reset
         * and external PCIE_POR_SHORT pins. Just before WoW sleep, the
         * PCI-E reset was untied from the chip's Power-On-Reset so that a
         * PCI-E reset from the bus would not reset the chip.
         */
        if (ah->is_pciexpress)
                ath9k_hw_configpcipowersave(ah, false);

        if (AR_SREV_9462(ah) || AR_SREV_9565(ah) || AR_SREV_9485(ah)) {
                u32 dc = REG_READ(ah, AR_DIRECT_CONNECT);

                if (!(dc & AR_DC_TSF2_ENABLE))
                        ath9k_hw_gen_timer_start_tsf2(ah);
        }

        ah->wow.wow_event_mask = 0;
        ah->wow.wow_event_mask2 = 0;

        return wow_status;
}
EXPORT_SYMBOL(ath9k_hw_wow_wakeup);

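/*
 * Prepare the AR_WA register for WoW sleep on PCI-E cards so that a
 * PCI-E reset from the bus while the host sleeps does not reset the chip;
 * ath9k_hw_wow_wakeup() restores the original wiring on resume.
 */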
static void ath9k_hw_wow_set_arwr_reg(struct ath_hw *ah)
{
        u32 wa_reg;

        if (!ah->is_pciexpress)
                return;

        /*
         * We need to untie the internal POR (power-on-reset)
         * from the external PCI-E reset. We also need to tie
         * the PCI-E PHY reset to the PCI-E reset.
         */
        wa_reg = REG_READ(ah, AR_WA(ah));
        wa_reg &= ~AR_WA_UNTIE_RESET_EN;
        wa_reg |= AR_WA_RESET_EN;
        wa_reg |= AR_WA_POR_SHORT;

        REG_WRITE(ah, AR_WA(ah), wa_reg);
}

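/*
 * Arm the chip for Wake-on-Wireless and enter WoW sleep. pattern_enable is
 * a bitmask of AH_WOW_* wake events; user patterns themselves are installed
 * separately via ath9k_hw_wow_apply_pattern(). The sequence programs the
 * PCI-E power-management bits, the WoW backoff/count/timeout values, the
 * keep-alive frame, the beacon-miss and magic-packet wake conditions, and
 * finally calls ath9k_hw_set_powermode_wow_sleep().
 */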
void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
{
        u32 wow_event_mask;
        u32 keep_alive, magic_pattern, host_pm_ctrl;

        wow_event_mask = ah->wow.wow_event_mask;

        /*
         * AR_PMCTRL_HOST_PME_EN - Override PME enable in configuration
         *                         space and allow MAC to generate WoW anyway.
         *
         * AR_PMCTRL_PWR_PM_CTRL_ENA - ???
         *
         * AR_PMCTRL_AUX_PWR_DET - PCI core SYS_AUX_PWR_DET signal,
         *                         needs to be set for WoW in PCI mode.
         *
         * AR_PMCTRL_WOW_PME_CLR - WoW Clear Signal going to the MAC.
         *
         * Set the power states appropriately and enable PME.
         *
         * Set and clear WOW_PME_CLEAR for the chip
         * to generate next wow signal.
         */
        REG_SET_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_HOST_PME_EN |
                    AR_PMCTRL_PWR_PM_CTRL_ENA |
                    AR_PMCTRL_AUX_PWR_DET |
                    AR_PMCTRL_WOW_PME_CLR);
        REG_CLR_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_WOW_PME_CLR);

        /*
         * Random Backoff.
         *
         * 31:28 in AR_WOW_PATTERN : Indicates the number of bits used in the
         *                           contention window. For value N,
         *                           the random backoff will be selected between
         *                           0 and (2 ^ N) - 1.
         */
        REG_SET_BIT(ah, AR_WOW_PATTERN,
                    AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF));

        /*
         * AIFS time, Slot time, Keep Alive count.
         */
        REG_SET_BIT(ah, AR_WOW_COUNT, AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
                    AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
                    AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT));
        /*
         * Beacon timeout.
         */
        if (pattern_enable & AH_WOW_BEACON_MISS)
                REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO);
        else
                REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO_MAX);

        /*
         * Keep alive timeout in ms.
         */
        if (!pattern_enable)
                REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, AR_WOW_KEEP_ALIVE_NEVER);
        else
                REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, KAL_TIMEOUT * 32);

        /*
         * Keep alive delay in us.
         */
        REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, KAL_DELAY * 1000);

        /*
         * Create keep alive pattern to respond to beacons.
         */
        ath9k_wow_create_keep_alive_pattern(ah);

        /*
         * Configure keep alive register.
         */
        keep_alive = REG_READ(ah, AR_WOW_KEEP_ALIVE);

        /* Send keep alive timeouts anyway */
        keep_alive &= ~AR_WOW_KEEP_ALIVE_AUTO_DIS;

        if (pattern_enable & AH_WOW_LINK_CHANGE) {
                keep_alive &= ~AR_WOW_KEEP_ALIVE_FAIL_DIS;
                wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
        } else {
                keep_alive |= AR_WOW_KEEP_ALIVE_FAIL_DIS;
        }

        REG_WRITE(ah, AR_WOW_KEEP_ALIVE, keep_alive);

        /*
         * We are relying on a bmiss failure, ensure we have
         * enough threshold to prevent false positives.
         */
        REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
                      AR_WOW_BMISSTHRESHOLD);

        if (pattern_enable & AH_WOW_BEACON_MISS) {
                wow_event_mask |= AR_WOW_BEACON_FAIL;
                REG_SET_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
        } else {
                REG_CLR_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
        }

        /*
         * Enable the magic packet registers.
         */
        magic_pattern = REG_READ(ah, AR_WOW_PATTERN);
        magic_pattern |= AR_WOW_MAC_INTR_EN;

        if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
                magic_pattern |= AR_WOW_MAGIC_EN;
                wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
        } else {
                magic_pattern &= ~AR_WOW_MAGIC_EN;
        }

        REG_WRITE(ah, AR_WOW_PATTERN, magic_pattern);

        /*
         * Enable pattern matching for packets which are less
         * than 256 bytes.
         */
        REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
                  AR_WOW_PATTERN_SUPPORTED);

        /*
         * Set the power states appropriately and enable PME.
         */
        host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL(ah));
        host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3 |
                        AR_PMCTRL_HOST_PME_EN |
                        AR_PMCTRL_PWR_PM_CTRL_ENA;
        host_pm_ctrl &= ~AR_PCIE_PM_CTRL_ENA;

        if (AR_SREV_9462(ah)) {
                /*
                 * This is needed to prevent the chip waking up
                 * the host within 3-4 seconds with certain
                 * platform/BIOS.
                 */
                host_pm_ctrl &= ~AR_PMCTRL_PWR_STATE_D1D3;
                host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3_REAL;
        }

        REG_WRITE(ah, AR_PCIE_PM_CTRL(ah), host_pm_ctrl);

        /*
         * Enable sequence number generation when asleep.
         */
        REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);

        /* To bring down WOW power low margin */
        REG_SET_BIT(ah, AR_PCIE_PHY_REG3, BIT(13));

        ath9k_hw_wow_set_arwr_reg(ah);

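        /*
         * With MCI/BT coexistence enabled, keep the RTC awake while the
         * MAC is in WoW sleep (presumably so the coex logic keeps running);
         * the same write is done in ath9k_hw_set_powermode_wow_sleep().
         */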
        if (ath9k_hw_mci_is_enabled(ah))
                REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);

        /* HW WoW */
        REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5));

        ath9k_hw_set_powermode_wow_sleep(ah);
        ah->wow.wow_event_mask = wow_event_mask;
}
EXPORT_SYMBOL(ath9k_hw_wow_enable);