drivers/net/wireless/ath/ath11k/wow.c (v6.8)
  1// SPDX-License-Identifier: BSD-3-Clause-Clear
  2/*
  3 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
  4 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5 */
  6
  7#include <linux/delay.h>
  8
  9#include "mac.h"
 10
 11#include <net/mac80211.h>
 12#include "core.h"
 13#include "hif.h"
 14#include "debug.h"
 15#include "wmi.h"
 16#include "wow.h"
 17#include "dp_rx.h"
 18
 19static const struct wiphy_wowlan_support ath11k_wowlan_support = {
 20	.flags = WIPHY_WOWLAN_DISCONNECT |
 21		 WIPHY_WOWLAN_MAGIC_PKT |
 22		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
 23		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
 24	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
 25	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
 26	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
 27};
 28
 29int ath11k_wow_enable(struct ath11k_base *ab)
 30{
 31	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
 32	int i, ret;
 33
 34	clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
 35
 36	for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
 37		reinit_completion(&ab->htc_suspend);
 38
 39		ret = ath11k_wmi_wow_enable(ar);
 40		if (ret) {
 41			ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
 42			return ret;
 43		}
 44
 45		ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
 46		if (ret == 0) {
 47			ath11k_warn(ab,
 48				    "timed out while waiting for htc suspend completion\n");
 49			return -ETIMEDOUT;
 50		}
 51
 52		if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
 53			/* success, suspend complete received */
 54			return 0;
 55
 56		ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
 57			    i);
 58		msleep(ATH11K_WOW_RETRY_WAIT_MS);
 59	}
 60
 61	ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
 62
 63	return -ETIMEDOUT;
 64}
 65
 66int ath11k_wow_wakeup(struct ath11k_base *ab)
 67{
 68	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
 69	int ret;
 70
 71	/* In the case of WCN6750, WoW wakeup is done
 72	 * by sending SMP2P power save exit message
 73	 * to the target processor.
 74	 */
 75	if (ab->hw_params.smp2p_wow_exit)
 76		return 0;
 77
 78	reinit_completion(&ab->wow.wakeup_completed);
 79
 80	ret = ath11k_wmi_wow_host_wakeup_ind(ar);
 81	if (ret) {
 82		ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
 83			    ret);
 84		return ret;
 85	}
 86
 87	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
 88	if (ret == 0) {
 89		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
 90		return -ETIMEDOUT;
 91	}
 92
 93	return 0;
 94}
 95
 96static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
 97{
 98	struct ath11k *ar = arvif->ar;
 99	int i, ret;
100
101	for (i = 0; i < WOW_EVENT_MAX; i++) {
102		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
103		if (ret) {
104			ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
105				    wow_wakeup_event(i), arvif->vdev_id, ret);
106			return ret;
107		}
108	}
109
110	for (i = 0; i < ar->wow.max_num_patterns; i++) {
111		ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
112		if (ret) {
113			ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
114				    i, arvif->vdev_id, ret);
115			return ret;
116		}
117	}
118
119	return 0;
120}
121
122static int ath11k_wow_cleanup(struct ath11k *ar)
123{
124	struct ath11k_vif *arvif;
125	int ret;
126
127	lockdep_assert_held(&ar->conf_mutex);
128
129	list_for_each_entry(arvif, &ar->arvifs, list) {
130		ret = ath11k_wow_vif_cleanup(arvif);
131		if (ret) {
132			ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
133				    arvif->vdev_id, ret);
134			return ret;
135		}
136	}
137
138	return 0;
139}
140
141/* Convert a 802.3 format to a 802.11 format.
142 *         +------------+-----------+--------+----------------+
143 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
144 *         +------------+-----------+--------+----------------+
145 *                |__         |_______    |____________  |________
146 *                   |                |                |          |
147 *         +--+------------+----+-----------+---------------+-----------+
148 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
149 *         +--+------------+----+-----------+---------------+-----------+
150 */
151static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
152					     const struct cfg80211_pkt_pattern *old)
153{
154	u8 hdr_8023_pattern[ETH_HLEN] = {};
155	u8 hdr_8023_bit_mask[ETH_HLEN] = {};
156	u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
157	u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
158
159	int total_len = old->pkt_offset + old->pattern_len;
160	int hdr_80211_end_offset;
161
162	struct ieee80211_hdr_3addr *new_hdr_pattern =
163		(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
164	struct ieee80211_hdr_3addr *new_hdr_mask =
165		(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
166	struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
167	struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
168	int hdr_len = sizeof(*new_hdr_pattern);
169
170	struct rfc1042_hdr *new_rfc_pattern =
171		(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
172	struct rfc1042_hdr *new_rfc_mask =
173		(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
174	int rfc_len = sizeof(*new_rfc_pattern);
175
176	memcpy(hdr_8023_pattern + old->pkt_offset,
177	       old->pattern, ETH_HLEN - old->pkt_offset);
178	memcpy(hdr_8023_bit_mask + old->pkt_offset,
179	       old->mask, ETH_HLEN - old->pkt_offset);
180
181	/* Copy destination address */
182	memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
183	memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
184
185	/* Copy source address */
186	memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
187	memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
188
189	/* Copy logic link type */
190	memcpy(&new_rfc_pattern->snap_type,
191	       &old_hdr_pattern->h_proto,
192	       sizeof(old_hdr_pattern->h_proto));
193	memcpy(&new_rfc_mask->snap_type,
194	       &old_hdr_mask->h_proto,
195	       sizeof(old_hdr_mask->h_proto));
196
197	/* Compute new pkt_offset */
198	if (old->pkt_offset < ETH_ALEN)
199		new->pkt_offset = old->pkt_offset +
200			offsetof(struct ieee80211_hdr_3addr, addr1);
201	else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
202		new->pkt_offset = old->pkt_offset +
203			offsetof(struct ieee80211_hdr_3addr, addr3) -
204			offsetof(struct ethhdr, h_source);
205	else
206		new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
207
208	/* Compute new hdr end offset */
209	if (total_len > ETH_HLEN)
210		hdr_80211_end_offset = hdr_len + rfc_len;
211	else if (total_len > offsetof(struct ethhdr, h_proto))
212		hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
213	else if (total_len > ETH_ALEN)
214		hdr_80211_end_offset = total_len - ETH_ALEN +
215			offsetof(struct ieee80211_hdr_3addr, addr3);
216	else
217		hdr_80211_end_offset = total_len +
218			offsetof(struct ieee80211_hdr_3addr, addr1);
219
220	new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
221
222	memcpy((u8 *)new->pattern,
223	       hdr_80211_pattern + new->pkt_offset,
224	       new->pattern_len);
225	memcpy((u8 *)new->mask,
226	       hdr_80211_bit_mask + new->pkt_offset,
227	       new->pattern_len);
228
229	if (total_len > ETH_HLEN) {
230		/* Copy frame body */
231		memcpy((u8 *)new->pattern + new->pattern_len,
232		       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
233		       total_len - ETH_HLEN);
234		memcpy((u8 *)new->mask + new->pattern_len,
235		       (void *)old->mask + ETH_HLEN - old->pkt_offset,
236		       total_len - ETH_HLEN);
237
238		new->pattern_len += total_len - ETH_HLEN;
239	}
240}
241
242static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
243					    struct cfg80211_sched_scan_request *nd_config,
244					    struct wmi_pno_scan_req *pno)
245{
246	int i, j;
247	u8 ssid_len;
248
249	pno->enable = 1;
250	pno->vdev_id = vdev_id;
251	pno->uc_networks_count = nd_config->n_match_sets;
252
253	if (!pno->uc_networks_count ||
254	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
255		return -EINVAL;
256
257	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
258		return -EINVAL;
259
260	/* Filling per profile params */
261	for (i = 0; i < pno->uc_networks_count; i++) {
262		ssid_len = nd_config->match_sets[i].ssid.ssid_len;
263
264		if (ssid_len == 0 || ssid_len > 32)
265			return -EINVAL;
266
267		pno->a_networks[i].ssid.ssid_len = ssid_len;
268
269		memcpy(pno->a_networks[i].ssid.ssid,
270		       nd_config->match_sets[i].ssid.ssid,
271		       nd_config->match_sets[i].ssid.ssid_len);
272		pno->a_networks[i].authentication = 0;
273		pno->a_networks[i].encryption     = 0;
274		pno->a_networks[i].bcast_nw_type  = 0;
275
276		/* Copying list of valid channel into request */
277		pno->a_networks[i].channel_count = nd_config->n_channels;
278		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
279
280		for (j = 0; j < nd_config->n_channels; j++) {
281			pno->a_networks[i].channels[j] =
282					nd_config->channels[j]->center_freq;
283		}
284	}
285
286	/* set scan to passive if no SSIDs are specified in the request */
287	if (nd_config->n_ssids == 0)
288		pno->do_passive_scan = true;
289	else
290		pno->do_passive_scan = false;
291
292	for (i = 0; i < nd_config->n_ssids; i++) {
293		j = 0;
294		while (j < pno->uc_networks_count) {
295			if (pno->a_networks[j].ssid.ssid_len ==
296				nd_config->ssids[i].ssid_len &&
297			(memcmp(pno->a_networks[j].ssid.ssid,
298				nd_config->ssids[i].ssid,
299				pno->a_networks[j].ssid.ssid_len) == 0)) {
300				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
301				break;
302			}
303			j++;
304		}
305	}
306
307	if (nd_config->n_scan_plans == 2) {
308		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
309		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
310		pno->slow_scan_period =
311			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
312	} else if (nd_config->n_scan_plans == 1) {
313		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
314		pno->fast_scan_max_cycles = 1;
315		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
316	} else {
317		ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
318			    nd_config->n_scan_plans);
319	}
320
321	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
322		/* enable mac randomization */
323		pno->enable_pno_scan_randomization = 1;
324		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
325		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
326	}
327
328	pno->delay_start_time = nd_config->delay;
329
330	/* Current FW does not support min-max range for dwell time */
331	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
332	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
333
334	return 0;
335}
336
337static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
338				      struct cfg80211_wowlan *wowlan)
339{
340	int ret, i;
341	unsigned long wow_mask = 0;
342	struct ath11k *ar = arvif->ar;
343	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
344	int pattern_id = 0;
345
346	/* Setup requested WOW features */
347	switch (arvif->vdev_type) {
348	case WMI_VDEV_TYPE_IBSS:
349		__set_bit(WOW_BEACON_EVENT, &wow_mask);
350		fallthrough;
351	case WMI_VDEV_TYPE_AP:
352		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
353		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
354		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
355		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
356		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
357		__set_bit(WOW_HTT_EVENT, &wow_mask);
358		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
359		break;
360	case WMI_VDEV_TYPE_STA:
361		if (wowlan->disconnect) {
362			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
363			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
364			__set_bit(WOW_BMISS_EVENT, &wow_mask);
365			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
366		}
367
368		if (wowlan->magic_pkt)
369			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
370
371		if (wowlan->nd_config) {
372			struct wmi_pno_scan_req *pno;
373			int ret;
374
375			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
376			if (!pno)
377				return -ENOMEM;
378
379			ar->nlo_enabled = true;
380
381			ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
382							       wowlan->nd_config, pno);
383			if (!ret) {
384				ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
385				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
386			}
387
388			kfree(pno);
389		}
390		break;
391	default:
392		break;
393	}
394
395	for (i = 0; i < wowlan->n_patterns; i++) {
396		u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
397		u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
398		u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
399		struct cfg80211_pkt_pattern new_pattern = {};
400		struct cfg80211_pkt_pattern old_pattern = patterns[i];
401		int j;
402
403		new_pattern.pattern = ath_pattern;
404		new_pattern.mask = ath_bitmask;
405		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
406			continue;
407		/* convert bytemask to bitmask */
408		for (j = 0; j < patterns[i].pattern_len; j++)
409			if (patterns[i].mask[j / 8] & BIT(j % 8))
410				bitmask[j] = 0xff;
411		old_pattern.mask = bitmask;
412
413		if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
414		    ATH11K_HW_TXRX_NATIVE_WIFI) {
415			if (patterns[i].pkt_offset < ETH_HLEN) {
416				u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};
417
418				memcpy(pattern_ext, old_pattern.pattern,
419				       old_pattern.pattern_len);
420				old_pattern.pattern = pattern_ext;
421				ath11k_wow_convert_8023_to_80211(&new_pattern,
422								 &old_pattern);
423			} else {
424				new_pattern = old_pattern;
425				new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
426			}
427		}
428
429		if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
430			return -EINVAL;
431
432		ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
433						 pattern_id,
434						 new_pattern.pattern,
435						 new_pattern.mask,
436						 new_pattern.pattern_len,
437						 new_pattern.pkt_offset);
438		if (ret) {
439			ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
440				    pattern_id,
441				    arvif->vdev_id, ret);
442			return ret;
443		}
444
445		pattern_id++;
446		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
447	}
448
449	for (i = 0; i < WOW_EVENT_MAX; i++) {
450		if (!test_bit(i, &wow_mask))
451			continue;
452		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
453		if (ret) {
454			ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
455				    wow_wakeup_event(i), arvif->vdev_id, ret);
456			return ret;
457		}
458	}
459
460	return 0;
461}
462
463static int ath11k_wow_set_wakeups(struct ath11k *ar,
464				  struct cfg80211_wowlan *wowlan)
465{
466	struct ath11k_vif *arvif;
467	int ret;
468
469	lockdep_assert_held(&ar->conf_mutex);
470
471	list_for_each_entry(arvif, &ar->arvifs, list) {
472		ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
473		if (ret) {
474			ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
475				    arvif->vdev_id, ret);
476			return ret;
477		}
478	}
479
480	return 0;
481}
482
483static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
484{
485	int ret = 0;
486	struct ath11k *ar = arvif->ar;
487
488	switch (arvif->vdev_type) {
489	case WMI_VDEV_TYPE_STA:
490		if (ar->nlo_enabled) {
491			struct wmi_pno_scan_req *pno;
492
493			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
494			if (!pno)
495				return -ENOMEM;
496
497			pno->enable = 0;
498			ar->nlo_enabled = false;
499			ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
500			kfree(pno);
501		}
502		break;
503	default:
504		break;
505	}
506	return ret;
507}
508
509static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
510{
511	struct ath11k_vif *arvif;
512	int ret;
513
514	lockdep_assert_held(&ar->conf_mutex);
515
516	list_for_each_entry(arvif, &ar->arvifs, list) {
517		ret = ath11k_vif_wow_clean_nlo(arvif);
518		if (ret) {
519			ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
520				    arvif->vdev_id, ret);
521			return ret;
522		}
523	}
524
525	return 0;
526}
527
528static int ath11k_wow_set_hw_filter(struct ath11k *ar)
529{
530	struct ath11k_vif *arvif;
531	u32 bitmap;
532	int ret;
533
534	lockdep_assert_held(&ar->conf_mutex);
535
536	list_for_each_entry(arvif, &ar->arvifs, list) {
537		bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
538			WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
539		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
540						    bitmap,
541						    true);
542		if (ret) {
543			ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
544				    arvif->vdev_id, ret);
545			return ret;
546		}
547	}
548
549	return 0;
550}
551
552static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
553{
554	struct ath11k_vif *arvif;
555	int ret;
556
557	lockdep_assert_held(&ar->conf_mutex);
558
559	list_for_each_entry(arvif, &ar->arvifs, list) {
560		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
561
562		if (ret) {
563			ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
564				    arvif->vdev_id, ret);
565			return ret;
566		}
567	}
568
569	return 0;
570}
571
572static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
573{
574	struct ath11k_vif *arvif;
575	int ret;
576
577	lockdep_assert_held(&ar->conf_mutex);
578
579	list_for_each_entry(arvif, &ar->arvifs, list) {
580		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
581			continue;
582
583		ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
584
585		if (ret) {
586			ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
587				    arvif->vdev_id, enable, ret);
588			return ret;
589		}
590	}
591
592	return 0;
593}
594
595static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
596{
597	struct ath11k_vif *arvif;
598	int ret;
599
600	lockdep_assert_held(&ar->conf_mutex);
601
602	list_for_each_entry(arvif, &ar->arvifs, list) {
603		if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
604		    !arvif->is_up ||
605		    !arvif->rekey_data.enable_offload)
606			continue;
607
608		/* get rekey info before disable rekey offload */
609		if (!enable) {
610			ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
611			if (ret) {
612				ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
613					    arvif->vdev_id, ret);
614				return ret;
615			}
616		}
617
618		ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);
619
620		if (ret) {
621			ath11k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
622				    arvif->vdev_id, enable, ret);
623			return ret;
624		}
625	}
626
627	return 0;
628}
629
630static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
631{
632	int ret;
633
634	ret = ath11k_wow_arp_ns_offload(ar, enable);
635	if (ret) {
636		ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
637			    enable, ret);
638		return ret;
639	}
640
641	ret = ath11k_gtk_rekey_offload(ar, enable);
642	if (ret) {
643		ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
644			    enable, ret);
645		return ret;
646	}
647
648	return 0;
649}
650
651static int ath11k_wow_set_keepalive(struct ath11k *ar,
652				    enum wmi_sta_keepalive_method method,
653				    u32 interval)
654{
655	struct ath11k_vif *arvif;
656	int ret;
657
658	lockdep_assert_held(&ar->conf_mutex);
659
660	list_for_each_entry(arvif, &ar->arvifs, list) {
661		ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
662		if (ret)
663			return ret;
664	}
665
666	return 0;
667}
668
669int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
670			  struct cfg80211_wowlan *wowlan)
671{
672	struct ath11k *ar = hw->priv;
673	int ret;
674
675	ret = ath11k_mac_wait_tx_complete(ar);
676	if (ret) {
677		ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
678		return ret;
679	}
680
681	mutex_lock(&ar->conf_mutex);
682
683	ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
684	if (ret) {
685		ath11k_warn(ar->ab,
686			    "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
687			    ret);
688		goto exit;
689	}
690
691	ret =  ath11k_wow_cleanup(ar);
692	if (ret) {
693		ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
694			    ret);
695		goto exit;
696	}
697
698	ret = ath11k_wow_set_wakeups(ar, wowlan);
699	if (ret) {
700		ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
701			    ret);
702		goto cleanup;
703	}
704
705	ret = ath11k_wow_protocol_offload(ar, true);
706	if (ret) {
707		ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
708			    ret);
709		goto cleanup;
710	}
711
712	ret = ath11k_wow_set_hw_filter(ar);
713	if (ret) {
714		ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
715			    ret);
716		goto cleanup;
717	}
718
719	ret = ath11k_wow_set_keepalive(ar,
720				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
721				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
722	if (ret) {
723		ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
724		goto cleanup;
725	}
726
727	ret = ath11k_wow_enable(ar->ab);
728	if (ret) {
729		ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
730		goto cleanup;
731	}
732
733	ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
734	if (ret) {
735		ath11k_warn(ar->ab,
736			    "failed to stop dp rx pktlog during wow suspend: %d\n",
737			    ret);
738		goto cleanup;
739	}
740
741	ath11k_ce_stop_shadow_timers(ar->ab);
742	ath11k_dp_stop_shadow_timers(ar->ab);
743
744	ath11k_hif_irq_disable(ar->ab);
745	ath11k_hif_ce_irq_disable(ar->ab);
746
747	ret = ath11k_hif_suspend(ar->ab);
748	if (ret) {
749		ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
750		goto wakeup;
751	}
752
753	goto exit;
754
755wakeup:
756	ath11k_wow_wakeup(ar->ab);
757
758cleanup:
759	ath11k_wow_cleanup(ar);
760
761exit:
762	mutex_unlock(&ar->conf_mutex);
763	return ret ? 1 : 0;
764}
765
766void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
767{
768	struct ath11k *ar = hw->priv;
769
770	mutex_lock(&ar->conf_mutex);
771	device_set_wakeup_enable(ar->ab->dev, enabled);
772	mutex_unlock(&ar->conf_mutex);
773}
774
775int ath11k_wow_op_resume(struct ieee80211_hw *hw)
776{
777	struct ath11k *ar = hw->priv;
778	int ret;
779
780	mutex_lock(&ar->conf_mutex);
781
782	ret = ath11k_hif_resume(ar->ab);
783	if (ret) {
784		ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
785		goto exit;
786	}
787
788	ath11k_hif_ce_irq_enable(ar->ab);
789	ath11k_hif_irq_enable(ar->ab);
790
791	ret = ath11k_dp_rx_pktlog_start(ar->ab);
792	if (ret) {
793		ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
794		goto exit;
795	}
796
797	ret = ath11k_wow_wakeup(ar->ab);
798	if (ret) {
799		ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
800		goto exit;
801	}
802
803	ret = ath11k_wow_nlo_cleanup(ar);
804	if (ret) {
805		ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
806		goto exit;
807	}
808
809	ret = ath11k_wow_clear_hw_filter(ar);
810	if (ret) {
811		ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
812		goto exit;
813	}
814
815	ret = ath11k_wow_protocol_offload(ar, false);
816	if (ret) {
817		ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
818			    ret);
819		goto exit;
820	}
821
822	ret = ath11k_wow_set_keepalive(ar,
823				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
824				       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
825	if (ret) {
826		ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
827		goto exit;
828	}
829
830exit:
831	if (ret) {
832		switch (ar->state) {
833		case ATH11K_STATE_ON:
834			ar->state = ATH11K_STATE_RESTARTING;
835			ret = 1;
836			break;
837		case ATH11K_STATE_OFF:
838		case ATH11K_STATE_RESTARTING:
839		case ATH11K_STATE_RESTARTED:
840		case ATH11K_STATE_WEDGED:
841		case ATH11K_STATE_FTM:
842			ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
843				    ar->state);
844			ret = -EIO;
845			break;
846		}
847	}
848
849	mutex_unlock(&ar->conf_mutex);
850	return ret;
851}
852
853int ath11k_wow_init(struct ath11k *ar)
854{
855	if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
856		return 0;
857
858	ar->wow.wowlan_support = ath11k_wowlan_support;
859
860	if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
861	    ATH11K_HW_TXRX_NATIVE_WIFI) {
862		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
863		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
864	}
865
866	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
867		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
868		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
869	}
870
871	ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
872	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
873	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
874
875	device_set_wakeup_capable(ar->ab->dev, true);
876
877	return 0;
878}
drivers/net/wireless/ath/ath11k/wow.c (v6.13.7)
  1// SPDX-License-Identifier: BSD-3-Clause-Clear
  2/*
  3 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
  4 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  5 */
  6
  7#include <linux/delay.h>
  8
  9#include "mac.h"
 10
 11#include <net/mac80211.h>
 12#include "core.h"
 13#include "hif.h"
 14#include "debug.h"
 15#include "wmi.h"
 16#include "wow.h"
 17#include "dp_rx.h"
 18
 19static const struct wiphy_wowlan_support ath11k_wowlan_support = {
 20	.flags = WIPHY_WOWLAN_DISCONNECT |
 21		 WIPHY_WOWLAN_MAGIC_PKT |
 22		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
 23		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
 24	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
 25	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
 26	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
 27};
 28
 29int ath11k_wow_enable(struct ath11k_base *ab)
 30{
 31	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
 32	int i, ret;
 33
 34	clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
 35
 36	for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
 37		reinit_completion(&ab->htc_suspend);
 38
 39		ret = ath11k_wmi_wow_enable(ar);
 40		if (ret) {
 41			ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
 42			return ret;
 43		}
 44
 45		ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
 46		if (ret == 0) {
 47			ath11k_warn(ab,
 48				    "timed out while waiting for htc suspend completion\n");
 49			return -ETIMEDOUT;
 50		}
 51
 52		if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
 53			/* success, suspend complete received */
 54			return 0;
 55
 56		ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
 57			    i);
 58		msleep(ATH11K_WOW_RETRY_WAIT_MS);
 59	}
 60
 61	ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
 62
 63	return -ETIMEDOUT;
 64}
 65
 66int ath11k_wow_wakeup(struct ath11k_base *ab)
 67{
 68	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
 69	int ret;
 70
 71	/* In the case of WCN6750, WoW wakeup is done
 72	 * by sending SMP2P power save exit message
 73	 * to the target processor.
 74	 */
 75	if (ab->hw_params.smp2p_wow_exit)
 76		return 0;
 77
 78	reinit_completion(&ab->wow.wakeup_completed);
 79
 80	ret = ath11k_wmi_wow_host_wakeup_ind(ar);
 81	if (ret) {
 82		ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
 83			    ret);
 84		return ret;
 85	}
 86
 87	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
 88	if (ret == 0) {
 89		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
 90		return -ETIMEDOUT;
 91	}
 92
 93	return 0;
 94}
 95
 96static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
 97{
 98	struct ath11k *ar = arvif->ar;
 99	int i, ret;
100
101	for (i = 0; i < WOW_EVENT_MAX; i++) {
102		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
103		if (ret) {
104			ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
105				    wow_wakeup_event(i), arvif->vdev_id, ret);
106			return ret;
107		}
108	}
109
110	for (i = 0; i < ar->wow.max_num_patterns; i++) {
111		ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
112		if (ret) {
113			ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
114				    i, arvif->vdev_id, ret);
115			return ret;
116		}
117	}
118
119	return 0;
120}
121
122static int ath11k_wow_cleanup(struct ath11k *ar)
123{
124	struct ath11k_vif *arvif;
125	int ret;
126
127	lockdep_assert_held(&ar->conf_mutex);
128
129	list_for_each_entry(arvif, &ar->arvifs, list) {
130		ret = ath11k_wow_vif_cleanup(arvif);
131		if (ret) {
132			ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
133				    arvif->vdev_id, ret);
134			return ret;
135		}
136	}
137
138	return 0;
139}
140
141/* Convert a 802.3 format to a 802.11 format.
142 *         +------------+-----------+--------+----------------+
143 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
144 *         +------------+-----------+--------+----------------+
145 *                |__         |_______    |____________  |________
146 *                   |                |                |          |
147 *         +--+------------+----+-----------+---------------+-----------+
148 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
149 *         +--+------------+----+-----------+---------------+-----------+
150 */
151static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
152					     const struct cfg80211_pkt_pattern *old)
153{
154	u8 hdr_8023_pattern[ETH_HLEN] = {};
155	u8 hdr_8023_bit_mask[ETH_HLEN] = {};
156	u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
157	u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
158	u8 bytemask[WOW_MAX_PATTERN_SIZE] = {};
159
160	int total_len = old->pkt_offset + old->pattern_len;
161	int hdr_80211_end_offset;
162
163	struct ieee80211_hdr_3addr *new_hdr_pattern =
164		(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
165	struct ieee80211_hdr_3addr *new_hdr_mask =
166		(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
167	struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
168	struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
169	int hdr_len = sizeof(*new_hdr_pattern);
170
171	struct rfc1042_hdr *new_rfc_pattern =
172		(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
173	struct rfc1042_hdr *new_rfc_mask =
174		(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
175	int rfc_len = sizeof(*new_rfc_pattern);
176	int i;
177
178	/* convert bitmask to bytemask */
179	for (i = 0; i < old->pattern_len; i++)
180		if (old->mask[i / 8] & BIT(i % 8))
181			bytemask[i] = 0xff;
182
183	memcpy(hdr_8023_pattern + old->pkt_offset,
184	       old->pattern, ETH_HLEN - old->pkt_offset);
185	memcpy(hdr_8023_bit_mask + old->pkt_offset,
186	       bytemask, ETH_HLEN - old->pkt_offset);
187
188	/* Copy destination address */
189	memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
190	memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
191
192	/* Copy source address */
193	memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
194	memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
195
196	/* Copy logic link type */
197	memcpy(&new_rfc_pattern->snap_type,
198	       &old_hdr_pattern->h_proto,
199	       sizeof(old_hdr_pattern->h_proto));
200	memcpy(&new_rfc_mask->snap_type,
201	       &old_hdr_mask->h_proto,
202	       sizeof(old_hdr_mask->h_proto));
203
204	/* Compute new pkt_offset */
205	if (old->pkt_offset < ETH_ALEN)
206		new->pkt_offset = old->pkt_offset +
207			offsetof(struct ieee80211_hdr_3addr, addr1);
208	else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
209		new->pkt_offset = old->pkt_offset +
210			offsetof(struct ieee80211_hdr_3addr, addr3) -
211			offsetof(struct ethhdr, h_source);
212	else
213		new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
214
215	/* Compute new hdr end offset */
216	if (total_len > ETH_HLEN)
217		hdr_80211_end_offset = hdr_len + rfc_len;
218	else if (total_len > offsetof(struct ethhdr, h_proto))
219		hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
220	else if (total_len > ETH_ALEN)
221		hdr_80211_end_offset = total_len - ETH_ALEN +
222			offsetof(struct ieee80211_hdr_3addr, addr3);
223	else
224		hdr_80211_end_offset = total_len +
225			offsetof(struct ieee80211_hdr_3addr, addr1);
226
227	new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
228
229	memcpy((u8 *)new->pattern,
230	       hdr_80211_pattern + new->pkt_offset,
231	       new->pattern_len);
232	memcpy((u8 *)new->mask,
233	       hdr_80211_bit_mask + new->pkt_offset,
234	       new->pattern_len);
235
236	if (total_len > ETH_HLEN) {
237		/* Copy frame body */
238		memcpy((u8 *)new->pattern + new->pattern_len,
239		       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
240		       total_len - ETH_HLEN);
241		memcpy((u8 *)new->mask + new->pattern_len,
242		       bytemask + ETH_HLEN - old->pkt_offset,
243		       total_len - ETH_HLEN);
244
245		new->pattern_len += total_len - ETH_HLEN;
246	}
247}
248
249static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
250					    struct cfg80211_sched_scan_request *nd_config,
251					    struct wmi_pno_scan_req *pno)
252{
253	int i, j;
254	u8 ssid_len;
255
256	pno->enable = 1;
257	pno->vdev_id = vdev_id;
258	pno->uc_networks_count = nd_config->n_match_sets;
259
260	if (!pno->uc_networks_count ||
261	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
262		return -EINVAL;
263
264	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
265		return -EINVAL;
266
267	/* Filling per profile params */
268	for (i = 0; i < pno->uc_networks_count; i++) {
269		ssid_len = nd_config->match_sets[i].ssid.ssid_len;
270
271		if (ssid_len == 0 || ssid_len > 32)
272			return -EINVAL;
273
274		pno->a_networks[i].ssid.ssid_len = ssid_len;
275
276		memcpy(pno->a_networks[i].ssid.ssid,
277		       nd_config->match_sets[i].ssid.ssid,
278		       nd_config->match_sets[i].ssid.ssid_len);
279		pno->a_networks[i].authentication = 0;
280		pno->a_networks[i].encryption     = 0;
281		pno->a_networks[i].bcast_nw_type  = 0;
282
283		/* Copying list of valid channel into request */
284		pno->a_networks[i].channel_count = nd_config->n_channels;
285		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
286
287		for (j = 0; j < nd_config->n_channels; j++) {
288			pno->a_networks[i].channels[j] =
289					nd_config->channels[j]->center_freq;
290		}
291	}
292
293	/* set scan to passive if no SSIDs are specified in the request */
294	if (nd_config->n_ssids == 0)
295		pno->do_passive_scan = true;
296	else
297		pno->do_passive_scan = false;
298
299	for (i = 0; i < nd_config->n_ssids; i++) {
300		j = 0;
301		while (j < pno->uc_networks_count) {
302			if (pno->a_networks[j].ssid.ssid_len ==
303				nd_config->ssids[i].ssid_len &&
304			(memcmp(pno->a_networks[j].ssid.ssid,
305				nd_config->ssids[i].ssid,
306				pno->a_networks[j].ssid.ssid_len) == 0)) {
307				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
308				break;
309			}
310			j++;
311		}
312	}
313
314	if (nd_config->n_scan_plans == 2) {
315		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
316		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
317		pno->slow_scan_period =
318			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
319	} else if (nd_config->n_scan_plans == 1) {
320		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
321		pno->fast_scan_max_cycles = 1;
322		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
323	} else {
324		ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
325			    nd_config->n_scan_plans);
326	}
327
328	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
329		/* enable mac randomization */
330		pno->enable_pno_scan_randomization = 1;
331		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
332		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
333	}
334
335	pno->delay_start_time = nd_config->delay;
336
337	/* Current FW does not support min-max range for dwell time */
338	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
339	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
340
341	return 0;
342}
343
344static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
345				      struct cfg80211_wowlan *wowlan)
346{
347	int ret, i;
348	unsigned long wow_mask = 0;
349	struct ath11k *ar = arvif->ar;
350	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
351	int pattern_id = 0;
352
353	/* Setup requested WOW features */
354	switch (arvif->vdev_type) {
355	case WMI_VDEV_TYPE_IBSS:
356		__set_bit(WOW_BEACON_EVENT, &wow_mask);
357		fallthrough;
358	case WMI_VDEV_TYPE_AP:
359		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
360		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
361		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
362		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
363		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
364		__set_bit(WOW_HTT_EVENT, &wow_mask);
365		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
366		break;
367	case WMI_VDEV_TYPE_STA:
368		if (wowlan->disconnect) {
369			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
370			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
371			__set_bit(WOW_BMISS_EVENT, &wow_mask);
372			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
373		}
374
375		if (wowlan->magic_pkt)
376			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
377
378		if (wowlan->nd_config) {
379			struct wmi_pno_scan_req *pno;
380			int ret;
381
382			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
383			if (!pno)
384				return -ENOMEM;
385
386			ar->nlo_enabled = true;
387
388			ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
389							       wowlan->nd_config, pno);
390			if (!ret) {
391				ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
392				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
393			}
394
395			kfree(pno);
396		}
397		break;
398	default:
399		break;
400	}
401
402	for (i = 0; i < wowlan->n_patterns; i++) {
403		u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
404		u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
405		struct cfg80211_pkt_pattern new_pattern = {};
406
407		new_pattern.pattern = ath_pattern;
408		new_pattern.mask = ath_bitmask;
409		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
410			continue;
411
412		if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
413		    ATH11K_HW_TXRX_NATIVE_WIFI) {
414			if (patterns[i].pkt_offset < ETH_HLEN) {
415				ath11k_wow_convert_8023_to_80211(&new_pattern,
416								 &patterns[i]);
417			} else {
418				int j;
419
420				new_pattern = patterns[i];
421				new_pattern.mask = ath_bitmask;
422
423				/* convert bitmask to bytemask */
424				for (j = 0; j < patterns[i].pattern_len; j++)
425					if (patterns[i].mask[j / 8] & BIT(j % 8))
426						ath_bitmask[j] = 0xff;
427
428				new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
429			}
430		}
431
432		if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
433			return -EINVAL;
434
435		ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
436						 pattern_id,
437						 new_pattern.pattern,
438						 new_pattern.mask,
439						 new_pattern.pattern_len,
440						 new_pattern.pkt_offset);
441		if (ret) {
442			ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
443				    pattern_id,
444				    arvif->vdev_id, ret);
445			return ret;
446		}
447
448		pattern_id++;
449		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
450	}
451
452	for (i = 0; i < WOW_EVENT_MAX; i++) {
453		if (!test_bit(i, &wow_mask))
454			continue;
455		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
456		if (ret) {
457			ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
458				    wow_wakeup_event(i), arvif->vdev_id, ret);
459			return ret;
460		}
461	}
462
463	return 0;
464}
465
466static int ath11k_wow_set_wakeups(struct ath11k *ar,
467				  struct cfg80211_wowlan *wowlan)
468{
469	struct ath11k_vif *arvif;
470	int ret;
471
472	lockdep_assert_held(&ar->conf_mutex);
473
474	list_for_each_entry(arvif, &ar->arvifs, list) {
475		ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
476		if (ret) {
477			ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
478				    arvif->vdev_id, ret);
479			return ret;
480		}
481	}
482
483	return 0;
484}
485
486static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
487{
488	int ret = 0;
489	struct ath11k *ar = arvif->ar;
490
491	switch (arvif->vdev_type) {
492	case WMI_VDEV_TYPE_STA:
493		if (ar->nlo_enabled) {
494			struct wmi_pno_scan_req *pno;
495
496			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
497			if (!pno)
498				return -ENOMEM;
499
500			pno->enable = 0;
501			ar->nlo_enabled = false;
502			ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
503			kfree(pno);
504		}
505		break;
506	default:
507		break;
508	}
509	return ret;
510}
511
512static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
513{
514	struct ath11k_vif *arvif;
515	int ret;
516
517	lockdep_assert_held(&ar->conf_mutex);
518
519	list_for_each_entry(arvif, &ar->arvifs, list) {
520		ret = ath11k_vif_wow_clean_nlo(arvif);
521		if (ret) {
522			ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
523				    arvif->vdev_id, ret);
524			return ret;
525		}
526	}
527
528	return 0;
529}
530
531static int ath11k_wow_set_hw_filter(struct ath11k *ar)
532{
533	struct ath11k_vif *arvif;
534	u32 bitmap;
535	int ret;
536
537	lockdep_assert_held(&ar->conf_mutex);
538
539	list_for_each_entry(arvif, &ar->arvifs, list) {
540		bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
541			WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
542		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
543						    bitmap,
544						    true);
545		if (ret) {
546			ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
547				    arvif->vdev_id, ret);
548			return ret;
549		}
550	}
551
552	return 0;
553}
554
555static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
556{
557	struct ath11k_vif *arvif;
558	int ret;
559
560	lockdep_assert_held(&ar->conf_mutex);
561
562	list_for_each_entry(arvif, &ar->arvifs, list) {
563		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
564
565		if (ret) {
566			ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
567				    arvif->vdev_id, ret);
568			return ret;
569		}
570	}
571
572	return 0;
573}
574
575static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
576{
577	struct ath11k_vif *arvif;
578	int ret;
579
580	lockdep_assert_held(&ar->conf_mutex);
581
582	list_for_each_entry(arvif, &ar->arvifs, list) {
583		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
584			continue;
585
586		ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
587
588		if (ret) {
589			ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
590				    arvif->vdev_id, enable, ret);
591			return ret;
592		}
593	}
594
595	return 0;
596}
597
598static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
599{
600	struct ath11k_vif *arvif;
601	int ret;
602
603	lockdep_assert_held(&ar->conf_mutex);
604
605	list_for_each_entry(arvif, &ar->arvifs, list) {
606		if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
607		    !arvif->is_up ||
608		    !arvif->rekey_data.enable_offload)
609			continue;
610
611		/* get rekey info before disable rekey offload */
612		if (!enable) {
613			ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
614			if (ret) {
615				ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
616					    arvif->vdev_id, ret);
617				return ret;
618			}
619		}
620
621		ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);
622
623		if (ret) {
624			ath11k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
625				    arvif->vdev_id, enable, ret);
626			return ret;
627		}
628	}
629
630	return 0;
631}
632
633static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
634{
635	int ret;
636
637	ret = ath11k_wow_arp_ns_offload(ar, enable);
638	if (ret) {
639		ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
640			    enable, ret);
641		return ret;
642	}
643
644	ret = ath11k_gtk_rekey_offload(ar, enable);
645	if (ret) {
646		ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
647			    enable, ret);
648		return ret;
649	}
650
651	return 0;
652}
653
654static int ath11k_wow_set_keepalive(struct ath11k *ar,
655				    enum wmi_sta_keepalive_method method,
656				    u32 interval)
657{
658	struct ath11k_vif *arvif;
659	int ret;
660
661	lockdep_assert_held(&ar->conf_mutex);
662
663	list_for_each_entry(arvif, &ar->arvifs, list) {
664		ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
665		if (ret)
666			return ret;
667	}
668
669	return 0;
670}
671
672int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
673			  struct cfg80211_wowlan *wowlan)
674{
675	struct ath11k *ar = hw->priv;
676	int ret;
677
678	ret = ath11k_mac_wait_tx_complete(ar);
679	if (ret) {
680		ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
681		return ret;
682	}
683
684	mutex_lock(&ar->conf_mutex);
685
686	ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
687	if (ret) {
688		ath11k_warn(ar->ab,
689			    "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
690			    ret);
691		goto exit;
692	}
693
694	ret =  ath11k_wow_cleanup(ar);
695	if (ret) {
696		ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
697			    ret);
698		goto exit;
699	}
700
701	ret = ath11k_wow_set_wakeups(ar, wowlan);
702	if (ret) {
703		ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
704			    ret);
705		goto cleanup;
706	}
707
708	ret = ath11k_wow_protocol_offload(ar, true);
709	if (ret) {
710		ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
711			    ret);
712		goto cleanup;
713	}
714
715	ret = ath11k_wow_set_hw_filter(ar);
716	if (ret) {
717		ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
718			    ret);
719		goto cleanup;
720	}
721
722	ret = ath11k_wow_set_keepalive(ar,
723				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
724				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
725	if (ret) {
726		ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
727		goto cleanup;
728	}
729
730	ret = ath11k_wow_enable(ar->ab);
731	if (ret) {
732		ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
733		goto cleanup;
734	}
735
736	ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
737	if (ret) {
738		ath11k_warn(ar->ab,
739			    "failed to stop dp rx pktlog during wow suspend: %d\n",
740			    ret);
741		goto cleanup;
742	}
743
744	ath11k_ce_stop_shadow_timers(ar->ab);
745	ath11k_dp_stop_shadow_timers(ar->ab);
746
747	ath11k_hif_irq_disable(ar->ab);
748	ath11k_hif_ce_irq_disable(ar->ab);
749
750	ret = ath11k_hif_suspend(ar->ab);
751	if (ret) {
752		ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
753		goto wakeup;
754	}
755
756	goto exit;
757
758wakeup:
759	ath11k_wow_wakeup(ar->ab);
760
761cleanup:
762	ath11k_wow_cleanup(ar);
763
764exit:
765	mutex_unlock(&ar->conf_mutex);
766	return ret ? 1 : 0;
767}
768
769void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
770{
771	struct ath11k *ar = hw->priv;
772
773	mutex_lock(&ar->conf_mutex);
774	device_set_wakeup_enable(ar->ab->dev, enabled);
775	mutex_unlock(&ar->conf_mutex);
776}
777
778int ath11k_wow_op_resume(struct ieee80211_hw *hw)
779{
780	struct ath11k *ar = hw->priv;
781	int ret;
782
783	mutex_lock(&ar->conf_mutex);
784
785	ret = ath11k_hif_resume(ar->ab);
786	if (ret) {
787		ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
788		goto exit;
789	}
790
791	ath11k_hif_ce_irq_enable(ar->ab);
792	ath11k_hif_irq_enable(ar->ab);
793
794	ret = ath11k_dp_rx_pktlog_start(ar->ab);
795	if (ret) {
796		ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
797		goto exit;
798	}
799
800	ret = ath11k_wow_wakeup(ar->ab);
801	if (ret) {
802		ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
803		goto exit;
804	}
805
806	ret = ath11k_wow_nlo_cleanup(ar);
807	if (ret) {
808		ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
809		goto exit;
810	}
811
812	ret = ath11k_wow_clear_hw_filter(ar);
813	if (ret) {
814		ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
815		goto exit;
816	}
817
818	ret = ath11k_wow_protocol_offload(ar, false);
819	if (ret) {
820		ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
821			    ret);
822		goto exit;
823	}
824
825	ret = ath11k_wow_set_keepalive(ar,
826				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
827				       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
828	if (ret) {
829		ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
830		goto exit;
831	}
832
833exit:
834	if (ret) {
835		switch (ar->state) {
836		case ATH11K_STATE_ON:
837			ar->state = ATH11K_STATE_RESTARTING;
838			ret = 1;
839			break;
840		case ATH11K_STATE_OFF:
841		case ATH11K_STATE_RESTARTING:
842		case ATH11K_STATE_RESTARTED:
843		case ATH11K_STATE_WEDGED:
844		case ATH11K_STATE_FTM:
845			ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
846				    ar->state);
847			ret = -EIO;
848			break;
849		}
850	}
851
852	mutex_unlock(&ar->conf_mutex);
853	return ret;
854}
855
856int ath11k_wow_init(struct ath11k *ar)
857{
858	if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
859		return 0;
860
861	ar->wow.wowlan_support = ath11k_wowlan_support;
862
863	if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
864	    ATH11K_HW_TXRX_NATIVE_WIFI) {
865		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
866		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
867	}
868
869	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
870		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
871		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
872	}
873
874	ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
875	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
876	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
877
878	device_set_wakeup_capable(ar->ab->dev, true);
879
880	return 0;
881}
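
Both versions above expand the mask that cfg80211 hands the driver (one bit per pattern byte) into the form the WMI pattern command consumes (one mask byte per pattern byte, 0x00 or 0xff); v6.8 does it once in ath11k_vif_wow_set_wakeups, while v6.13.7 also does it inside ath11k_wow_convert_8023_to_80211. The standalone userspace sketch below only illustrates that bit-to-byte expansion; it is not driver code, and the helper name bitmask_to_bytemask plus the example pattern length and mask value are made up for illustration.

/* Minimal userspace sketch of the bitmask-to-bytemask conversion used above.
 * Assumption: helper name and example values are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

static void bitmask_to_bytemask(const uint8_t *bitmask,
				uint8_t *bytemask, size_t pattern_len)
{
	size_t i;

	/* cfg80211: bit i set means pattern byte i must match;
	 * firmware side: byte i is 0xff when it must match, 0x00 otherwise.
	 */
	for (i = 0; i < pattern_len; i++)
		if (bitmask[i / 8] & BIT(i % 8))
			bytemask[i] = 0xff;
}

int main(void)
{
	/* 6-byte pattern; 0x23 = bits 0, 1 and 5 set, so bytes 0, 1 and 5 match */
	uint8_t bitmask[1] = { 0x23 };
	uint8_t bytemask[6] = { 0 };
	size_t i;

	bitmask_to_bytemask(bitmask, bytemask, sizeof(bytemask));

	for (i = 0; i < sizeof(bytemask); i++)
		printf("byte %zu: %s\n", i, bytemask[i] ? "match" : "ignore");

	return 0;
}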