// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2021-2022, 2024 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 Intel Deutschland GmbH
 */
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/bitops.h>
#include "mvm.h"

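/*
 * Copy the next-to-use QoS sequence counters of the AP station into the
 * WoWLAN configuration command so the firmware can continue the per-TID
 * counters from where the driver left off while the host is asleep.
 */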
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
				struct iwl_wowlan_config_cmd_v6 *cmd)
{
	int i;

	/*
	 * For QoS counters, we store the one to use next, so subtract 0x10
	 * since the uCode will add 0x10 *before* using the value while we
	 * increment after using the value (i.e. store the next value to use).
	 */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_ap_sta->tid_data[i].seq_number;
		seq -= 0x10;
		cmd->qos_seq[i] = cpu_to_le16(seq);
	}
}

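/*
 * Build and send PROT_OFFLOAD_CONFIG_CMD for @vif. The command layout
 * (v1/v2/v3-small/v4) is chosen from the firmware capability flags;
 * @disable_offloading keeps the enabled-offloads bitmap at zero,
 * @offload_ns controls IPv6 neighbour solicitation (NS) offload, and
 * @sta_id is only carried from command version 4 onwards.
 */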
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif,
			       bool disable_offloading,
			       bool offload_ns,
			       u32 cmd_flags,
			       u8 sta_id)
{
	union {
		struct iwl_proto_offload_cmd_v1 v1;
		struct iwl_proto_offload_cmd_v2 v2;
		struct iwl_proto_offload_cmd_v3_small v3s;
		struct iwl_proto_offload_cmd_v4 v4;
	} cmd = {};
	struct iwl_host_cmd hcmd = {
		.id = PROT_OFFLOAD_CONFIG_CMD,
		.flags = cmd_flags,
		.data[0] = &cmd,
		.dataflags[0] = IWL_HCMD_DFL_DUP,
	};
	struct iwl_proto_offload_cmd_common *common;
	u32 enabled = 0, size;
	u32 capa_flags = mvm->fw->ucode_capa.flags;
	int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0);

#if IS_ENABLED(CONFIG_IPV6)
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int i;
	/*
	 * Skip tentative address when ns offload is enabled to avoid
	 * violating RFC4862.
	 * Keep tentative address when ns offload is disabled so the NS packets
	 * will not be filtered out and will wake up the host.
	 */
	bool skip_tentative = offload_ns;

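	/*
	 * Firmware advertising the new NS-offload layouts takes explicit NS
	 * config entries plus a list of target addresses; older firmware
	 * only takes a flat array of target IPv6 addresses.
	 */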
	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
	    capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		struct iwl_ns_config *nsc;
		struct iwl_targ_addr *addrs;
		int n_nsc, n_addrs;
		int c;
		int num_skipped = 0;

		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
			nsc = cmd.v3s.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
			addrs = cmd.v3s.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
		} else {
			nsc = cmd.v4.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
			addrs = cmd.v4.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
		}

		/*
		 * For each address we have (and that will fit) fill a target
		 * address struct and combine for NS offload structs with the
		 * solicited node addresses.
		 */
		for (i = 0, c = 0;
		     i < mvmvif->num_target_ipv6_addrs &&
		     i < n_addrs && c < n_nsc; i++) {
			struct in6_addr solicited_addr;
			int j;

			if (skip_tentative &&
			    test_bit(i, mvmvif->tentative_addrs)) {
				num_skipped++;
				continue;
			}

			addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
						  &solicited_addr);
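			/*
			 * Addresses sharing a solicited-node multicast
			 * address share one NS config entry; reuse it if
			 * one was already created.
			 */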
			for (j = 0; j < c; j++)
				if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
						  &solicited_addr) == 0)
					break;
			if (j == c)
				c++;
			addrs[i].addr = mvmvif->target_ipv6_addrs[i];
			addrs[i].config_num = cpu_to_le32(j);
			nsc[j].dest_ipv6_addr = solicited_addr;
			memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
		}

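		/*
		 * Only mark the IPv6 data as valid if any addresses remain
		 * after skipping tentative ones.
		 */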
		if (mvmvif->num_target_ipv6_addrs - num_skipped)
			enabled |= IWL_D3_PROTO_IPV6_VALID;

		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
			cmd.v3s.num_valid_ipv6_addrs =
				cpu_to_le32(i - num_skipped);
		else
			cmd.v4.num_valid_ipv6_addrs =
				cpu_to_le32(i - num_skipped);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		bool found = false;

		BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) {
			if (skip_tentative &&
			    test_bit(i, mvmvif->tentative_addrs))
				continue;

			memcpy(cmd.v2.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v2.target_ipv6_addr[i]));

			found = true;
		}
		if (found) {
			enabled |= IWL_D3_PROTO_IPV6_VALID;
			memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
		}
	} else {
		bool found = false;
		BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) {
			if (skip_tentative &&
			    test_bit(i, mvmvif->tentative_addrs))
				continue;

			memcpy(cmd.v1.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v1.target_ipv6_addr[i]));

			found = true;
		}

		if (found) {
			enabled |= IWL_D3_PROTO_IPV6_VALID;
			memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
		}
	}

	if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID))
		enabled |= IWL_D3_PROTO_OFFLOAD_NS;
#endif
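	/* Pick the command layout and size matching the firmware capabilities */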
	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
		common = &cmd.v3s.common;
		size = sizeof(cmd.v3s);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		common = &cmd.v4.common;
		size = sizeof(cmd.v4);
		if (ver < 4) {
			/*
			 * This basically uses iwl_proto_offload_cmd_v3_large
			 * which doesn't have the sta_id parameter before the
			 * common part.
			 */
			size -= sizeof(cmd.v4.sta_id);
			hcmd.data[0] = common;
		}
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		common = &cmd.v2.common;
		size = sizeof(cmd.v2);
	} else {
		common = &cmd.v1.common;
		size = sizeof(cmd.v1);
	}

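	/*
	 * The command carries a single IPv4 address for ARP offload; use the
	 * first one configured on the interface.
	 */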
	if (vif->cfg.arp_addr_cnt) {
		enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
		common->host_ipv4_addr = vif->cfg.arp_addr_list[0];
		memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT))
		enabled |= IWL_D3_PROTO_OFFLOAD_BTM;

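	/*
	 * When offloading is disabled, leave common->enabled at 0 (from the
	 * zero-initialized command) so no offloads are armed.
	 */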
	if (!disable_offloading)
		common->enabled = cpu_to_le32(enabled);

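	/* From command version 4 onwards the station ID is carried explicitly */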
	if (ver >= 4)
		cmd.v4.sta_id = cpu_to_le32(sta_id);

	hcmd.len[0] = size;
	return iwl_mvm_send_cmd(mvm, &hcmd);
}