// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************

  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.


  Contact Information:
  Intel Linux Wireless <ilw@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>

#include "libipw.h"

/*

802.11 Data Frame

      ,-------------------------------------------------------------------.
Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
      |------|------|---------|---------|---------|------|---------|------|
Desc. | ctrl | dura |  DA/RA  |   TA    |   SA    | Sequ |  Frame  |  fcs |
      |      | tion | (BSSID) |         |         | ence |  data   |      |
      `--------------------------------------------------|         |------'
Total: 28 non-data bytes                                 `----.----'
                                                              |
       .- 'Frame data' expands, if WEP enabled, to <----------'
       |
       V
      ,-----------------------.
Bytes |  4  |   0-2296  |  4  |
      |-----|-----------|-----|
Desc. | IV  | Encrypted | ICV |
      |     | Packet    |     |
      `-----|           |-----'
            `-----.-----'
                  |
       .- 'Encrypted Packet' expands to
       |
       V
      ,---------------------------------------------------.
Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
      |------|------|---------|----------|------|---------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP      |
      | DSAP | SSAP |         |          |      | Packet  |
      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
      `---------------------------------------------------'
Total: 8 non-data bytes

802.3 Ethernet Data Frame

      ,-----------------------------------------.
Bytes |   6   |   6   |  2   |  Variable |  4   |
      |-------|-------|------|-----------|------|
Desc. | Dest. | Source| Type | IP Packet |  fcs |
      |  MAC  |  MAC  |      |           |      |
      `-----------------------------------------'
Total: 18 non-data bytes

In the event that fragmentation is required, the incoming payload is split
into N parts of size ieee->fts. The first fragment contains the SNAP header;
the remaining fragments carry only data.

If encryption is enabled, each fragment's payload is reduced by enough space
to add the prefix and postfix (IV and ICV, totalling 8 bytes in the case of
WEP). So 1500 bytes of payload with ieee->fts set to 500 takes 3 frames
without encryption; with WEP it takes 4 frames, since the payload of each
frame is reduced to 492 bytes.
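
(A worked check of that example, which ignores the 802.11 header overhead:
without encryption each fragment carries fts = 500 bytes of payload, and
1500 / 500 = 3 exactly. With WEP each fragment loses 4 bytes of IV and 4
bytes of ICV, leaving 500 - 8 = 492 bytes; 3 * 492 = 1476 < 1500, so a
fourth fragment is needed for the remaining 24 bytes.)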

* SKB visualization
*
* ,- skb->data
* |
* |    ETHERNET HEADER        ,-<-- PAYLOAD
* |                           |     14 bytes from skb->data
* |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
* |                       | | |
* |,-Dest.--. ,--Src.---. | | |
* |  6 bytes| | 6 bytes | | | |
* v         | |         | | | |
* 0         | v       1 | v | v           2
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
* ^     | ^         | ^ |
* |     | |         | | |
* |     | |         | `T' <---- 2 bytes for Type
* |     | |         |
* |     | '---SNAP--' <-------- 6 bytes for SNAP
* |     |
* `-IV--' <-------------------- 4 bytes for IV (WEP)
*
* SNAP HEADER
*
*/

static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

static int libipw_copy_snap(u8 * data, __be16 h_proto)
{
	struct libipw_snap_hdr *snap;
	u8 *oui;

	snap = (struct libipw_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));

	return SNAP_SIZE + sizeof(u16);
}

static int libipw_encrypt_fragment(struct libipw_device *ieee,
				   struct sk_buff *frag, int hdr_len)
{
	struct lib80211_crypt_data *crypt =
		ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
	int res;

	if (crypt == NULL)
		return -1;

	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}

void libipw_txb_free(struct libipw_txb *txb)
{
	int i;
	if (unlikely(!txb))
		return;
	for (i = 0; i < txb->nr_frags; i++)
		if (txb->fragments[i])
			dev_kfree_skb_any(txb->fragments[i]);
	kfree(txb);
}

static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size,
					   int headroom, gfp_t gfp_mask)
{
	struct libipw_txb *txb;
	int i;

	txb = kmalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct libipw_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
						    gfp_mask);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		skb_reserve(txb->fragments[i], headroom);
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}

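/* Map the IPv4 TOS/DSCP field (tos & 0xfc) of an outgoing packet to an
 * 802.11 QoS user priority (TID 0-7) for the QoS control field; non-IP
 * frames and unrecognized codepoints fall back to 0 (best effort). */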
static int libipw_classify(struct sk_buff *skb)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}

/* Incoming skb is converted to a txb which consists of
 * a block of 802.11 fragment packets (stored as skbs) */
netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct libipw_device *ieee = netdev_priv(dev);
	struct libipw_txb *txb = NULL;
	struct libipw_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
	    rts_required;
	unsigned long flags;
	int encrypt, host_encrypt, host_encrypt_msdu;
	__be16 ether_type;
	int bytes, fc, hdr_len;
	struct sk_buff *skb_frag;
	struct libipw_hdr_3addrqos header = {/* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	struct lib80211_crypt_data *crypt;
	int priority = skb->priority;
	int snapped = 0;

	if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if (!ieee->hard_start_xmit) {
		printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
		goto success;
	}

	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
		printk(KERN_WARNING "%s: skb too small (%d).\n",
		       ieee->dev->name, skb->len);
		goto success;
	}

	ether_type = ((struct ethhdr *)skb->data)->h_proto;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) &&
	    ieee->sec.encrypt;

	host_encrypt = ieee->host_encrypt && encrypt && crypt;
	host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;

	if (!encrypt && ieee->ieee802_1x &&
	    ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) {
		dev->stats.tx_dropped++;
		goto success;
	}

	/* Save source and destination addresses */
	skb_copy_from_linear_data(skb, dest, ETH_ALEN);
	skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);

	if (host_encrypt)
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
		    IEEE80211_FCTL_PROTECTED;
	else
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

	if (ieee->iw_mode == IW_MODE_INFRA) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(header.addr1, ieee->bssid, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, dest, ETH_ALEN);
	} else if (ieee->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		memcpy(header.addr1, dest, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, ieee->bssid, ETH_ALEN);
	}
	hdr_len = LIBIPW_3ADDR_LEN;

	if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
		fc |= IEEE80211_STYPE_QOS_DATA;
		hdr_len += 2;

		skb->priority = libipw_classify(skb);
		header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID);
	}
	header.frame_ctl = cpu_to_le16(fc);

	/* Advance the SKB to the start of the payload */
	skb_pull(skb, sizeof(struct ethhdr));

	/* Determine total amount of storage required for TXB packets */
	bytes = skb->len + SNAP_SIZE + sizeof(u16);

	/* If the crypto ops provide MSDU encryption, apply it to the whole
	 * data packet first. */
	if ((host_encrypt || host_encrypt_msdu) &&
	    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
		int res = 0;
		int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		struct sk_buff *skb_new = dev_alloc_skb(len);

		if (unlikely(!skb_new))
			goto failed;

		skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
		skb_put_data(skb_new, &header, hdr_len);
		snapped = 1;
		libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
				 ether_type);
		skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
		res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
		if (res < 0) {
			LIBIPW_ERROR("msdu encryption failed\n");
			dev_kfree_skb_any(skb_new);
			goto failed;
		}
		dev_kfree_skb_any(skb);
		skb = skb_new;
		bytes += crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		skb_pull(skb, hdr_len);
	}

	if (host_encrypt || ieee->host_open_frag) {
		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented) */
		if (is_multicast_ether_addr(dest) ||
		    is_broadcast_ether_addr(dest))
			frag_size = MAX_FRAG_THRESHOLD;
		else
			frag_size = ieee->fts;

		/* Determine amount of payload per fragment. Regardless of
		 * whether this stack is providing the full 802.11 header, one
		 * will eventually be affixed to this fragment -- so we must
		 * account for it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			bytes_per_frag -= LIBIPW_FCS_LEN;

		/* Each fragment may need to have room for encryption
		 * pre/postfix */
		if (host_encrypt && crypt && crypt->ops)
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
			    crypt->ops->extra_mpdu_postfix_len;

		/* Number of fragments is the total bytes divided by the
		 * payload available per fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
	} else {
		nr_frags = 1;
		bytes_per_frag = bytes_last_frag = bytes;
		frag_size = bytes + hdr_len;
	}

	rts_required = (frag_size > ieee->rts
			&& ieee->config & CFG_LIBIPW_RTS);
	if (rts_required)
		nr_frags++;

	/* When we allocate the TXB we allocate enough space for the reserve
	 * and full fragment bytes (bytes_per_frag doesn't include prefix,
	 * postfix, header, FCS, etc.) */
	txb = libipw_alloc_txb(nr_frags, frag_size,
			       ieee->tx_headroom, GFP_ATOMIC);
	if (unlikely(!txb)) {
		printk(KERN_WARNING "%s: Could not allocate TXB\n",
		       ieee->dev->name);
		goto failed;
	}
	txb->encrypted = encrypt;
	if (host_encrypt)
		txb->payload_size = frag_size * (nr_frags - 1) +
		    bytes_last_frag;
	else
		txb->payload_size = bytes;

	if (rts_required) {
		skb_frag = txb->fragments[0];
		frag_hdr = skb_put(skb_frag, hdr_len);

		/*
		 * Set header frame_ctl to the RTS.
		 */
		header.frame_ctl =
		    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
		memcpy(frag_hdr, &header, hdr_len);

		/*
		 * Restore header frame_ctl to the original data setting.
		 */
		header.frame_ctl = cpu_to_le16(fc);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, 4);

		txb->rts_included = 1;
		i = 1;
	} else
		i = 0;

	for (; i < nr_frags; i++) {
		skb_frag = txb->fragments[i];

		if (host_encrypt)
			skb_reserve(skb_frag,
				    crypt->ops->extra_mpdu_prefix_len);

		frag_hdr = skb_put_data(skb_frag, &header, hdr_len);

		/* If this is not the last fragment, then add the MOREFRAGS
		 * bit to the frame control */
		if (i != nr_frags - 1) {
			frag_hdr->frame_ctl =
			    cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
			bytes = bytes_per_frag;
		} else {
			/* The last fragment takes the remaining length */
			bytes = bytes_last_frag;
		}

		if (i == 0 && !snapped) {
			libipw_copy_snap(skb_put
					 (skb_frag, SNAP_SIZE + sizeof(u16)),
					 ether_type);
			bytes -= SNAP_SIZE + sizeof(u16);
		}

		skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes);

		/* Advance the SKB... */
		skb_pull(skb, bytes);

		/* Encryption routine will move the header forward in order
		 * to insert the IV between the header and the payload */
		if (host_encrypt)
			libipw_encrypt_fragment(ieee, skb_frag, hdr_len);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, 4);
	}

      success:
	spin_unlock_irqrestore(&ieee->lock, flags);

	dev_kfree_skb_any(skb);

	if (txb) {
		netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev, priority);
		if (ret == NETDEV_TX_OK) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += txb->payload_size;
			return NETDEV_TX_OK;
		}

		libipw_txb_free(txb);
	}

	return NETDEV_TX_OK;

      failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	dev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL(libipw_xmit);

EXPORT_SYMBOL(libipw_txb_free);
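
/*
 * Usage sketch (illustrative only -- the function and helper names below are
 * hypothetical, not part of this layer): a driver built on libipw supplies
 * ieee->hard_start_xmit and transmits each prepared fragment from the txb it
 * is handed, e.g.:
 *
 *	static netdev_tx_t example_hard_start_xmit(struct libipw_txb *txb,
 *						   struct net_device *dev,
 *						   int priority)
 *	{
 *		int i;
 *
 *		for (i = 0; i < txb->nr_frags; i++)
 *			example_queue_to_hw(dev, txb->fragments[i], priority);
 *
 *		return NETDEV_TX_OK;
 *	}
 *
 * When the callback returns NETDEV_TX_OK, libipw_xmit() updates the device
 * statistics and does not free the txb (the driver owns it); on any other
 * return value libipw_xmit() frees it with libipw_txb_free().
 */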