v4.10.11
   1/*
   2 * Copyright 2002-2005, Instant802 Networks, Inc.
   3 * Copyright 2005-2006, Devicescape Software, Inc.
   4 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
   5 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
   6 * Copyright 2013-2014  Intel Mobile Communications GmbH
   7 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13
  14#include <linux/jiffies.h>
  15#include <linux/slab.h>
  16#include <linux/kernel.h>
  17#include <linux/skbuff.h>
  18#include <linux/netdevice.h>
  19#include <linux/etherdevice.h>
  20#include <linux/rcupdate.h>
  21#include <linux/export.h>
  22#include <linux/bitops.h>
  23#include <net/mac80211.h>
  24#include <net/ieee80211_radiotap.h>
  25#include <asm/unaligned.h>
  26
  27#include "ieee80211_i.h"
  28#include "driver-ops.h"
  29#include "led.h"
  30#include "mesh.h"
  31#include "wep.h"
  32#include "wpa.h"
  33#include "tkip.h"
  34#include "wme.h"
  35#include "rate.h"
  36
  37static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
  38{
  39	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
  40
  41	u64_stats_update_begin(&tstats->syncp);
  42	tstats->rx_packets++;
  43	tstats->rx_bytes += len;
  44	u64_stats_update_end(&tstats->syncp);
  45}
  46
  47static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
  48			       enum nl80211_iftype type)
  49{
  50	__le16 fc = hdr->frame_control;
  51
  52	if (ieee80211_is_data(fc)) {
  53		if (len < 24) /* drop incorrect hdr len (data) */
  54			return NULL;
  55
  56		if (ieee80211_has_a4(fc))
  57			return NULL;
  58		if (ieee80211_has_tods(fc))
  59			return hdr->addr1;
  60		if (ieee80211_has_fromds(fc))
  61			return hdr->addr2;
  62
  63		return hdr->addr3;
  64	}
  65
  66	if (ieee80211_is_mgmt(fc)) {
  67		if (len < 24) /* drop incorrect hdr len (mgmt) */
  68			return NULL;
  69		return hdr->addr3;
  70	}
  71
  72	if (ieee80211_is_ctl(fc)) {
  73		if (ieee80211_is_pspoll(fc))
  74			return hdr->addr1;
  75
  76		if (ieee80211_is_back_req(fc)) {
  77			switch (type) {
  78			case NL80211_IFTYPE_STATION:
  79				return hdr->addr2;
  80			case NL80211_IFTYPE_AP:
  81			case NL80211_IFTYPE_AP_VLAN:
  82				return hdr->addr1;
  83			default:
  84				break; /* fall through to the return */
  85			}
  86		}
  87	}
  88
  89	return NULL;
  90}
  91
  92/*
  93 * monitor mode reception
  94 *
  95 * This function cleans up the SKB, i.e. it removes all the stuff
  96 * only useful for monitoring.
  97 */
  98static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
  99					   struct sk_buff *skb,
 100					   unsigned int rtap_vendor_space)
 101{
 102	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
 103		if (likely(skb->len > FCS_LEN))
 104			__pskb_trim(skb, skb->len - FCS_LEN);
 105		else {
 106			/* driver bug */
 107			WARN_ON(1);
 108			dev_kfree_skb(skb);
 109			return NULL;
 110		}
 111	}
 112
 113	__pskb_pull(skb, rtap_vendor_space);
 114
 115	return skb;
 116}
 117
 118static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
 119				     unsigned int rtap_vendor_space)
 120{
 121	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 122	struct ieee80211_hdr *hdr;
 123
 124	hdr = (void *)(skb->data + rtap_vendor_space);
 125
 126	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
 127			    RX_FLAG_FAILED_PLCP_CRC |
 128			    RX_FLAG_ONLY_MONITOR))
 129		return true;
 130
 131	if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
 132		return true;
 133
 134	if (ieee80211_is_ctl(hdr->frame_control) &&
 135	    !ieee80211_is_pspoll(hdr->frame_control) &&
 136	    !ieee80211_is_back_req(hdr->frame_control))
 137		return true;
 138
 139	return false;
 140}
 141
 142static int
 143ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
 144			     struct ieee80211_rx_status *status,
 145			     struct sk_buff *skb)
 146{
 147	int len;
 148
 149	/* always present fields */
 150	len = sizeof(struct ieee80211_radiotap_header) + 8;
 151
 152	/* allocate extra bitmaps */
 153	if (status->chains)
 154		len += 4 * hweight8(status->chains);
 155
 156	if (ieee80211_have_rx_timestamp(status)) {
 157		len = ALIGN(len, 8);
 158		len += 8;
 159	}
 160	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
 161		len += 1;
 162
 163	/* antenna field, if we don't have per-chain info */
 164	if (!status->chains)
 165		len += 1;
 166
 167	/* padding for RX_FLAGS if necessary */
 168	len = ALIGN(len, 2);
 169
 170	if (status->flag & RX_FLAG_HT) /* HT info */
 171		len += 3;
 172
 173	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
 174		len = ALIGN(len, 4);
 175		len += 8;
 176	}
 177
 178	if (status->flag & RX_FLAG_VHT) {
 179		len = ALIGN(len, 2);
 180		len += 12;
 181	}
 182
 183	if (local->hw.radiotap_timestamp.units_pos >= 0) {
 184		len = ALIGN(len, 8);
 185		len += 12;
 186	}
 187
 188	if (status->chains) {
 189		/* antenna and antenna signal fields */
 190		len += 2 * hweight8(status->chains);
 191	}
 192
 193	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
 194		struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
 195
 196		/* vendor presence bitmap */
 197		len += 4;
 198		/* alignment for fixed 6-byte vendor data header */
 199		len = ALIGN(len, 2);
 200		/* vendor data header */
 201		len += 6;
 202		if (WARN_ON(rtap->align == 0))
 203			rtap->align = 1;
 204		len = ALIGN(len, rtap->align);
 205		len += rtap->len + rtap->pad;
 206	}
 207
 208	return len;
 209}
 210
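/*
 * Worked example for the length computed above: a legacy (non-HT/VHT)
 * frame on hardware that reports a dBm signal but no per-chain info,
 * no mactime and no vendor data needs 8 (radiotap header) + 1 (flags) +
 * 1 (rate) + 4 (channel) + 1 (dBm antsignal) + 1 (antenna) + 2 (RX
 * flags) = 18 bytes, i.e. the 16 + 1 + 1 accumulated by the code.
 */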
 211/*
 212 * ieee80211_add_rx_radiotap_header - add radiotap header
 213 *
 214 * add a radiotap header containing all the fields which the hardware provided.
 215 */
 216static void
 217ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 218				 struct sk_buff *skb,
 219				 struct ieee80211_rate *rate,
 220				 int rtap_len, bool has_fcs)
 221{
 222	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 223	struct ieee80211_radiotap_header *rthdr;
 224	unsigned char *pos;
 225	__le32 *it_present;
 226	u32 it_present_val;
 227	u16 rx_flags = 0;
 228	u16 channel_flags = 0;
 229	int mpdulen, chain;
 230	unsigned long chains = status->chains;
 231	struct ieee80211_vendor_radiotap rtap = {};
 232
 233	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
 234		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
 235		/* rtap.len and rtap.pad are undone immediately */
 236		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
 237	}
 238
 239	mpdulen = skb->len;
 240	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
 241		mpdulen += FCS_LEN;
 242
 243	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
 244	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
 245	it_present = &rthdr->it_present;
 246
 247	/* radiotap header, set always present flags */
 248	rthdr->it_len = cpu_to_le16(rtap_len);
 249	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
 250			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
 251			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
 252
 253	if (!status->chains)
 254		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
 255
 256	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
 257		it_present_val |=
 258			BIT(IEEE80211_RADIOTAP_EXT) |
 259			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
 260		put_unaligned_le32(it_present_val, it_present);
 261		it_present++;
 262		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
 263				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
 264	}
 265
 266	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
 267		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
 268				  BIT(IEEE80211_RADIOTAP_EXT);
 269		put_unaligned_le32(it_present_val, it_present);
 270		it_present++;
 271		it_present_val = rtap.present;
 272	}
 273
 274	put_unaligned_le32(it_present_val, it_present);
 275
 276	pos = (void *)(it_present + 1);
 277
 278	/* the order of the following fields is important */
 279
 280	/* IEEE80211_RADIOTAP_TSFT */
 281	if (ieee80211_have_rx_timestamp(status)) {
 282		/* padding */
 283		while ((pos - (u8 *)rthdr) & 7)
 284			*pos++ = 0;
 285		put_unaligned_le64(
 286			ieee80211_calculate_rx_timestamp(local, status,
 287							 mpdulen, 0),
 288			pos);
 289		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
 290		pos += 8;
 291	}
 292
 293	/* IEEE80211_RADIOTAP_FLAGS */
 294	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
 295		*pos |= IEEE80211_RADIOTAP_F_FCS;
 296	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
 297		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
 298	if (status->flag & RX_FLAG_SHORTPRE)
 299		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
 300	pos++;
 301
 302	/* IEEE80211_RADIOTAP_RATE */
 303	if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) {
 304		/*
  305		 * Without rate information, don't add it. For HT/VHT frames, the
  306		 * MCS/VHT information is a separate radiotap field which is
 307		 * added below. The byte here is needed as padding
 308		 * for the channel though, so initialise it to 0.
 309		 */
 310		*pos = 0;
 311	} else {
 312		int shift = 0;
 313		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
 314		if (status->flag & RX_FLAG_10MHZ)
 315			shift = 1;
 316		else if (status->flag & RX_FLAG_5MHZ)
 317			shift = 2;
 318		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
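		/* rate->bitrate is in 100 kbps units while the radiotap rate
		 * field is in 500 kbps units, e.g. 54 Mbps: 540 / 5 == 108 */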
 319	}
 320	pos++;
 321
 322	/* IEEE80211_RADIOTAP_CHANNEL */
 323	put_unaligned_le16(status->freq, pos);
 324	pos += 2;
 325	if (status->flag & RX_FLAG_10MHZ)
 326		channel_flags |= IEEE80211_CHAN_HALF;
 327	else if (status->flag & RX_FLAG_5MHZ)
 328		channel_flags |= IEEE80211_CHAN_QUARTER;
 329
 330	if (status->band == NL80211_BAND_5GHZ)
 331		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
 332	else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
 333		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
 334	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
 335		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
 336	else if (rate)
 337		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
 338	else
 339		channel_flags |= IEEE80211_CHAN_2GHZ;
 340	put_unaligned_le16(channel_flags, pos);
 341	pos += 2;
 342
 343	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
 344	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
 345	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
 346		*pos = status->signal;
 347		rthdr->it_present |=
 348			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
 349		pos++;
 350	}
 351
 352	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
 353
 354	if (!status->chains) {
 355		/* IEEE80211_RADIOTAP_ANTENNA */
 356		*pos = status->antenna;
 357		pos++;
 358	}
 359
 360	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
 361
 362	/* IEEE80211_RADIOTAP_RX_FLAGS */
 363	/* ensure 2 byte alignment for the 2 byte field as required */
 364	if ((pos - (u8 *)rthdr) & 1)
 365		*pos++ = 0;
 366	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
 367		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
 368	put_unaligned_le16(rx_flags, pos);
 369	pos += 2;
 370
 371	if (status->flag & RX_FLAG_HT) {
 372		unsigned int stbc;
 373
 374		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
 375		*pos++ = local->hw.radiotap_mcs_details;
 376		*pos = 0;
 377		if (status->flag & RX_FLAG_SHORT_GI)
 378			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
 379		if (status->flag & RX_FLAG_40MHZ)
 380			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
 381		if (status->flag & RX_FLAG_HT_GF)
 382			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
 383		if (status->flag & RX_FLAG_LDPC)
 384			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
 385		stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;
 386		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
 387		pos++;
 388		*pos++ = status->rate_idx;
 389	}
 390
 391	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
 392		u16 flags = 0;
 393
 394		/* ensure 4 byte alignment */
 395		while ((pos - (u8 *)rthdr) & 3)
 396			pos++;
 397		rthdr->it_present |=
 398			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
 399		put_unaligned_le32(status->ampdu_reference, pos);
 400		pos += 4;
 401		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
 402			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
 403		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
 404			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
 405		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
 406			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
 407		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
 408			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
 409		put_unaligned_le16(flags, pos);
 410		pos += 2;
 411		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
 412			*pos++ = status->ampdu_delimiter_crc;
 413		else
 414			*pos++ = 0;
 415		*pos++ = 0;
 416	}
 417
 418	if (status->flag & RX_FLAG_VHT) {
 419		u16 known = local->hw.radiotap_vht_details;
 420
 421		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
 422		put_unaligned_le16(known, pos);
 423		pos += 2;
 424		/* flags */
 425		if (status->flag & RX_FLAG_SHORT_GI)
 426			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
 427		/* in VHT, STBC is binary */
 428		if (status->flag & RX_FLAG_STBC_MASK)
 429			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
 430		if (status->vht_flag & RX_VHT_FLAG_BF)
 431			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
 432		pos++;
 433		/* bandwidth */
 434		if (status->vht_flag & RX_VHT_FLAG_80MHZ)
 435			*pos++ = 4;
 436		else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
 437			*pos++ = 11;
 438		else if (status->flag & RX_FLAG_40MHZ)
 439			*pos++ = 1;
 440		else /* 20 MHz */
 441			*pos++ = 0;
 442		/* MCS/NSS */
 443		*pos = (status->rate_idx << 4) | status->vht_nss;
 444		pos += 4;
 445		/* coding field */
 446		if (status->flag & RX_FLAG_LDPC)
 447			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
 448		pos++;
 449		/* group ID */
 450		pos++;
 451		/* partial_aid */
 452		pos += 2;
 453	}
 454
 455	if (local->hw.radiotap_timestamp.units_pos >= 0) {
 456		u16 accuracy = 0;
 457		u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
 458
 459		rthdr->it_present |=
 460			cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);
 461
 462		/* ensure 8 byte alignment */
 463		while ((pos - (u8 *)rthdr) & 7)
 464			pos++;
 465
 466		put_unaligned_le64(status->device_timestamp, pos);
 467		pos += sizeof(u64);
 468
 469		if (local->hw.radiotap_timestamp.accuracy >= 0) {
 470			accuracy = local->hw.radiotap_timestamp.accuracy;
 471			flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
 472		}
 473		put_unaligned_le16(accuracy, pos);
 474		pos += sizeof(u16);
 475
 476		*pos++ = local->hw.radiotap_timestamp.units_pos;
 477		*pos++ = flags;
 478	}
 479
 480	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
 481		*pos++ = status->chain_signal[chain];
 482		*pos++ = chain;
 483	}
 484
 485	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
 486		/* ensure 2 byte alignment for the vendor field as required */
 487		if ((pos - (u8 *)rthdr) & 1)
 488			*pos++ = 0;
 489		*pos++ = rtap.oui[0];
 490		*pos++ = rtap.oui[1];
 491		*pos++ = rtap.oui[2];
 492		*pos++ = rtap.subns;
 493		put_unaligned_le16(rtap.len, pos);
 494		pos += 2;
 495		/* align the actual payload as requested */
 496		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
 497			*pos++ = 0;
 498		/* data (and possible padding) already follows */
 499	}
 500}
 501
 502/*
 503 * This function copies a received frame to all monitor interfaces and
 504 * returns a cleaned-up SKB that no longer includes the FCS nor the
 505 * radiotap header the driver might have added.
 506 */
 507static struct sk_buff *
 508ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 509		     struct ieee80211_rate *rate)
 510{
 511	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
 512	struct ieee80211_sub_if_data *sdata;
 513	int rt_hdrlen, needed_headroom;
 514	struct sk_buff *skb, *skb2;
 515	struct net_device *prev_dev = NULL;
 516	int present_fcs_len = 0;
 517	unsigned int rtap_vendor_space = 0;
 518	struct ieee80211_mgmt *mgmt;
 519	struct ieee80211_sub_if_data *monitor_sdata =
 520		rcu_dereference(local->monitor_sdata);
 521
 522	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
 523		struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;
 524
 525		rtap_vendor_space = sizeof(*rtap) + rtap->len + rtap->pad;
 526	}
 527
 528	/*
 529	 * First, we may need to make a copy of the skb because
 530	 *  (1) we need to modify it for radiotap (if not present), and
 531	 *  (2) the other RX handlers will modify the skb we got.
 532	 *
 533	 * We don't need to, of course, if we aren't going to return
 534	 * the SKB because it has a bad FCS/PLCP checksum.
 535	 */
 536
 537	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
 538		present_fcs_len = FCS_LEN;
 539
 540	/* ensure hdr->frame_control and vendor radiotap data are in skb head */
 541	if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) {
 542		dev_kfree_skb(origskb);
 543		return NULL;
 544	}
 545
 546	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
 547		if (should_drop_frame(origskb, present_fcs_len,
 548				      rtap_vendor_space)) {
 549			dev_kfree_skb(origskb);
 550			return NULL;
 551		}
 552
 553		return remove_monitor_info(local, origskb, rtap_vendor_space);
 554	}
 555
 556	/* room for the radiotap header based on driver features */
 557	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
 558	needed_headroom = rt_hdrlen - rtap_vendor_space;
 559
 560	if (should_drop_frame(origskb, present_fcs_len, rtap_vendor_space)) {
 561		/* only need to expand headroom if necessary */
 562		skb = origskb;
 563		origskb = NULL;
 564
 565		/*
 566		 * This shouldn't trigger often because most devices have an
 567		 * RX header they pull before we get here, and that should
 568		 * be big enough for our radiotap information. We should
 569		 * probably export the length to drivers so that we can have
 570		 * them allocate enough headroom to start with.
 571		 */
 572		if (skb_headroom(skb) < needed_headroom &&
 573		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
 574			dev_kfree_skb(skb);
 575			return NULL;
 576		}
 577	} else {
 578		/*
 579		 * Need to make a copy and possibly remove radiotap header
 580		 * and FCS from the original.
 581		 */
 582		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
 583
 584		origskb = remove_monitor_info(local, origskb,
 585					      rtap_vendor_space);
 586
 587		if (!skb)
 588			return origskb;
 589	}
 590
 591	/* prepend radiotap information */
 592	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
 593
 594	skb_reset_mac_header(skb);
 595	skb->ip_summed = CHECKSUM_UNNECESSARY;
 596	skb->pkt_type = PACKET_OTHERHOST;
 597	skb->protocol = htons(ETH_P_802_2);
 598
 599	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 600		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
 601			continue;
 602
 603		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
 604			continue;
 605
 606		if (!ieee80211_sdata_running(sdata))
 607			continue;
 608
 609		if (prev_dev) {
 610			skb2 = skb_clone(skb, GFP_ATOMIC);
 611			if (skb2) {
 612				skb2->dev = prev_dev;
 613				netif_receive_skb(skb2);
 614			}
 615		}
 616
 617		prev_dev = sdata->dev;
 618		ieee80211_rx_stats(sdata->dev, skb->len);
 619	}
 620
 621	mgmt = (void *)skb->data;
 622	if (monitor_sdata &&
 623	    skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
 624	    ieee80211_is_action(mgmt->frame_control) &&
 625	    mgmt->u.action.category == WLAN_CATEGORY_VHT &&
 626	    mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
 627	    is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
 628	    ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
 629		struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
 630
 631		if (mu_skb) {
 632			mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
 633			skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
 634			ieee80211_queue_work(&local->hw, &monitor_sdata->work);
 635		}
 636	}
 637
 638	if (prev_dev) {
 639		skb->dev = prev_dev;
 640		netif_receive_skb(skb);
 641	} else
 642		dev_kfree_skb(skb);
 643
 644	return origskb;
 645}
 646
 647static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
 648{
 649	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 650	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
 651	int tid, seqno_idx, security_idx;
 652
 653	/* does the frame have a qos control field? */
 654	if (ieee80211_is_data_qos(hdr->frame_control)) {
 655		u8 *qc = ieee80211_get_qos_ctl(hdr);
 656		/* frame has qos control */
 657		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 658		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
 659			status->rx_flags |= IEEE80211_RX_AMSDU;
 660
 661		seqno_idx = tid;
 662		security_idx = tid;
 663	} else {
 664		/*
 665		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
 666		 *
 667		 *	Sequence numbers for management frames, QoS data
 668		 *	frames with a broadcast/multicast address in the
 669		 *	Address 1 field, and all non-QoS data frames sent
 670		 *	by QoS STAs are assigned using an additional single
 671		 *	modulo-4096 counter, [...]
 672		 *
 673		 * We also use that counter for non-QoS STAs.
 674		 */
 675		seqno_idx = IEEE80211_NUM_TIDS;
 676		security_idx = 0;
 677		if (ieee80211_is_mgmt(hdr->frame_control))
 678			security_idx = IEEE80211_NUM_TIDS;
 679		tid = 0;
 680	}
 681
 682	rx->seqno_idx = seqno_idx;
 683	rx->security_idx = security_idx;
  684	/* Set skb->priority to the 802.1d tag if the highest-order bit of the
  685	 * TID is not set; for now, set skb->priority to 0 in the other cases. */
 686	rx->skb->priority = (tid > 7) ? 0 : tid;
 687}
 688
 689/**
 690 * DOC: Packet alignment
 691 *
 692 * Drivers always need to pass packets that are aligned to two-byte boundaries
 693 * to the stack.
 694 *
  695 * Additionally, drivers should, if possible, align the payload data in a way that
 696 * guarantees that the contained IP header is aligned to a four-byte
 697 * boundary. In the case of regular frames, this simply means aligning the
 698 * payload to a four-byte boundary (because either the IP header is directly
 699 * contained, or IV/RFC1042 headers that have a length divisible by four are
 700 * in front of it).  If the payload data is not properly aligned and the
 701 * architecture doesn't support efficient unaligned operations, mac80211
 702 * will align the data.
 703 *
 704 * With A-MSDU frames, however, the payload data address must yield two modulo
 705 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 706 * push the IP header further back to a multiple of four again. Thankfully, the
 707 * specs were sane enough this time around to require padding each A-MSDU
 708 * subframe to a length that is a multiple of four.
 709 *
  710 * Padding like Atheros hardware adds between the 802.11 header and the
  711 * payload is not supported; the driver is required to move the 802.11
  712 * header so that it is directly in front of the payload in that case.
 713 */
 714static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
 715{
 716#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 717	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
 718#endif
 719}
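/*
 * Illustrative sketch only (not part of mac80211): one way a driver that
 * copies frames out of its RX buffer could satisfy the alignment rules
 * documented above.  The function name is made up for the example, and it
 * assumes dev_alloc_skb() hands back a data pointer that is at least
 * four-byte aligned (true in practice).
 */
#if 0	/* example only */
static struct sk_buff *example_rx_copy_frame(void *frame, unsigned int len)
{
	struct ieee80211_hdr *hdr = frame;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	struct sk_buff *skb = dev_alloc_skb(len + 3);

	if (!skb)
		return NULL;

	/* shift skb->data so that the bytes following the 802.11 header
	 * (IV/RFC1042/IP headers) land on a four-byte boundary; 802.11
	 * header lengths are even, so skb->data stays two-byte aligned
	 * as required */
	skb_reserve(skb, (4 - (hdrlen & 3)) & 3);
	memcpy(skb_put(skb, len), frame, len);
	return skb;
}
#endif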
 720
 721
 722/* rx handlers */
 723
 724static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
 725{
 726	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 727
 728	if (is_multicast_ether_addr(hdr->addr1))
 729		return 0;
 730
 731	return ieee80211_is_robust_mgmt_frame(skb);
 732}
 733
 734
 735static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
 736{
 737	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 738
 739	if (!is_multicast_ether_addr(hdr->addr1))
 740		return 0;
 741
 742	return ieee80211_is_robust_mgmt_frame(skb);
 743}
 744
 745
 746/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
 747static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
 748{
 749	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
 750	struct ieee80211_mmie *mmie;
 751	struct ieee80211_mmie_16 *mmie16;
 752
 753	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
 754		return -1;
 755
 756	if (!ieee80211_is_robust_mgmt_frame(skb))
 757		return -1; /* not a robust management frame */
 758
 759	mmie = (struct ieee80211_mmie *)
 760		(skb->data + skb->len - sizeof(*mmie));
 761	if (mmie->element_id == WLAN_EID_MMIE &&
 762	    mmie->length == sizeof(*mmie) - 2)
 763		return le16_to_cpu(mmie->key_id);
 764
 765	mmie16 = (struct ieee80211_mmie_16 *)
 766		(skb->data + skb->len - sizeof(*mmie16));
 767	if (skb->len >= 24 + sizeof(*mmie16) &&
 768	    mmie16->element_id == WLAN_EID_MMIE &&
 769	    mmie16->length == sizeof(*mmie16) - 2)
 770		return le16_to_cpu(mmie16->key_id);
 771
 772	return -1;
 773}
 774
 775static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
 776				  struct sk_buff *skb)
 777{
 778	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 779	__le16 fc;
 780	int hdrlen;
 781	u8 keyid;
 782
 783	fc = hdr->frame_control;
 784	hdrlen = ieee80211_hdrlen(fc);
 785
 786	if (skb->len < hdrlen + cs->hdr_len)
 787		return -EINVAL;
 788
 789	skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
 790	keyid &= cs->key_idx_mask;
 791	keyid >>= cs->key_idx_shift;
 792
 793	return keyid;
 794}
 795
 796static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
 797{
 798	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 799	char *dev_addr = rx->sdata->vif.addr;
 800
 801	if (ieee80211_is_data(hdr->frame_control)) {
 802		if (is_multicast_ether_addr(hdr->addr1)) {
 803			if (ieee80211_has_tods(hdr->frame_control) ||
 804			    !ieee80211_has_fromds(hdr->frame_control))
 805				return RX_DROP_MONITOR;
 806			if (ether_addr_equal(hdr->addr3, dev_addr))
 807				return RX_DROP_MONITOR;
 808		} else {
 809			if (!ieee80211_has_a4(hdr->frame_control))
 810				return RX_DROP_MONITOR;
 811			if (ether_addr_equal(hdr->addr4, dev_addr))
 812				return RX_DROP_MONITOR;
 813		}
 814	}
 815
 816	/* If there is not an established peer link and this is not a peer link
  817	 * establishment frame, beacon or probe, drop the frame.
 818	 */
 819
 820	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
 821		struct ieee80211_mgmt *mgmt;
 822
 823		if (!ieee80211_is_mgmt(hdr->frame_control))
 824			return RX_DROP_MONITOR;
 825
 826		if (ieee80211_is_action(hdr->frame_control)) {
 827			u8 category;
 828
 829			/* make sure category field is present */
 830			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
 831				return RX_DROP_MONITOR;
 832
 833			mgmt = (struct ieee80211_mgmt *)hdr;
 834			category = mgmt->u.action.category;
 835			if (category != WLAN_CATEGORY_MESH_ACTION &&
 836			    category != WLAN_CATEGORY_SELF_PROTECTED)
 837				return RX_DROP_MONITOR;
 838			return RX_CONTINUE;
 839		}
 840
 841		if (ieee80211_is_probe_req(hdr->frame_control) ||
 842		    ieee80211_is_probe_resp(hdr->frame_control) ||
 843		    ieee80211_is_beacon(hdr->frame_control) ||
 844		    ieee80211_is_auth(hdr->frame_control))
 845			return RX_CONTINUE;
 846
 847		return RX_DROP_MONITOR;
 848	}
 849
 850	return RX_CONTINUE;
 851}
 852
 853static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
 854					      int index)
 855{
 856	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
 857	struct sk_buff *tail = skb_peek_tail(frames);
 858	struct ieee80211_rx_status *status;
 859
 860	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
 861		return true;
 862
 863	if (!tail)
 864		return false;
 865
 866	status = IEEE80211_SKB_RXCB(tail);
 867	if (status->flag & RX_FLAG_AMSDU_MORE)
 868		return false;
 869
 870	return true;
 871}
 872
 873static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
 874					    struct tid_ampdu_rx *tid_agg_rx,
 875					    int index,
 876					    struct sk_buff_head *frames)
 877{
 878	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
 879	struct sk_buff *skb;
 880	struct ieee80211_rx_status *status;
 881
 882	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 883
 884	if (skb_queue_empty(skb_list))
 885		goto no_frame;
 886
 887	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
 888		__skb_queue_purge(skb_list);
 889		goto no_frame;
 890	}
 891
 892	/* release frames from the reorder ring buffer */
 893	tid_agg_rx->stored_mpdu_num--;
 894	while ((skb = __skb_dequeue(skb_list))) {
 895		status = IEEE80211_SKB_RXCB(skb);
 896		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
 897		__skb_queue_tail(frames, skb);
 898	}
 899
 900no_frame:
 901	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
 902	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
 903}
 904
 905static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
 906					     struct tid_ampdu_rx *tid_agg_rx,
 907					     u16 head_seq_num,
 908					     struct sk_buff_head *frames)
 909{
 910	int index;
 911
 912	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 913
 914	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
 915		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
 916		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
 917						frames);
 918	}
 919}
 920
 921/*
 922 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 923 * the skb was added to the buffer longer than this time ago, the earlier
 924 * frames that have not yet been received are assumed to be lost and the skb
 925 * can be released for processing. This may also release other skb's from the
 926 * reorder buffer if there are no additional gaps between the frames.
 927 *
 928 * Callers must hold tid_agg_rx->reorder_lock.
 929 */
 930#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
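/* with the usual CONFIG_HZ values, HZ / 10 works out to 100 ms */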
 931
 932static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 933					  struct tid_ampdu_rx *tid_agg_rx,
 934					  struct sk_buff_head *frames)
 935{
 936	int index, i, j;
 937
 938	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 939
 940	/* release the buffer until next missing frame */
 941	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
 942	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
 943	    tid_agg_rx->stored_mpdu_num) {
 944		/*
 945		 * No buffers ready to be released, but check whether any
 946		 * frames in the reorder buffer have timed out.
 947		 */
 948		int skipped = 1;
 949		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
 950		     j = (j + 1) % tid_agg_rx->buf_size) {
 951			if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
 952				skipped++;
 953				continue;
 954			}
 955			if (skipped &&
 956			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
 957					HT_RX_REORDER_BUF_TIMEOUT))
 958				goto set_release_timer;
 959
 960			/* don't leave incomplete A-MSDUs around */
 961			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
 962			     i = (i + 1) % tid_agg_rx->buf_size)
 963				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
 964
 965			ht_dbg_ratelimited(sdata,
 966					   "release an RX reorder frame due to timeout on earlier frames\n");
 967			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
 968							frames);
 969
 970			/*
 971			 * Increment the head seq# also for the skipped slots.
 972			 */
 973			tid_agg_rx->head_seq_num =
 974				(tid_agg_rx->head_seq_num +
 975				 skipped) & IEEE80211_SN_MASK;
 976			skipped = 0;
 977		}
 978	} else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
 979		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
 980						frames);
 981		index =	tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
 982	}
 983
 984	if (tid_agg_rx->stored_mpdu_num) {
 985		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
 986
 987		for (; j != (index - 1) % tid_agg_rx->buf_size;
 988		     j = (j + 1) % tid_agg_rx->buf_size) {
 989			if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
 990				break;
 991		}
 992
 993 set_release_timer:
 994
 995		if (!tid_agg_rx->removed)
 996			mod_timer(&tid_agg_rx->reorder_timer,
 997				  tid_agg_rx->reorder_time[j] + 1 +
 998				  HT_RX_REORDER_BUF_TIMEOUT);
 999	} else {
1000		del_timer(&tid_agg_rx->reorder_timer);
1001	}
1002}
1003
1004/*
1005 * As this function belongs to the RX path it must be under
1006 * rcu_read_lock protection. It returns false if the frame
1007 * can be processed immediately, true if it was consumed.
1008 */
1009static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1010					     struct tid_ampdu_rx *tid_agg_rx,
1011					     struct sk_buff *skb,
1012					     struct sk_buff_head *frames)
1013{
1014	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1015	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1016	u16 sc = le16_to_cpu(hdr->seq_ctrl);
1017	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
1018	u16 head_seq_num, buf_size;
1019	int index;
1020	bool ret = true;
1021
1022	spin_lock(&tid_agg_rx->reorder_lock);
1023
1024	/*
1025	 * Offloaded BA sessions have no known starting sequence number so pick
 1026	 * one from the first frame received for this TID after the BA session started.
1027	 */
1028	if (unlikely(tid_agg_rx->auto_seq)) {
1029		tid_agg_rx->auto_seq = false;
1030		tid_agg_rx->ssn = mpdu_seq_num;
1031		tid_agg_rx->head_seq_num = mpdu_seq_num;
1032	}
1033
1034	buf_size = tid_agg_rx->buf_size;
1035	head_seq_num = tid_agg_rx->head_seq_num;
1036
1037	/*
1038	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1039	 * be reordered.
1040	 */
1041	if (unlikely(!tid_agg_rx->started)) {
1042		if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1043			ret = false;
1044			goto out;
1045		}
1046		tid_agg_rx->started = true;
1047	}
1048
1049	/* frame with out of date sequence number */
1050	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1051		dev_kfree_skb(skb);
1052		goto out;
1053	}
1054
1055	/*
 1056	 * If the frame's sequence number exceeds our buffering window
 1057	 * size, release some previous frames to make room for this one.
1058	 */
1059	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1060		head_seq_num = ieee80211_sn_inc(
1061				ieee80211_sn_sub(mpdu_seq_num, buf_size));
1062		/* release stored frames up to new head to stack */
1063		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1064						 head_seq_num, frames);
1065	}
1066
1067	/* Now the new frame is always in the range of the reordering buffer */
1068
1069	index = mpdu_seq_num % tid_agg_rx->buf_size;
1070
1071	/* check if we already stored this frame */
1072	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1073		dev_kfree_skb(skb);
1074		goto out;
1075	}
1076
1077	/*
1078	 * If the current MPDU is in the right order and nothing else
1079	 * is stored we can process it directly, no need to buffer it.
1080	 * If it is first but there's something stored, we may be able
1081	 * to release frames after this one.
1082	 */
1083	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1084	    tid_agg_rx->stored_mpdu_num == 0) {
1085		if (!(status->flag & RX_FLAG_AMSDU_MORE))
1086			tid_agg_rx->head_seq_num =
1087				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1088		ret = false;
1089		goto out;
1090	}
1091
1092	/* put the frame in the reordering buffer */
1093	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1094	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1095		tid_agg_rx->reorder_time[index] = jiffies;
1096		tid_agg_rx->stored_mpdu_num++;
1097		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1098	}
1099
1100 out:
1101	spin_unlock(&tid_agg_rx->reorder_lock);
1102	return ret;
1103}
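/*
 * Worked example of the window handling above, assuming buf_size == 64
 * and head_seq_num == 100: MPDUs with sequence numbers 100..163 fall
 * inside the window and are stored at index (SN % 64); an MPDU with
 * SN 200 is outside it, so head_seq_num is first advanced to
 * 200 - 64 + 1 == 137 (releasing everything stored below that) and the
 * frame is then stored at index 200 % 64 == 8.
 */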
1104
1105/*
 1106 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that do not
 1107 * need reordering are placed on the frames queue for immediate processing.
1108 */
1109static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1110				       struct sk_buff_head *frames)
1111{
1112	struct sk_buff *skb = rx->skb;
1113	struct ieee80211_local *local = rx->local;
1114	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1115	struct sta_info *sta = rx->sta;
1116	struct tid_ampdu_rx *tid_agg_rx;
1117	u16 sc;
1118	u8 tid, ack_policy;
1119
1120	if (!ieee80211_is_data_qos(hdr->frame_control) ||
1121	    is_multicast_ether_addr(hdr->addr1))
1122		goto dont_reorder;
1123
1124	/*
1125	 * filter the QoS data rx stream according to
1126	 * STA/TID and check if this STA/TID is on aggregation
1127	 */
1128
1129	if (!sta)
1130		goto dont_reorder;
1131
1132	ack_policy = *ieee80211_get_qos_ctl(hdr) &
1133		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1134	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1135
1136	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
1137	if (!tid_agg_rx) {
1138		if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1139		    !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1140		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1141			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1142					     WLAN_BACK_RECIPIENT,
1143					     WLAN_REASON_QSTA_REQUIRE_SETUP);
1144		goto dont_reorder;
1145	}
1146
1147	/* qos null data frames are excluded */
1148	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1149		goto dont_reorder;
1150
1151	/* not part of a BA session */
1152	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1153	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
1154		goto dont_reorder;
1155
1156	/* new, potentially un-ordered, ampdu frame - process it */
1157
1158	/* reset session timer */
1159	if (tid_agg_rx->timeout)
1160		tid_agg_rx->last_rx = jiffies;
1161
1162	/* if this mpdu is fragmented - terminate rx aggregation session */
1163	sc = le16_to_cpu(hdr->seq_ctrl);
1164	if (sc & IEEE80211_SCTL_FRAG) {
1165		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
1166		skb_queue_tail(&rx->sdata->skb_queue, skb);
1167		ieee80211_queue_work(&local->hw, &rx->sdata->work);
1168		return;
1169	}
1170
1171	/*
1172	 * No locking needed -- we will only ever process one
1173	 * RX packet at a time, and thus own tid_agg_rx. All
1174	 * other code manipulating it needs to (and does) make
1175	 * sure that we cannot get to it any more before doing
1176	 * anything with it.
1177	 */
1178	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1179					     frames))
1180		return;
1181
1182 dont_reorder:
1183	__skb_queue_tail(frames, skb);
1184}
1185
1186static ieee80211_rx_result debug_noinline
1187ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1188{
1189	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1190	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1191
1192	if (status->flag & RX_FLAG_DUP_VALIDATED)
1193		return RX_CONTINUE;
1194
1195	/*
1196	 * Drop duplicate 802.11 retransmissions
1197	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
1198	 */
1199
1200	if (rx->skb->len < 24)
1201		return RX_CONTINUE;
1202
1203	if (ieee80211_is_ctl(hdr->frame_control) ||
1204	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
1205	    is_multicast_ether_addr(hdr->addr1))
1206		return RX_CONTINUE;
1207
1208	if (!rx->sta)
1209		return RX_CONTINUE;
1210
1211	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1212		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1213		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1214		rx->sta->rx_stats.num_duplicates++;
1215		return RX_DROP_UNUSABLE;
1216	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1217		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
1218	}
1219
1220	return RX_CONTINUE;
1221}
1222
1223static ieee80211_rx_result debug_noinline
1224ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1225{
1226	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1227
1228	/* Drop disallowed frame classes based on STA auth/assoc state;
1229	 * IEEE 802.11, Chap 5.5.
1230	 *
1231	 * mac80211 filters only based on association state, i.e. it drops
1232	 * Class 3 frames from not associated stations. hostapd sends
1233	 * deauth/disassoc frames when needed. In addition, hostapd is
1234	 * responsible for filtering on both auth and assoc states.
1235	 */
1236
1237	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1238		return ieee80211_rx_mesh_check(rx);
1239
1240	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1241		      ieee80211_is_pspoll(hdr->frame_control)) &&
1242		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1243		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
1244		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1245		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1246		/*
1247		 * accept port control frames from the AP even when it's not
1248		 * yet marked ASSOC to prevent a race where we don't set the
1249		 * assoc bit quickly enough before it sends the first frame
1250		 */
1251		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1252		    ieee80211_is_data_present(hdr->frame_control)) {
1253			unsigned int hdrlen;
1254			__be16 ethertype;
1255
1256			hdrlen = ieee80211_hdrlen(hdr->frame_control);
1257
1258			if (rx->skb->len < hdrlen + 8)
1259				return RX_DROP_MONITOR;
1260
1261			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
1262			if (ethertype == rx->sdata->control_port_protocol)
1263				return RX_CONTINUE;
1264		}
1265
1266		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1267		    cfg80211_rx_spurious_frame(rx->sdata->dev,
1268					       hdr->addr2,
1269					       GFP_ATOMIC))
1270			return RX_DROP_UNUSABLE;
1271
1272		return RX_DROP_MONITOR;
1273	}
1274
1275	return RX_CONTINUE;
1276}
1277
1278
1279static ieee80211_rx_result debug_noinline
1280ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1281{
1282	struct ieee80211_local *local;
1283	struct ieee80211_hdr *hdr;
1284	struct sk_buff *skb;
1285
1286	local = rx->local;
1287	skb = rx->skb;
1288	hdr = (struct ieee80211_hdr *) skb->data;
1289
1290	if (!local->pspolling)
1291		return RX_CONTINUE;
1292
1293	if (!ieee80211_has_fromds(hdr->frame_control))
1294		/* this is not from AP */
1295		return RX_CONTINUE;
1296
1297	if (!ieee80211_is_data(hdr->frame_control))
1298		return RX_CONTINUE;
1299
1300	if (!ieee80211_has_moredata(hdr->frame_control)) {
1301		/* AP has no more frames buffered for us */
1302		local->pspolling = false;
1303		return RX_CONTINUE;
1304	}
1305
1306	/* more data bit is set, let's request a new frame from the AP */
1307	ieee80211_send_pspoll(local, rx->sdata);
1308
1309	return RX_CONTINUE;
1310}
1311
1312static void sta_ps_start(struct sta_info *sta)
1313{
1314	struct ieee80211_sub_if_data *sdata = sta->sdata;
1315	struct ieee80211_local *local = sdata->local;
1316	struct ps_data *ps;
1317	int tid;
1318
1319	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1320	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1321		ps = &sdata->bss->ps;
1322	else
1323		return;
1324
1325	atomic_inc(&ps->num_sta_ps);
1326	set_sta_flag(sta, WLAN_STA_PS_STA);
1327	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1328		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1329	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1330	       sta->sta.addr, sta->sta.aid);
1331
1332	ieee80211_clear_fast_xmit(sta);
1333
1334	if (!sta->sta.txq[0])
1335		return;
1336
1337	for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
1338		if (txq_has_queue(sta->sta.txq[tid]))
1339			set_bit(tid, &sta->txq_buffered_tids);
1340		else
1341			clear_bit(tid, &sta->txq_buffered_tids);
1342	}
1343}
1344
1345static void sta_ps_end(struct sta_info *sta)
1346{
1347	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1348	       sta->sta.addr, sta->sta.aid);
1349
1350	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1351		/*
1352		 * Clear the flag only if the other one is still set
1353		 * so that the TX path won't start TX'ing new frames
1354		 * directly ... In the case that the driver flag isn't
1355		 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
1356		 */
1357		clear_sta_flag(sta, WLAN_STA_PS_STA);
1358		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1359		       sta->sta.addr, sta->sta.aid);
1360		return;
1361	}
1362
1363	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1364	clear_sta_flag(sta, WLAN_STA_PS_STA);
1365	ieee80211_sta_ps_deliver_wakeup(sta);
1366}
1367
1368int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1369{
1370	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1371	bool in_ps;
1372
1373	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1374
1375	/* Don't let the same PS state be set twice */
1376	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1377	if ((start && in_ps) || (!start && !in_ps))
1378		return -EINVAL;
1379
1380	if (start)
1381		sta_ps_start(sta);
1382	else
1383		sta_ps_end(sta);
1384
1385	return 0;
1386}
1387EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1388
1389void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
1390{
1391	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1392
1393	if (test_sta_flag(sta, WLAN_STA_SP))
1394		return;
1395
1396	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1397		ieee80211_sta_ps_deliver_poll_response(sta);
1398	else
1399		set_sta_flag(sta, WLAN_STA_PSPOLL);
1400}
1401EXPORT_SYMBOL(ieee80211_sta_pspoll);
1402
1403void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1404{
1405	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1406	u8 ac = ieee802_1d_to_ac[tid & 7];
1407
1408	/*
1409	 * If this AC is not trigger-enabled do nothing unless the
1410	 * driver is calling us after it already checked.
1411	 *
1412	 * NB: This could/should check a separate bitmap of trigger-
1413	 * enabled queues, but for now we only implement uAPSD w/o
1414	 * TSPEC changes to the ACs, so they're always the same.
1415	 */
1416	if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1417	    tid != IEEE80211_NUM_TIDS)
1418		return;
1419
1420	/* if we are in a service period, do nothing */
1421	if (test_sta_flag(sta, WLAN_STA_SP))
1422		return;
1423
1424	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1425		ieee80211_sta_ps_deliver_uapsd(sta);
1426	else
1427		set_sta_flag(sta, WLAN_STA_UAPSD);
1428}
1429EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
1430
1431static ieee80211_rx_result debug_noinline
1432ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1433{
1434	struct ieee80211_sub_if_data *sdata = rx->sdata;
1435	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1436	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1437
1438	if (!rx->sta)
1439		return RX_CONTINUE;
1440
1441	if (sdata->vif.type != NL80211_IFTYPE_AP &&
1442	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1443		return RX_CONTINUE;
1444
1445	/*
1446	 * The device handles station powersave, so don't do anything about
 1447	 * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
 1448	 * to mac80211 by the device, since it handles them.)
1449	 */
1450	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1451		return RX_CONTINUE;
1452
1453	/*
1454	 * Don't do anything if the station isn't already asleep. In
1455	 * the uAPSD case, the station will probably be marked asleep,
1456	 * in the PS-Poll case the station must be confused ...
1457	 */
1458	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1459		return RX_CONTINUE;
1460
1461	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1462		ieee80211_sta_pspoll(&rx->sta->sta);
1463
1464		/* Free PS Poll skb here instead of returning RX_DROP that would
 1465		 * count as a dropped frame. */
1466		dev_kfree_skb(rx->skb);
1467
1468		return RX_QUEUED;
1469	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1470		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1471		   ieee80211_has_pm(hdr->frame_control) &&
1472		   (ieee80211_is_data_qos(hdr->frame_control) ||
1473		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1474		u8 tid;
1475
1476		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1477
1478		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
1479	}
1480
1481	return RX_CONTINUE;
1482}
1483
1484static ieee80211_rx_result debug_noinline
1485ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1486{
1487	struct sta_info *sta = rx->sta;
1488	struct sk_buff *skb = rx->skb;
1489	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1490	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1491	int i;
1492
1493	if (!sta)
1494		return RX_CONTINUE;
1495
1496	/*
1497	 * Update last_rx only for IBSS packets which are for the current
1498	 * BSSID and for station already AUTHORIZED to avoid keeping the
1499	 * current IBSS network alive in cases where other STAs start
1500	 * using different BSSID. This will also give the station another
1501	 * chance to restart the authentication/authorization in case
1502	 * something went wrong the first time.
1503	 */
1504	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1505		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1506						NL80211_IFTYPE_ADHOC);
1507		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1508		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1509			sta->rx_stats.last_rx = jiffies;
1510			if (ieee80211_is_data(hdr->frame_control) &&
1511			    !is_multicast_ether_addr(hdr->addr1))
1512				sta->rx_stats.last_rate =
1513					sta_stats_encode_rate(status);
1514		}
1515	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1516		sta->rx_stats.last_rx = jiffies;
1517	} else if (!is_multicast_ether_addr(hdr->addr1)) {
1518		/*
 1519		 * Mesh beacons will update last_rx if they are found to
1520		 * match the current local configuration when processed.
1521		 */
1522		sta->rx_stats.last_rx = jiffies;
1523		if (ieee80211_is_data(hdr->frame_control))
1524			sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1525	}
1526
1527	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1528		ieee80211_sta_rx_notify(rx->sdata, hdr);
1529
1530	sta->rx_stats.fragments++;
1531
1532	u64_stats_update_begin(&rx->sta->rx_stats.syncp);
1533	sta->rx_stats.bytes += rx->skb->len;
1534	u64_stats_update_end(&rx->sta->rx_stats.syncp);
1535
1536	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1537		sta->rx_stats.last_signal = status->signal;
1538		ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
1539	}
1540
1541	if (status->chains) {
1542		sta->rx_stats.chains = status->chains;
1543		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1544			int signal = status->chain_signal[i];
1545
1546			if (!(status->chains & BIT(i)))
1547				continue;
1548
1549			sta->rx_stats.chain_signal_last[i] = signal;
1550			ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
1551					-signal);
1552		}
1553	}
1554
1555	/*
1556	 * Change STA power saving mode only at the end of a frame
1557	 * exchange sequence.
1558	 */
1559	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1560	    !ieee80211_has_morefrags(hdr->frame_control) &&
1561	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1562	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1563	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1564	    /* PM bit is only checked in frames where it isn't reserved,
1565	     * in AP mode it's reserved in non-bufferable management frames
1566	     * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
1567	     */
1568	    (!ieee80211_is_mgmt(hdr->frame_control) ||
1569	     ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
1570		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1571			if (!ieee80211_has_pm(hdr->frame_control))
1572				sta_ps_end(sta);
1573		} else {
1574			if (ieee80211_has_pm(hdr->frame_control))
1575				sta_ps_start(sta);
1576		}
1577	}
1578
1579	/* mesh power save support */
1580	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1581		ieee80211_mps_rx_h_sta_process(sta, hdr);
1582
1583	/*
1584	 * Drop (qos-)data::nullfunc frames silently, since they
1585	 * are used only to control station power saving mode.
1586	 */
1587	if (ieee80211_is_nullfunc(hdr->frame_control) ||
1588	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1589		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1590
1591		/*
1592		 * If we receive a 4-addr nullfunc frame from a STA
 1593		 * that was not moved to a 4-addr STA vlan yet, send
1594		 * the event to userspace and for older hostapd drop
1595		 * the frame to the monitor interface.
1596		 */
1597		if (ieee80211_has_a4(hdr->frame_control) &&
1598		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1599		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1600		      !rx->sdata->u.vlan.sta))) {
1601			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1602				cfg80211_rx_unexpected_4addr_frame(
1603					rx->sdata->dev, sta->sta.addr,
1604					GFP_ATOMIC);
1605			return RX_DROP_MONITOR;
1606		}
1607		/*
1608		 * Update counter and free packet here to avoid
 1609		 * counting this as a dropped packet.
1610		 */
1611		sta->rx_stats.packets++;
1612		dev_kfree_skb(rx->skb);
1613		return RX_QUEUED;
1614	}
1615
1616	return RX_CONTINUE;
1617} /* ieee80211_rx_h_sta_process */
1618
1619static ieee80211_rx_result debug_noinline
1620ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1621{
1622	struct sk_buff *skb = rx->skb;
1623	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1624	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1625	int keyidx;
1626	int hdrlen;
1627	ieee80211_rx_result result = RX_DROP_UNUSABLE;
1628	struct ieee80211_key *sta_ptk = NULL;
1629	int mmie_keyidx = -1;
1630	__le16 fc;
1631	const struct ieee80211_cipher_scheme *cs = NULL;
1632
1633	/*
1634	 * Key selection 101
1635	 *
1636	 * There are four types of keys:
1637	 *  - GTK (group keys)
1638	 *  - IGTK (group keys for management frames)
1639	 *  - PTK (pairwise keys)
1640	 *  - STK (station-to-station pairwise keys)
1641	 *
1642	 * When selecting a key, we have to distinguish between multicast
1643	 * (including broadcast) and unicast frames, the latter can only
1644	 * use PTKs and STKs while the former always use GTKs and IGTKs.
1645	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
1646	 * unicast frames can also use key indices like GTKs. Hence, if we
1647	 * don't have a PTK/STK we check the key index for a WEP key.
1648	 *
1649	 * Note that in a regular BSS, multicast frames are sent by the
1650	 * AP only, associated stations unicast the frame to the AP first
1651	 * which then multicasts it on their behalf.
1652	 *
1653	 * There is also a slight problem in IBSS mode: GTKs are negotiated
1654	 * with each station, that is something we don't currently handle.
1655	 * The spec seems to expect that one negotiates the same key with
1656	 * every station but there's no such requirement; VLANs could be
1657	 * possible.
1658	 */
1659
1660	/* start without a key */
1661	rx->key = NULL;
1662	fc = hdr->frame_control;
1663
1664	if (rx->sta) {
1665		int keyid = rx->sta->ptk_idx;
1666
1667		if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
1668			cs = rx->sta->cipher_scheme;
1669			keyid = ieee80211_get_cs_keyid(cs, rx->skb);
1670			if (unlikely(keyid < 0))
1671				return RX_DROP_UNUSABLE;
1672		}
1673		sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
1674	}
1675
1676	if (!ieee80211_has_protected(fc))
1677		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
1678
1679	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
1680		rx->key = sta_ptk;
1681		if ((status->flag & RX_FLAG_DECRYPTED) &&
1682		    (status->flag & RX_FLAG_IV_STRIPPED))
1683			return RX_CONTINUE;
1684		/* Skip decryption if the frame is not protected. */
1685		if (!ieee80211_has_protected(fc))
1686			return RX_CONTINUE;
1687	} else if (mmie_keyidx >= 0) {
1688		/* Broadcast/multicast robust management frame / BIP */
1689		if ((status->flag & RX_FLAG_DECRYPTED) &&
1690		    (status->flag & RX_FLAG_IV_STRIPPED))
1691			return RX_CONTINUE;
1692
1693		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
1694		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1695			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1696		if (rx->sta) {
1697			if (ieee80211_is_group_privacy_action(skb) &&
1698			    test_sta_flag(rx->sta, WLAN_STA_MFP))
1699				return RX_DROP_MONITOR;
1700
1701			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
1702		}
1703		if (!rx->key)
1704			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
1705	} else if (!ieee80211_has_protected(fc)) {
1706		/*
1707		 * The frame was not protected, so skip decryption. However, we
1708		 * need to set rx->key if there is a key that could have been
1709		 * used so that the frame may be dropped if encryption would
1710		 * have been expected.
1711		 */
1712		struct ieee80211_key *key = NULL;
1713		struct ieee80211_sub_if_data *sdata = rx->sdata;
1714		int i;
1715
1716		if (ieee80211_is_mgmt(fc) &&
1717		    is_multicast_ether_addr(hdr->addr1) &&
1718		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
1719			rx->key = key;
1720		else {
1721			if (rx->sta) {
1722				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1723					key = rcu_dereference(rx->sta->gtk[i]);
1724					if (key)
1725						break;
1726				}
1727			}
1728			if (!key) {
1729				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1730					key = rcu_dereference(sdata->keys[i]);
1731					if (key)
1732						break;
1733				}
1734			}
1735			if (key)
1736				rx->key = key;
1737		}
1738		return RX_CONTINUE;
1739	} else {
1740		u8 keyid;
1741
1742		/*
1743		 * The device doesn't give us the IV so we won't be
1744		 * able to look up the key. That's ok though, we
1745		 * don't need to decrypt the frame, we just won't
1746		 * be able to keep statistics accurate.
1747		 * Except for key threshold notifications, should
1748		 * we somehow allow the driver to tell us which key
1749		 * the hardware used if this flag is set?
1750		 */
1751		if ((status->flag & RX_FLAG_DECRYPTED) &&
1752		    (status->flag & RX_FLAG_IV_STRIPPED))
1753			return RX_CONTINUE;
1754
1755		hdrlen = ieee80211_hdrlen(fc);
1756
1757		if (cs) {
1758			keyidx = ieee80211_get_cs_keyid(cs, rx->skb);
1759
1760			if (unlikely(keyidx < 0))
1761				return RX_DROP_UNUSABLE;
1762		} else {
1763			if (rx->skb->len < 8 + hdrlen)
1764				return RX_DROP_UNUSABLE; /* TODO: count this? */
1765			/*
1766			 * no need to call ieee80211_wep_get_keyidx,
1767			 * it verifies a bunch of things we've done already
1768			 */
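			/*
			 * The key index occupies the two most significant
			 * bits of the KeyID octet, i.e. the fourth byte
			 * following the 802.11 header.
			 */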
1769			skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
1770			keyidx = keyid >> 6;
1771		}
1772
1773		/* check per-station GTK first, if multicast packet */
1774		if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1775			rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1776
1777		/* if not found, try default key */
1778		if (!rx->key) {
1779			rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1780
1781			/*
1782			 * RSNA-protected unicast frames should always be
1783			 * sent with pairwise or station-to-station keys,
1784			 * but for WEP we allow using a key index as well.
1785			 */
1786			if (rx->key &&
1787			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1788			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1789			    !is_multicast_ether_addr(hdr->addr1))
1790				rx->key = NULL;
1791		}
1792	}
1793
1794	if (rx->key) {
1795		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1796			return RX_DROP_MONITOR;
1797
1798		/* TODO: add threshold stuff again */
1799	} else {
1800		return RX_DROP_MONITOR;
1801	}
1802
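	/*
	 * Dispatch to the cipher-specific decrypt/verify routine;
	 * ciphers not handled in software here (e.g. vendor cipher
	 * schemes) fall back to ieee80211_crypto_hw_decrypt().
	 */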
1803	switch (rx->key->conf.cipher) {
1804	case WLAN_CIPHER_SUITE_WEP40:
1805	case WLAN_CIPHER_SUITE_WEP104:
1806		result = ieee80211_crypto_wep_decrypt(rx);
1807		break;
1808	case WLAN_CIPHER_SUITE_TKIP:
1809		result = ieee80211_crypto_tkip_decrypt(rx);
1810		break;
1811	case WLAN_CIPHER_SUITE_CCMP:
1812		result = ieee80211_crypto_ccmp_decrypt(
1813			rx, IEEE80211_CCMP_MIC_LEN);
1814		break;
1815	case WLAN_CIPHER_SUITE_CCMP_256:
1816		result = ieee80211_crypto_ccmp_decrypt(
1817			rx, IEEE80211_CCMP_256_MIC_LEN);
1818		break;
1819	case WLAN_CIPHER_SUITE_AES_CMAC:
1820		result = ieee80211_crypto_aes_cmac_decrypt(rx);
1821		break;
1822	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
1823		result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
1824		break;
1825	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
1826	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
1827		result = ieee80211_crypto_aes_gmac_decrypt(rx);
1828		break;
1829	case WLAN_CIPHER_SUITE_GCMP:
1830	case WLAN_CIPHER_SUITE_GCMP_256:
1831		result = ieee80211_crypto_gcmp_decrypt(rx);
1832		break;
1833	default:
1834		result = ieee80211_crypto_hw_decrypt(rx);
1835	}
1836
1837	/* the hdr variable is invalid after the decrypt handlers */
1838
1839	/* either the frame has been decrypted or will be dropped */
1840	status->flag |= RX_FLAG_DECRYPTED;
1841
1842	return result;
1843}
1844
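/*
 * Claim the next slot in the small per-interface fragment cache
 * (round-robin, purging whatever the oldest entry still holds) and
 * seed it with the first fragment of a new frame.
 */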
1845static inline struct ieee80211_fragment_entry *
1846ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1847			 unsigned int frag, unsigned int seq, int rx_queue,
1848			 struct sk_buff **skb)
1849{
1850	struct ieee80211_fragment_entry *entry;
1851
1852	entry = &sdata->fragments[sdata->fragment_next++];
1853	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1854		sdata->fragment_next = 0;
1855
1856	if (!skb_queue_empty(&entry->skb_list))
1857		__skb_queue_purge(&entry->skb_list);
1858
1859	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1860	*skb = NULL;
1861	entry->first_frag_time = jiffies;
1862	entry->seq = seq;
1863	entry->rx_queue = rx_queue;
1864	entry->last_frag = frag;
1865	entry->check_sequential_pn = false;
1866	entry->extra_len = 0;
1867
1868	return entry;
1869}
1870
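/*
 * Find the cache entry that this fragment continues: the sequence
 * number, RX queue, frame type and addresses must match, the fragment
 * number must be the next expected one, and the entry must be less
 * than two seconds old.
 */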
1871static inline struct ieee80211_fragment_entry *
1872ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1873			  unsigned int frag, unsigned int seq,
1874			  int rx_queue, struct ieee80211_hdr *hdr)
1875{
1876	struct ieee80211_fragment_entry *entry;
1877	int i, idx;
1878
1879	idx = sdata->fragment_next;
1880	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1881		struct ieee80211_hdr *f_hdr;
1882
1883		idx--;
1884		if (idx < 0)
1885			idx = IEEE80211_FRAGMENT_MAX - 1;
1886
1887		entry = &sdata->fragments[idx];
1888		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1889		    entry->rx_queue != rx_queue ||
1890		    entry->last_frag + 1 != frag)
1891			continue;
1892
1893		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1894
1895		/*
1896		 * Check that the frame type and addresses match, else check the next fragment
1897		 */
1898		if (((hdr->frame_control ^ f_hdr->frame_control) &
1899		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1900		    !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
1901		    !ether_addr_equal(hdr->addr2, f_hdr->addr2))
1902			continue;
1903
1904		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1905			__skb_queue_purge(&entry->skb_list);
1906			continue;
1907		}
1908		return entry;
1909	}
1910
1911	return NULL;
1912}
1913
1914static ieee80211_rx_result debug_noinline
1915ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1916{
1917	struct ieee80211_hdr *hdr;
1918	u16 sc;
1919	__le16 fc;
1920	unsigned int frag, seq;
1921	struct ieee80211_fragment_entry *entry;
1922	struct sk_buff *skb;
1923	struct ieee80211_rx_status *status;
1924
1925	hdr = (struct ieee80211_hdr *)rx->skb->data;
1926	fc = hdr->frame_control;
1927
1928	if (ieee80211_is_ctl(fc))
1929		return RX_CONTINUE;
1930
1931	sc = le16_to_cpu(hdr->seq_ctrl);
1932	frag = sc & IEEE80211_SCTL_FRAG;
1933
1934	if (is_multicast_ether_addr(hdr->addr1)) {
1935		I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
1936		goto out_no_led;
1937	}
1938
1939	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
1940		goto out;
1941
1942	I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1943
1944	if (skb_linearize(rx->skb))
1945		return RX_DROP_UNUSABLE;
1946
1947	/*
1948	 *  skb_linearize() might change the skb->data and
1949	 *  previously cached variables (in this case, hdr) need to
1950	 *  be refreshed with the new data.
1951	 */
1952	hdr = (struct ieee80211_hdr *)rx->skb->data;
1953	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1954
1955	if (frag == 0) {
1956		/* This is the first fragment of a new frame. */
1957		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1958						 rx->seqno_idx, &(rx->skb));
1959		if (rx->key &&
1960		    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
1961		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
1962		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
1963		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
1964		    ieee80211_has_protected(fc)) {
1965			int queue = rx->security_idx;
1966
1967			/* Store CCMP/GCMP PN so that we can verify that the
1968			 * next fragment has a sequential PN value.
1969			 */
1970			entry->check_sequential_pn = true;
1971			memcpy(entry->last_pn,
1972			       rx->key->u.ccmp.rx_pn[queue],
1973			       IEEE80211_CCMP_PN_LEN);
1974			BUILD_BUG_ON(offsetof(struct ieee80211_key,
1975					      u.ccmp.rx_pn) !=
1976				     offsetof(struct ieee80211_key,
1977					      u.gcmp.rx_pn));
1978			BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
1979				     sizeof(rx->key->u.gcmp.rx_pn[queue]));
1980			BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
1981				     IEEE80211_GCMP_PN_LEN);
1982		}
1983		return RX_QUEUED;
1984	}
1985
1986	/* This is a fragment for a frame that should already be pending in
1987	 * the fragment cache. Add this fragment to the end of the pending entry.
1988	 */
1989	entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
1990					  rx->seqno_idx, hdr);
1991	if (!entry) {
1992		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1993		return RX_DROP_MONITOR;
1994	}
1995
1996	/* "The receiver shall discard MSDUs and MMPDUs whose constituent
1997	 *  MPDU PN values are not incrementing in steps of 1."
1998	 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
1999	 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2000	 */
2001	if (entry->check_sequential_pn) {
2002		int i;
2003		u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2004		int queue;
2005
2006		if (!rx->key ||
2007		    (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
2008		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
2009		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
2010		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
2011			return RX_DROP_UNUSABLE;
2012		memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
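		/* expected PN is the previous fragment's PN plus one;
		 * increment the big-endian value with carry propagation
		 */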
2013		for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2014			pn[i]++;
2015			if (pn[i])
2016				break;
2017		}
2018		queue = rx->security_idx;
2019		rpn = rx->key->u.ccmp.rx_pn[queue];
2020		if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2021			return RX_DROP_UNUSABLE;
2022		memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2023	}
2024
2025	skb_pull(rx->skb, ieee80211_hdrlen(fc));
2026	__skb_queue_tail(&entry->skb_list, rx->skb);
2027	entry->last_frag = frag;
2028	entry->extra_len += rx->skb->len;
2029	if (ieee80211_has_morefrags(fc)) {
2030		rx->skb = NULL;
2031		return RX_QUEUED;
2032	}
2033
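	/*
	 * Last fragment received: use the first fragment as the base skb,
	 * make sure it has enough tailroom for all remaining fragments and
	 * append their payloads to rebuild the complete frame.
	 */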
2034	rx->skb = __skb_dequeue(&entry->skb_list);
2035	if (skb_tailroom(rx->skb) < entry->extra_len) {
2036		I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2037		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2038					      GFP_ATOMIC))) {
2039			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2040			__skb_queue_purge(&entry->skb_list);
2041			return RX_DROP_UNUSABLE;
2042		}
2043	}
2044	while ((skb = __skb_dequeue(&entry->skb_list))) {
2045		memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
2046		dev_kfree_skb(skb);
2047	}
2048
2049	/* Complete frame has been reassembled - process it now */
2050	status = IEEE80211_SKB_RXCB(rx->skb);
2051
2052 out:
2053	ieee80211_led_rx(rx->local);
2054 out_no_led:
2055	if (rx->sta)
2056		rx->sta->rx_stats.packets++;
2057	return RX_CONTINUE;
2058}
2059
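/*
 * 802.1X controlled port: reject data frames from stations that have
 * not been authorized yet (EAPOL frames to us/the PAE group address are
 * exempted by the caller, ieee80211_frame_allowed()).
 */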
2060static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2061{
2062	if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2063		return -EACCES;
2064
2065	return 0;
2066}
2067
2068static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2069{
2070	struct sk_buff *skb = rx->skb;
2071	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2072
2073	/*
2074	 * Pass through unencrypted frames if the hardware has
2075	 * decrypted them already.
2076	 */
2077	if (status->flag & RX_FLAG_DECRYPTED)
2078		return 0;
2079
2080	/* Drop unencrypted frames if key is set. */
2081	if (unlikely(!ieee80211_has_protected(fc) &&
2082		     !ieee80211_is_nullfunc(fc) &&
2083		     ieee80211_is_data(fc) && rx->key))
2084		return -EACCES;
2085
2086	return 0;
2087}
2088
2089static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2090{
2091	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2092	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2093	__le16 fc = hdr->frame_control;
2094
2095	/*
2096	 * Pass through unencrypted frames if the hardware has
2097	 * decrypted them already.
2098	 */
2099	if (status->flag & RX_FLAG_DECRYPTED)
2100		return 0;
2101
2102	if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2103		if (unlikely(!ieee80211_has_protected(fc) &&
2104			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
2105			     rx->key)) {
2106			if (ieee80211_is_deauth(fc) ||
2107			    ieee80211_is_disassoc(fc))
2108				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2109							     rx->skb->data,
2110							     rx->skb->len);
2111			return -EACCES;
2112		}
2113		/* BIP does not use the Protected field, so we need to check the MMIE */
2114		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2115			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2116			if (ieee80211_is_deauth(fc) ||
2117			    ieee80211_is_disassoc(fc))
2118				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2119							     rx->skb->data,
2120							     rx->skb->len);
2121			return -EACCES;
2122		}
2123		/*
2124		 * When using MFP, Action frames are not allowed prior to
2125		 * having configured keys.
2126		 */
2127		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2128			     ieee80211_is_robust_mgmt_frame(rx->skb)))
2129			return -EACCES;
2130	}
2131
2132	return 0;
2133}
2134
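/*
 * Convert the 802.11 data frame to an 802.3 frame and tell the caller
 * whether it is a control port (e.g. EAPOL) frame; address combinations
 * that make no sense for the interface type, such as unexpected
 * 4-address frames, are rejected.
 */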
2135static int
2136__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2137{
2138	struct ieee80211_sub_if_data *sdata = rx->sdata;
2139	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2140	bool check_port_control = false;
2141	struct ethhdr *ehdr;
2142	int ret;
2143
2144	*port_control = false;
2145	if (ieee80211_has_a4(hdr->frame_control) &&
2146	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2147		return -1;
2148
2149	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2150	    !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2151
2152		if (!sdata->u.mgd.use_4addr)
2153			return -1;
2154		else
2155			check_port_control = true;
2156	}
2157
2158	if (is_multicast_ether_addr(hdr->addr1) &&
2159	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2160		return -1;
2161
2162	ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2163	if (ret < 0)
2164		return ret;
2165
2166	ehdr = (struct ethhdr *) rx->skb->data;
2167	if (ehdr->h_proto == rx->sdata->control_port_protocol)
2168		*port_control = true;
2169	else if (check_port_control)
2170		return -1;
2171
2172	return 0;
2173}
2174
2175/*
2176 * requires that rx->skb is a frame with ethernet header
2177 */
2178static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2179{
2180	static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2181		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
2182	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2183
2184	/*
2185	 * Allow EAPOL frames to us/the PAE group address regardless
2186	 * of whether the frame was encrypted or not.
2187	 */
2188	if (ehdr->h_proto == rx->sdata->control_port_protocol &&
2189	    (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
2190	     ether_addr_equal(ehdr->h_dest, pae_group_addr)))
2191		return true;
2192
2193	if (ieee80211_802_1x_port_control(rx) ||
2194	    ieee80211_drop_unencrypted(rx, fc))
2195		return false;
2196
2197	return true;
2198}
2199
2199
2200/*
2201 * requires that rx->skb is a frame with ethernet header
2202 */
2203static void
2204ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2205{
2206	struct ieee80211_sub_if_data *sdata = rx->sdata;
2207	struct net_device *dev = sdata->dev;
2208	struct sk_buff *skb, *xmit_skb;
2209	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2210	struct sta_info *dsta;
2211
2212	skb = rx->skb;
2213	xmit_skb = NULL;
2214
2215	ieee80211_rx_stats(dev, skb->len);
2216
2217	if (rx->sta) {
2218		/* The seqno index has the same property as needed
2219		 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2220		 * for non-QoS-data frames. Here we know it's a data
2221		 * frame, so count MSDUs.
2222		 */
2223		u64_stats_update_begin(&rx->sta->rx_stats.syncp);
2224		rx->sta->rx_stats.msdu[rx->seqno_idx]++;
2225		u64_stats_update_end(&rx->sta->rx_stats.syncp);
2226	}
2227
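	/*
	 * In AP/AP_VLAN mode (unless bridging is disabled), frames destined
	 * to stations in the same BSS are sent back to the wireless medium:
	 * multicast frames are duplicated, unicast frames to an associated
	 * station are redirected instead of going up the local stack.
	 */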
2228	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2229	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2230	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2231	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2232		if (is_multicast_ether_addr(ehdr->h_dest) &&
2233		    ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2234			/*
2235			 * send multicast frames both to higher layers in
2236			 * local net stack and back to the wireless medium
2237			 */
2238			xmit_skb = skb_copy(skb, GFP_ATOMIC);
2239			if (!xmit_skb)
2240				net_info_ratelimited("%s: failed to clone multicast frame\n",
2241						    dev->name);
2242		} else if (!is_multicast_ether_addr(ehdr->h_dest)) {
2243			dsta = sta_info_get(sdata, skb->data);
2244			if (dsta) {
2245				/*
2246				 * The destination station is associated to
2247				 * this AP (in this VLAN), so send the frame
2248				 * directly to it and do not pass it to local
2249				 * net stack.
2250				 */
2251				xmit_skb = skb;
2252				skb = NULL;
2253			}
2254		}
2255	}
2256
2257#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2258	if (skb) {
2259		/* 'align' will only take the values 0 or 2 here since all
2260		 * frames are required to be aligned to 2-byte boundaries
2261		 * when being passed to mac80211; the code here works just
2262		 * as well if that isn't true, but mac80211 assumes it can
2263		 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2264		 */
2265		int align;
2266
2267		align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
2268		if (align) {
2269			if (WARN_ON(skb_headroom(skb) < 3)) {
2270				dev_kfree_skb(skb);
2271				skb = NULL;
2272			} else {
2273				u8 *data = skb->data;
2274				size_t len = skb_headlen(skb);
2275				skb->data -= align;
2276				memmove(skb->data, data, len);
2277				skb_set_tail_pointer(skb, len);
2278			}
2279		}
2280	}
2281#endif
2282
2283	if (skb) {
2284		/* deliver to local stack */
2285		skb->protocol = eth_type_trans(skb, dev);
2286		memset(skb->cb, 0, sizeof(skb->cb));
2287		if (rx->napi)
2288			napi_gro_receive(rx->napi, skb);
2289		else
2290			netif_receive_skb(skb);
2291	}
2292
2293	if (xmit_skb) {
2294		/*
2295		 * Send to wireless media and increase priority by 256 to
2296		 * keep the received priority instead of reclassifying
2297		 * the frame (see cfg80211_classify8021d).
2298		 */
2299		xmit_skb->priority += 256;
2300		xmit_skb->protocol = htons(ETH_P_802_3);
2301		skb_reset_network_header(xmit_skb);
2302		skb_reset_mac_header(xmit_skb);
2303		dev_queue_xmit(xmit_skb);
2304	}
2305}
2306
2307static ieee80211_rx_result debug_noinline
2308ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2309{
2310	struct net_device *dev = rx->sdata->dev;
2311	struct sk_buff *skb = rx->skb;
2312	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2313	__le16 fc = hdr->frame_control;
2314	struct sk_buff_head frame_list;
2315	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2316	struct ethhdr ethhdr;
2317	const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
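	/*
	 * check_da/check_sa point into ethhdr, which is filled in by
	 * ieee80211_data_to_8023_exthdr() below; they are passed to
	 * ieee80211_amsdu_to_8023s() to validate the inner subframe
	 * addresses and are set to NULL where no such check is wanted.
	 */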
2318
2319	if (unlikely(!ieee80211_is_data(fc)))
2320		return RX_CONTINUE;
2321
2322	if (unlikely(!ieee80211_is_data_present(fc)))
2323		return RX_DROP_MONITOR;
2324
2325	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2326		return RX_CONTINUE;
2327
2328	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2329		switch (rx->sdata->vif.type) {
2330		case NL80211_IFTYPE_AP_VLAN:
2331			if (!rx->sdata->u.vlan.sta)
2332				return RX_DROP_UNUSABLE;
2333			break;
2334		case NL80211_IFTYPE_STATION:
2335			if (!rx->sdata->u.mgd.use_4addr)
2336				return RX_DROP_UNUSABLE;
2337			break;
2338		default:
2339			return RX_DROP_UNUSABLE;
2340		}
2341		check_da = NULL;
2342		check_sa = NULL;
2343	} else switch (rx->sdata->vif.type) {
2344		case NL80211_IFTYPE_AP:
2345		case NL80211_IFTYPE_AP_VLAN:
2346			check_da = NULL;
2347			break;
2348		case NL80211_IFTYPE_STATION:
2349			if (!rx->sta ||
2350			    !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2351				check_sa = NULL;
2352			break;
2353		case NL80211_IFTYPE_MESH_POINT:
2354			check_sa = NULL;
2355			break;
2356		default:
2357			break;
2358	}
2359
2360	if (is_multicast_ether_addr(hdr->addr1))
2361		return RX_DROP_UNUSABLE;
2362
2363	skb->dev = dev;
2364	__skb_queue_head_init(&frame_list);
2365
2366	if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2367					  rx->sdata->vif.addr,
2368					  rx->sdata->vif.type))
2369		return RX_DROP_UNUSABLE;
2370
2371	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2372				 rx->sdata->vif.type,
2373				 rx->local->hw.extra_tx_headroom,
2374				 check_da, check_sa);
2375
2376	while (!skb_queue_empty(&frame_list)) {
2377		rx->skb = __skb_dequeue(&frame_list);
2378
2379		if (!ieee80211_frame_allowed(rx, fc)) {
2380			dev_kfree_skb(rx->skb);
2381			continue;
2382		}
2383
2384		ieee80211_deliver_skb(rx);
2385	}
2386
2387	return RX_QUEUED;
2388}
2389
2390#ifdef CONFIG_MAC80211_MESH
2391static ieee80211_rx_result
2392ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2393{
2394	struct ieee80211_hdr *fwd_hdr, *hdr;
2395	struct ieee80211_tx_info *info;
2396	struct ieee80211s_hdr *mesh_hdr;
2397	struct sk_buff *skb = rx->skb, *fwd_skb;
2398	struct ieee80211_local *local = rx->local;
2399	struct ieee80211_sub_if_data *sdata = rx->sdata;
2400	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2401	u16 ac, q, hdrlen;
2402
2403	hdr = (struct ieee80211_hdr *) skb->data;
2404	hdrlen = ieee80211_hdrlen(hdr->frame_control);
2405
2406	/* make sure fixed part of mesh header is there, also checks skb len */
2407	if (!pskb_may_pull(rx->skb, hdrlen + 6))
2408		return RX_DROP_MONITOR;
2409
2410	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2411
2412	/* make sure full mesh header is there, also checks skb len */
2413	if (!pskb_may_pull(rx->skb,
2414			   hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
2415		return RX_DROP_MONITOR;
2416
2417	/* reload pointers */
2418	hdr = (struct ieee80211_hdr *) skb->data;
2419	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2420
2421	if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2422		return RX_DROP_MONITOR;
2423
2424	/* frame is in RMC, don't forward */
2425	if (ieee80211_is_data(hdr->frame_control) &&
2426	    is_multicast_ether_addr(hdr->addr1) &&
2427	    mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
2428		return RX_DROP_MONITOR;
2429
2430	if (!ieee80211_is_data(hdr->frame_control))
2431		return RX_CONTINUE;
2432
2433	if (!mesh_hdr->ttl)
2434		return RX_DROP_MONITOR;
2435
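	/*
	 * Address Extension: the frame carries a proxied (external)
	 * address, so learn or refresh the mesh proxy path (MPP) that
	 * maps it to the proxying mesh STA.
	 */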
2436	if (mesh_hdr->flags & MESH_FLAGS_AE) {
2437		struct mesh_path *mppath;
2438		char *proxied_addr;
2439		char *mpp_addr;
2440
2441		if (is_multicast_ether_addr(hdr->addr1)) {
2442			mpp_addr = hdr->addr3;
2443			proxied_addr = mesh_hdr->eaddr1;
2444		} else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
2445			/* has_a4 already checked in ieee80211_rx_mesh_check */
2446			mpp_addr = hdr->addr4;
2447			proxied_addr = mesh_hdr->eaddr2;
2448		} else {
2449			return RX_DROP_MONITOR;
2450		}
2451
2452		rcu_read_lock();
2453		mppath = mpp_path_lookup(sdata, proxied_addr);
2454		if (!mppath) {
2455			mpp_path_add(sdata, proxied_addr, mpp_addr);
2456		} else {
2457			spin_lock_bh(&mppath->state_lock);
2458			if (!ether_addr_equal(mppath->mpp, mpp_addr))
2459				memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
2460			mppath->exp_time = jiffies;
2461			spin_unlock_bh(&mppath->state_lock);
2462		}
2463		rcu_read_unlock();
2464	}
2465
2466	/* Frame has reached destination.  Don't forward */
2467	if (!is_multicast_ether_addr(hdr->addr1) &&
2468	    ether_addr_equal(sdata->vif.addr, hdr->addr3))
2469		return RX_CONTINUE;
2470
2471	ac = ieee80211_select_queue_80211(sdata, skb, hdr);
2472	q = sdata->vif.hw_queue[ac];
2473	if (ieee80211_queue_stopped(&local->hw, q)) {
2474		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
2475		return RX_DROP_MONITOR;
2476	}
2477	skb_set_queue_mapping(skb, q);
2478
2479	if (!--mesh_hdr->ttl) {
2480		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
2481		goto out;
2482	}
2483
2484	if (!ifmsh->mshcfg.dot11MeshForwarding)
2485		goto out;
2486
2487	fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2488				       sdata->encrypt_headroom, 0, GFP_ATOMIC);
2489	if (!fwd_skb) {
2490		net_info_ratelimited("%s: failed to clone mesh frame\n",
2491				    sdata->name);
2492		goto out;
2493	}
2494
2495	fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
2496	fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
2497	info = IEEE80211_SKB_CB(fwd_skb);
2498	memset(info, 0, sizeof(*info));
2499	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
2500	info->control.vif = &rx->sdata->vif;
2501	info->control.jiffies = jiffies;
2502	if (is_multicast_ether_addr(fwd_hdr->addr1)) {
2503		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2504		memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2505		/* update power mode indication when forwarding */
2506		ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2507	} else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2508		/* mesh power mode flags updated in mesh_nexthop_lookup */
2509		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2510	} else {
2511		/* unable to resolve next hop */
2512		mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2513				   fwd_hdr->addr3, 0,
2514				   WLAN_REASON_MESH_PATH_NOFORWARD,
2515				   fwd_hdr->addr2);
2516		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2517		kfree_skb(fwd_skb);
2518		return RX_DROP_MONITOR;
2519	}
2520
2521	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2522	ieee80211_add_pending_skb(local, fwd_skb);
2523 out:
2524	if (is_multicast_ether_addr(hdr->addr1))
2525		return RX_CONTINUE;
2526	return RX_DROP_MONITOR;
2527}
2528#endif
2529
2530static ieee80211_rx_result debug_noinline
2531ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2532{
2533	struct ieee80211_sub_if_data *sdata = rx->sdata;
2534	struct ieee80211_local *local = rx->local;
2535	struct net_device *dev = sdata->dev;
2536	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2537	__le16 fc = hdr->frame_control;
2538	bool port_control;
2539	int err;
2540
2541	if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2542		return RX_CONTINUE;
2543
2544	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2545		return RX_DROP_MONITOR;
2546
2547	/*
2548	 * Send unexpected-4addr-frame event to hostapd. For older versions,
2549	 * also drop the frame to cooked monitor interfaces.
2550	 */
2551	if (ieee80211_has_a4(hdr->frame_control) &&
2552	    sdata->vif.type == NL80211_IFTYPE_AP) {
2553		if (rx->sta &&
2554		    !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2555			cfg80211_rx_unexpected_4addr_frame(
2556				rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2557		return RX_DROP_MONITOR;
2558	}
2559
2560	err = __ieee80211_data_to_8023(rx, &port_control);
2561	if (unlikely(err))
2562		return RX_DROP_UNUSABLE;
2563
2564	if (!ieee80211_frame_allowed(rx, fc))
2565		return RX_DROP_MONITOR;
2566
2567	/* directly handle TDLS channel switch requests/responses */
2568	if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
2569						cpu_to_be16(ETH_P_TDLS))) {
2570		struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
2571
2572		if (pskb_may_pull(rx->skb,
2573				  offsetof(struct ieee80211_tdls_data, u)) &&
2574		    tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
2575		    tf->category == WLAN_CATEGORY_TDLS &&
2576		    (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
2577		     tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
2578			skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
2579			schedule_work(&local->tdls_chsw_work);
2580			if (rx->sta)
2581				rx->sta->rx_stats.packets++;
2582
2583			return RX_QUEUED;
2584		}
2585	}
2586
2587	if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2588	    unlikely(port_control) && sdata->bss) {
2589		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2590				     u.ap);
2591		dev = sdata->dev;
2592		rx->sdata = sdata;
2593	}
2594
2595	rx->skb->dev = dev;
2596
2597	if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
2598	    local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2599	    !is_multicast_ether_addr(
2600		    ((struct ethhdr *)rx->skb->data)->h_dest) &&
2601	    (!local->scanning &&
2602	     !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
2603		mod_timer(&local->dynamic_ps_timer, jiffies +
2604			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2605
2606	ieee80211_deliver_skb(rx);
2607
2608	return RX_QUEUED;
2609}
2610
2611static ieee80211_rx_result debug_noinline
2612ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
2613{
2614	struct sk_buff *skb = rx->skb;
2615	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2616	struct tid_ampdu_rx *tid_agg_rx;
2617	u16 start_seq_num;
2618	u16 tid;
2619
2620	if (likely(!ieee80211_is_ctl(bar->frame_control)))
2621		return RX_CONTINUE;
2622
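	/*
	 * Block Ack Request: release all reordered frames up to the BAR's
	 * starting sequence number and notify the driver of the event.
	 */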
2623	if (ieee80211_is_back_req(bar->frame_control)) {
2624		struct {
2625			__le16 control, start_seq_num;
2626		} __packed bar_data;
2627		struct ieee80211_event event = {
2628			.type = BAR_RX_EVENT,
2629		};
2630
2631		if (!rx->sta)
2632			return RX_DROP_MONITOR;
2633
2634		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2635				  &bar_data, sizeof(bar_data)))
2636			return RX_DROP_MONITOR;
2637
2638		tid = le16_to_cpu(bar_data.control) >> 12;
2639
2640		if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
2641		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
2642			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
2643					     WLAN_BACK_RECIPIENT,
2644					     WLAN_REASON_QSTA_REQUIRE_SETUP);
2645
2646		tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2647		if (!tid_agg_rx)
2648			return RX_DROP_MONITOR;
2649
2650		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2651		event.u.ba.tid = tid;
2652		event.u.ba.ssn = start_seq_num;
2653		event.u.ba.sta = &rx->sta->sta;
2654
2655		/* reset session timer */
2656		if (tid_agg_rx->timeout)
2657			mod_timer(&tid_agg_rx->session_timer,
2658				  TU_TO_EXP_TIME(tid_agg_rx->timeout));
2659
2660		spin_lock(&tid_agg_rx->reorder_lock);
2661		/* release stored frames up to start of BAR */
2662		ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
2663						 start_seq_num, frames);
2664		spin_unlock(&tid_agg_rx->reorder_lock);
2665
2666		drv_event_callback(rx->local, rx->sdata, &event);
2667
2668		kfree_skb(skb);
2669		return RX_QUEUED;
2670	}
2671
2672	/*
2673	 * After this point, we only want management frames,
2674	 * so we can drop all remaining control frames to
2675	 * cooked monitor interfaces.
2676	 */
2677	return RX_DROP_MONITOR;
2678}
2679
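/*
 * Reply to an SA Query Request (802.11w/MFP) from our AP by sending an
 * SA Query Response that echoes the request's transaction identifier.
 */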
2680static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2681					   struct ieee80211_mgmt *mgmt,
2682					   size_t len)
2683{
2684	struct ieee80211_local *local = sdata->local;
2685	struct sk_buff *skb;
2686	struct ieee80211_mgmt *resp;
2687
2688	if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
2689		/* Not addressed to our own unicast address */
2690		return;
2691	}
2692
2693	if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
2694	    !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
2695		/* Not from the current AP or not associated yet. */
2696		return;
2697	}
2698
2699	if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2700		/* Too short SA Query request frame */
2701		return;
2702	}
2703
2704	skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2705	if (skb == NULL)
2706		return;
2707
2708	skb_reserve(skb, local->hw.extra_tx_headroom);
2709	resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
2710	memset(resp, 0, 24);
2711	memcpy(resp->da, mgmt->sa, ETH_ALEN);
2712	memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2713	memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2714	resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2715					  IEEE80211_STYPE_ACTION);
2716	skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2717	resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2718	resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2719	memcpy(resp->u.action.u.sa_query.trans_id,
2720	       mgmt->u.action.u.sa_query.trans_id,
2721	       WLAN_SA_QUERY_TR_ID_LEN);
2722
2723	ieee80211_tx_skb(sdata, skb);
2724}
2725
2726static ieee80211_rx_result debug_noinline
2727ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2728{
2729	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2730	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2731
2732	/*
2733	 * From here on, look only at management frames.
2734	 * Data and control frames are already handled,
2735	 * and unknown (reserved) frames are useless.
2736	 */
2737	if (rx->skb->len < 24)
2738		return RX_DROP_MONITOR;
2739
2740	if (!ieee80211_is_mgmt(mgmt->frame_control))
2741		return RX_DROP_MONITOR;
2742
2743	if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2744	    ieee80211_is_beacon(mgmt->frame_control) &&
2745	    !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2746		int sig = 0;
2747
2748		if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
2749			sig = status->signal;
2750
2751		cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2752					    rx->skb->data, rx->skb->len,
2753					    status->freq, sig);
2754		rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2755	}
2756
2757	if (ieee80211_drop_unencrypted_mgmt(rx))
2758		return RX_DROP_UNUSABLE;
2759
2760	return RX_CONTINUE;
2761}
2762
2763static ieee80211_rx_result debug_noinline
2764ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2765{
2766	struct ieee80211_local *local = rx->local;
2767	struct ieee80211_sub_if_data *sdata = rx->sdata;
2768	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2769	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2770	int len = rx->skb->len;
2771
2772	if (!ieee80211_is_action(mgmt->frame_control))
2773		return RX_CONTINUE;
2774
2775	/* drop too small frames */
2776	if (len < IEEE80211_MIN_ACTION_SIZE)
2777		return RX_DROP_UNUSABLE;
2778
2779	if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
2780	    mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
2781	    mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
2782		return RX_DROP_UNUSABLE;
2783
2784	switch (mgmt->u.action.category) {
2785	case WLAN_CATEGORY_HT:
2786		/* reject HT action frames from stations not supporting HT */
2787		if (!rx->sta->sta.ht_cap.ht_supported)
2788			goto invalid;
2789
2790		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2791		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2792		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2793		    sdata->vif.type != NL80211_IFTYPE_AP &&
2794		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
2795			break;
2796
2797		/* verify action & smps_control/chanwidth are present */
2798		if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2799			goto invalid;
2800
2801		switch (mgmt->u.action.u.ht_smps.action) {
2802		case WLAN_HT_ACTION_SMPS: {
2803			struct ieee80211_supported_band *sband;
2804			enum ieee80211_smps_mode smps_mode;
2805
2806			/* convert to HT capability */
2807			switch (mgmt->u.action.u.ht_smps.smps_control) {
2808			case WLAN_HT_SMPS_CONTROL_DISABLED:
2809				smps_mode = IEEE80211_SMPS_OFF;
2810				break;
2811			case WLAN_HT_SMPS_CONTROL_STATIC:
2812				smps_mode = IEEE80211_SMPS_STATIC;
2813				break;
2814			case WLAN_HT_SMPS_CONTROL_DYNAMIC:
2815				smps_mode = IEEE80211_SMPS_DYNAMIC;
2816				break;
2817			default:
2818				goto invalid;
2819			}
2820
2821			/* if no change do nothing */
2822			if (rx->sta->sta.smps_mode == smps_mode)
2823				goto handled;
2824			rx->sta->sta.smps_mode = smps_mode;
2825
2826			sband = rx->local->hw.wiphy->bands[status->band];
2827
2828			rate_control_rate_update(local, sband, rx->sta,
2829						 IEEE80211_RC_SMPS_CHANGED);
2830			goto handled;
2831		}
2832		case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
2833			struct ieee80211_supported_band *sband;
2834			u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
2835			enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
2836
2837			/* If it doesn't support 40 MHz it can't change ... */
2838			if (!(rx->sta->sta.ht_cap.cap &
2839					IEEE80211_HT_CAP_SUP_WIDTH_20_40))
2840				goto handled;
2841
2842			if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
2843				max_bw = IEEE80211_STA_RX_BW_20;
2844			else
2845				max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
2846
2847			/* set cur_max_bandwidth and recalc sta bw */
2848			rx->sta->cur_max_bandwidth = max_bw;
2849			new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
2850
2851			if (rx->sta->sta.bandwidth == new_bw)
2852				goto handled;
2853
2854			rx->sta->sta.bandwidth = new_bw;
2855			sband = rx->local->hw.wiphy->bands[status->band];
2856
2857			rate_control_rate_update(local, sband, rx->sta,
2858						 IEEE80211_RC_BW_CHANGED);
2859			goto handled;
2860		}
2861		default:
2862			goto invalid;
2863		}
2864
2865		break;
2866	case WLAN_CATEGORY_PUBLIC:
2867		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2868			goto invalid;
2869		if (sdata->vif.type != NL80211_IFTYPE_STATION)
2870			break;
2871		if (!rx->sta)
2872			break;
2873		if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
2874			break;
2875		if (mgmt->u.action.u.ext_chan_switch.action_code !=
2876				WLAN_PUB_ACTION_EXT_CHANSW_ANN)
2877			break;
2878		if (len < offsetof(struct ieee80211_mgmt,
2879				   u.action.u.ext_chan_switch.variable))
2880			goto invalid;
2881		goto queue;
2882	case WLAN_CATEGORY_VHT:
2883		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2884		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2885		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2886		    sdata->vif.type != NL80211_IFTYPE_AP &&
2887		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
2888			break;
2889
2890		/* verify action code is present */
2891		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2892			goto invalid;
2893
2894		switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
2895		case WLAN_VHT_ACTION_OPMODE_NOTIF: {
2896			/* verify opmode is present */
2897			if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2898				goto invalid;
2899			goto queue;
2900		}
2901		case WLAN_VHT_ACTION_GROUPID_MGMT: {
2902			if (len < IEEE80211_MIN_ACTION_SIZE + 25)
2903				goto invalid;
2904			goto queue;
2905		}
2906		default:
2907			break;
2908		}
2909		break;
2910	case WLAN_CATEGORY_BACK:
2911		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2912		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2913		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2914		    sdata->vif.type != NL80211_IFTYPE_AP &&
2915		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
2916			break;
2917
2918		/* verify action_code is present */
2919		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2920			break;
2921
2922		switch (mgmt->u.action.u.addba_req.action_code) {
2923		case WLAN_ACTION_ADDBA_REQ:
2924			if (len < (IEEE80211_MIN_ACTION_SIZE +
2925				   sizeof(mgmt->u.action.u.addba_req)))
2926				goto invalid;
2927			break;
2928		case WLAN_ACTION_ADDBA_RESP:
2929			if (len < (IEEE80211_MIN_ACTION_SIZE +
2930				   sizeof(mgmt->u.action.u.addba_resp)))
2931				goto invalid;
2932			break;
2933		case WLAN_ACTION_DELBA:
2934			if (len < (IEEE80211_MIN_ACTION_SIZE +
2935				   sizeof(mgmt->u.action.u.delba)))
2936				goto invalid;
2937			break;
2938		default:
2939			goto invalid;
2940		}
2941
2942		goto queue;
2943	case WLAN_CATEGORY_SPECTRUM_MGMT:
2944		/* verify action_code is present */
2945		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2946			break;
2947
2948		switch (mgmt->u.action.u.measurement.action_code) {
2949		case WLAN_ACTION_SPCT_MSR_REQ:
2950			if (status->band != NL80211_BAND_5GHZ)
2951				break;
2952
2953			if (len < (IEEE80211_MIN_ACTION_SIZE +
2954				   sizeof(mgmt->u.action.u.measurement)))
2955				break;
2956
2957			if (sdata->vif.type != NL80211_IFTYPE_STATION)
2958				break;
2959
2960			ieee80211_process_measurement_req(sdata, mgmt, len);
2961			goto handled;
2962		case WLAN_ACTION_SPCT_CHL_SWITCH: {
2963			u8 *bssid;
2964			if (len < (IEEE80211_MIN_ACTION_SIZE +
2965				   sizeof(mgmt->u.action.u.chan_switch)))
2966				break;
2967
2968			if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2969			    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2970			    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
2971				break;
2972
2973			if (sdata->vif.type == NL80211_IFTYPE_STATION)
2974				bssid = sdata->u.mgd.bssid;
2975			else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
2976				bssid = sdata->u.ibss.bssid;
2977			else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
2978				bssid = mgmt->sa;
2979			else
2980				break;
2981
2982			if (!ether_addr_equal(mgmt->bssid, bssid))
2983				break;
2984
2985			goto queue;
2986			}
2987		}
2988		break;
2989	case WLAN_CATEGORY_SA_QUERY:
2990		if (len < (IEEE80211_MIN_ACTION_SIZE +
2991			   sizeof(mgmt->u.action.u.sa_query)))
2992			break;
2993
2994		switch (mgmt->u.action.u.sa_query.action) {
2995		case WLAN_ACTION_SA_QUERY_REQUEST:
2996			if (sdata->vif.type != NL80211_IFTYPE_STATION)
2997				break;
2998			ieee80211_process_sa_query_req(sdata, mgmt, len);
2999			goto handled;
3000		}
3001		break;
3002	case WLAN_CATEGORY_SELF_PROTECTED:
3003		if (len < (IEEE80211_MIN_ACTION_SIZE +
3004			   sizeof(mgmt->u.action.u.self_prot.action_code)))
3005			break;
3006
3007		switch (mgmt->u.action.u.self_prot.action_code) {
3008		case WLAN_SP_MESH_PEERING_OPEN:
3009		case WLAN_SP_MESH_PEERING_CLOSE:
3010		case WLAN_SP_MESH_PEERING_CONFIRM:
3011			if (!ieee80211_vif_is_mesh(&sdata->vif))
3012				goto invalid;
3013			if (sdata->u.mesh.user_mpm)
3014				/* userspace handles this frame */
3015				break;
3016			goto queue;
3017		case WLAN_SP_MGK_INFORM:
3018		case WLAN_SP_MGK_ACK:
3019			if (!ieee80211_vif_is_mesh(&sdata->vif))
3020				goto invalid;
3021			break;
3022		}
3023		break;
3024	case WLAN_CATEGORY_MESH_ACTION:
3025		if (len < (IEEE80211_MIN_ACTION_SIZE +
3026			   sizeof(mgmt->u.action.u.mesh_action.action_code)))
3027			break;
3028
3029		if (!ieee80211_vif_is_mesh(&sdata->vif))
3030			break;
3031		if (mesh_action_is_path_sel(mgmt) &&
3032		    !mesh_path_sel_is_hwmp(sdata))
3033			break;
3034		goto queue;
3035	}
3036
3037	return RX_CONTINUE;
3038
3039 invalid:
3040	status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3041	/* will be returned to the sender by a later handler */
3042	return RX_CONTINUE;
3043
3044 handled:
3045	if (rx->sta)
3046		rx->sta->rx_stats.packets++;
3047	dev_kfree_skb(rx->skb);
3048	return RX_QUEUED;
3049
3050 queue:
3051	rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
3052	skb_queue_tail(&sdata->skb_queue, rx->skb);
3053	ieee80211_queue_work(&local->hw, &sdata->work);
3054	if (rx->sta)
3055		rx->sta->rx_stats.packets++;
3056	return RX_QUEUED;
3057}
3058
3059static ieee80211_rx_result debug_noinline
3060ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3061{
3062	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3063	int sig = 0;
3064
3065	/* skip known-bad action frames and return them in the next handler */
3066	if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3067		return RX_CONTINUE;
3068
3069	/*
3070	 * Getting here means the kernel doesn't know how to handle
3071	 * it, but maybe userspace does ... include returned frames
3072	 * so userspace can register for those to know whether frames
3073	 * it transmitted were processed or returned.
3074	 */
3075
3076	if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
3077		sig = status->signal;
3078
3079	if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
3080			     rx->skb->data, rx->skb->len, 0)) {
3081		if (rx->sta)
3082			rx->sta->rx_stats.packets++;
3083		dev_kfree_skb(rx->skb);
3084		return RX_QUEUED;
3085	}
3086
3087	return RX_CONTINUE;
3088}
3089
3090static ieee80211_rx_result debug_noinline
3091ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
3092{
3093	struct ieee80211_local *local = rx->local;
3094	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3095	struct sk_buff *nskb;
3096	struct ieee80211_sub_if_data *sdata = rx->sdata;
3097	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3098
3099	if (!ieee80211_is_action(mgmt->frame_control))
3100		return RX_CONTINUE;
3101
3102	/*
3103	 * For AP mode, hostapd is responsible for handling any action
3104	 * frames that we didn't handle, including returning unknown
3105	 * ones. For all other modes we will return them to the sender,
3106	 * setting the 0x80 bit in the action category, as required by
3107	 * 802.11-2012 9.24.4.
3108	 * Newer versions of hostapd shall also use the management frame
3109	 * registration mechanisms, but older ones still use cooked
3110	 * monitor interfaces so push all frames there.
3111	 */
3112	if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
3113	    (sdata->vif.type == NL80211_IFTYPE_AP ||
3114	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
3115		return RX_DROP_MONITOR;
3116
3117	if (is_multicast_ether_addr(mgmt->da))
3118		return RX_DROP_MONITOR;
3119
3120	/* do not return rejected action frames */
3121	if (mgmt->u.action.category & 0x80)
3122		return RX_DROP_UNUSABLE;
3123
3124	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
3125			       GFP_ATOMIC);
3126	if (nskb) {
3127		struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
3128
3129		nmgmt->u.action.category |= 0x80;
3130		memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
3131		memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
3132
3133		memset(nskb->cb, 0, sizeof(nskb->cb));
3134
3135		if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
3136			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
3137
3138			info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
3139				      IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
3140				      IEEE80211_TX_CTL_NO_CCK_RATE;
3141			if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
3142				info->hw_queue =
3143					local->hw.offchannel_tx_hw_queue;
3144		}
3145
3146		__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
3147					    status->band);
3148	}
3149	dev_kfree_skb(rx->skb);
3150	return RX_QUEUED;
3151}
3152
3153static ieee80211_rx_result debug_noinline
3154ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
3155{
3156	struct ieee80211_sub_if_data *sdata = rx->sdata;
3157	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3158	__le16 stype;
3159
3160	stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
3161
3162	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
3163	    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3164	    sdata->vif.type != NL80211_IFTYPE_OCB &&
3165	    sdata->vif.type != NL80211_IFTYPE_STATION)
3166		return RX_DROP_MONITOR;
3167
3168	switch (stype) {
3169	case cpu_to_le16(IEEE80211_STYPE_AUTH):
3170	case cpu_to_le16(IEEE80211_STYPE_BEACON):
3171	case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
3172		/* process for all: mesh, mlme, ibss */
3173		break;
3174	case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
3175	case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
3176	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
3177	case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
3178		if (is_multicast_ether_addr(mgmt->da) &&
3179		    !is_broadcast_ether_addr(mgmt->da))
3180			return RX_DROP_MONITOR;
3181
3182		/* process only for station */
3183		if (sdata->vif.type != NL80211_IFTYPE_STATION)
3184			return RX_DROP_MONITOR;
3185		break;
3186	case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
3187		/* process only for ibss and mesh */
3188		if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3189		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3190			return RX_DROP_MONITOR;
3191		break;
3192	default:
3193		return RX_DROP_MONITOR;
3194	}
3195
3196	/* queue up frame and kick off work to process it */
3197	rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
3198	skb_queue_tail(&sdata->skb_queue, rx->skb);
3199	ieee80211_queue_work(&rx->local->hw, &sdata->work);
3200	if (rx->sta)
3201		rx->sta->rx_stats.packets++;
3202
3203	return RX_QUEUED;
3204}
3205
3206static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
3207					struct ieee80211_rate *rate)
3208{
3209	struct ieee80211_sub_if_data *sdata;
3210	struct ieee80211_local *local = rx->local;
3211	struct sk_buff *skb = rx->skb, *skb2;
3212	struct net_device *prev_dev = NULL;
3213	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3214	int needed_headroom;
3215
3216	/*
3217	 * If cooked monitor has been processed already, then
3218	 * don't do it again. If not, set the flag.
3219	 */
3220	if (rx->flags & IEEE80211_RX_CMNTR)
3221		goto out_free_skb;
3222	rx->flags |= IEEE80211_RX_CMNTR;
3223
3224	/* If there are no cooked monitor interfaces, just free the SKB */
3225	if (!local->cooked_mntrs)
3226		goto out_free_skb;
3227
3228	/* vendor data is long removed here */
3229	status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
3230	/* room for the radiotap header based on driver features */
3231	needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
3232
3233	if (skb_headroom(skb) < needed_headroom &&
3234	    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
3235		goto out_free_skb;
3236
3237	/* prepend radiotap information */
3238	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
3239					 false);
3240
3241	skb_reset_mac_header(skb);
3242	skb->ip_summed = CHECKSUM_UNNECESSARY;
3243	skb->pkt_type = PACKET_OTHERHOST;
3244	skb->protocol = htons(ETH_P_802_2);
3245
3246	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
3247		if (!ieee80211_sdata_running(sdata))
3248			continue;
3249
3250		if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
3251		    !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
3252			continue;
3253
3254		if (prev_dev) {
3255			skb2 = skb_clone(skb, GFP_ATOMIC);
3256			if (skb2) {
3257				skb2->dev = prev_dev;
3258				netif_receive_skb(skb2);
3259			}
3260		}
3261
3262		prev_dev = sdata->dev;
3263		ieee80211_rx_stats(sdata->dev, skb->len);
3264	}
3265
3266	if (prev_dev) {
3267		skb->dev = prev_dev;
3268		netif_receive_skb(skb);
3269		return;
3270	}
3271
3272 out_free_skb:
3273	dev_kfree_skb(skb);
3274}
3275
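/*
 * Act on a handler's verdict: dropped frames are either handed to cooked
 * monitor interfaces (RX_DROP_MONITOR, and RX_CONTINUE at the end of the
 * chain) or freed (RX_DROP_UNUSABLE); RX_QUEUED means the handler has
 * consumed the skb.
 */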
3276static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
3277					 ieee80211_rx_result res)
3278{
3279	switch (res) {
3280	case RX_DROP_MONITOR:
3281		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3282		if (rx->sta)
3283			rx->sta->rx_stats.dropped++;
3284		/* fall through */
3285	case RX_CONTINUE: {
3286		struct ieee80211_rate *rate = NULL;
3287		struct ieee80211_supported_band *sband;
3288		struct ieee80211_rx_status *status;
3289
3290		status = IEEE80211_SKB_RXCB((rx->skb));
3291
3292		sband = rx->local->hw.wiphy->bands[status->band];
3293		if (!(status->flag & RX_FLAG_HT) &&
3294		    !(status->flag & RX_FLAG_VHT))
3295			rate = &sband->bitrates[status->rate_idx];
3296
3297		ieee80211_rx_cooked_monitor(rx, rate);
3298		break;
3299		}
3300	case RX_DROP_UNUSABLE:
3301		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3302		if (rx->sta)
3303			rx->sta->rx_stats.dropped++;
3304		dev_kfree_skb(rx->skb);
3305		break;
3306	case RX_QUEUED:
3307		I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
3308		break;
3309	}
3310}
3311
3312static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
3313				  struct sk_buff_head *frames)
3314{
3315	ieee80211_rx_result res = RX_DROP_MONITOR;
3316	struct sk_buff *skb;
3317
3318#define CALL_RXH(rxh)			\
3319	do {				\
3320		res = rxh(rx);		\
3321		if (res != RX_CONTINUE)	\
3322			goto rxh_next;  \
3323	} while (0)
3324
3325	/* Lock here to avoid hitting all of the data used in the RX
3326	 * path (e.g. key data, station data, ...) concurrently when
3327	 * a frame is released from the reorder buffer due to timeout
3328	 * from the timer, potentially concurrently with RX from the
3329	 * driver.
3330	 */
3331	spin_lock_bh(&rx->local->rx_path_lock);
3332
3333	while ((skb = __skb_dequeue(frames))) {
3334		/*
3335		 * all the other fields are valid across frames
3336		 * that belong to an aMPDU since they are on the
3337		 * same TID from the same station
3338		 */
3339		rx->skb = skb;
3340
3341		CALL_RXH(ieee80211_rx_h_check_more_data);
3342		CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
3343		CALL_RXH(ieee80211_rx_h_sta_process);
3344		CALL_RXH(ieee80211_rx_h_decrypt);
3345		CALL_RXH(ieee80211_rx_h_defragment);
3346		CALL_RXH(ieee80211_rx_h_michael_mic_verify);
3347		/* must be after MMIC verify so header is counted in MPDU mic */
3348#ifdef CONFIG_MAC80211_MESH
3349		if (ieee80211_vif_is_mesh(&rx->sdata->vif))
3350			CALL_RXH(ieee80211_rx_h_mesh_fwding);
3351#endif
3352		CALL_RXH(ieee80211_rx_h_amsdu);
3353		CALL_RXH(ieee80211_rx_h_data);
3354
3355		/* special treatment -- needs the queue */
3356		res = ieee80211_rx_h_ctrl(rx, frames);
3357		if (res != RX_CONTINUE)
3358			goto rxh_next;
3359
3360		CALL_RXH(ieee80211_rx_h_mgmt_check);
3361		CALL_RXH(ieee80211_rx_h_action);
3362		CALL_RXH(ieee80211_rx_h_userspace_mgmt);
3363		CALL_RXH(ieee80211_rx_h_action_return);
3364		CALL_RXH(ieee80211_rx_h_mgmt);
3365
3366 rxh_next:
3367		ieee80211_rx_handlers_result(rx, res);
3368
3369#undef CALL_RXH
3370	}
3371
3372	spin_unlock_bh(&rx->local->rx_path_lock);
3373}
3374
3375static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
3376{
3377	struct sk_buff_head reorder_release;
3378	ieee80211_rx_result res = RX_DROP_MONITOR;
3379
3380	__skb_queue_head_init(&reorder_release);
3381
3382#define CALL_RXH(rxh)			\
3383	do {				\
3384		res = rxh(rx);		\
3385		if (res != RX_CONTINUE)	\
3386			goto rxh_next;  \
3387	} while (0)
3388
3389	CALL_RXH(ieee80211_rx_h_check_dup);
3390	CALL_RXH(ieee80211_rx_h_check);
3391
3392	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
3393
3394	ieee80211_rx_handlers(rx, &reorder_release);
3395	return;
3396
3397 rxh_next:
3398	ieee80211_rx_handlers_result(rx, res);
3399
3400#undef CALL_RXH
3401}
3402
3403/*
3404 * This function makes calls into the RX path, therefore
3405 * it has to be invoked under RCU read lock.
3406 */
3407void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
3408{
3409	struct sk_buff_head frames;
3410	struct ieee80211_rx_data rx = {
3411		.sta = sta,
3412		.sdata = sta->sdata,
3413		.local = sta->local,
3414		/* This is OK -- must be QoS data frame */
3415		.security_idx = tid,
3416		.seqno_idx = tid,
3417		.napi = NULL, /* must be NULL to not have races */
3418	};
3419	struct tid_ampdu_rx *tid_agg_rx;
3420
3421	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3422	if (!tid_agg_rx)
3423		return;
3424
3425	__skb_queue_head_init(&frames);
3426
3427	spin_lock(&tid_agg_rx->reorder_lock);
3428	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3429	spin_unlock(&tid_agg_rx->reorder_lock);
3430
3431	if (!skb_queue_empty(&frames)) {
3432		struct ieee80211_event event = {
3433			.type = BA_FRAME_TIMEOUT,
3434			.u.ba.tid = tid,
3435			.u.ba.sta = &sta->sta,
3436		};
3437		drv_event_callback(rx.local, rx.sdata, &event);
3438	}
3439
3440	ieee80211_rx_handlers(&rx, &frames);
3441}
3442
3443void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
3444					  u16 ssn, u64 filtered,
3445					  u16 received_mpdus)
3446{
3447	struct sta_info *sta;
3448	struct tid_ampdu_rx *tid_agg_rx;
3449	struct sk_buff_head frames;
3450	struct ieee80211_rx_data rx = {
3451		/* This is OK -- must be QoS data frame */
3452		.security_idx = tid,
3453		.seqno_idx = tid,
3454	};
3455	int i, diff;
3456
3457	if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
3458		return;
3459
3460	__skb_queue_head_init(&frames);
3461
3462	sta = container_of(pubsta, struct sta_info, sta);
3463
3464	rx.sta = sta;
3465	rx.sdata = sta->sdata;
3466	rx.local = sta->local;
3467
3468	rcu_read_lock();
3469	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3470	if (!tid_agg_rx)
3471		goto out;
3472
3473	spin_lock_bh(&tid_agg_rx->reorder_lock);
3474
3475	if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
3476		int release;
3477
3478		/* release all frames in the reorder buffer */
3479		release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
3480			   IEEE80211_SN_MODULO;
3481		ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
3482						 release, &frames);
3483		/* update ssn to match received ssn */
3484		tid_agg_rx->head_seq_num = ssn;
3485	} else {
3486		ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
3487						 &frames);
3488	}
3489
3490	/* handle the case that received ssn is behind the mac ssn.
3491	 * it can be tid_agg_rx->buf_size behind and still be valid */
3492	diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
3493	if (diff >= tid_agg_rx->buf_size) {
3494		tid_agg_rx->reorder_buf_filtered = 0;
3495		goto release;
3496	}
3497	filtered = filtered >> diff;
3498	ssn += diff;
3499
3500	/* update bitmap */
3501	for (i = 0; i < tid_agg_rx->buf_size; i++) {
3502		int index = (ssn + i) % tid_agg_rx->buf_size;
3503
3504		tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
3505		if (filtered & BIT_ULL(i))
3506			tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
3507	}
3508
3509	/* now process also frames that the filter marking released */
3510	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3511
3512release:
3513	spin_unlock_bh(&tid_agg_rx->reorder_lock);
3514
3515	ieee80211_rx_handlers(&rx, &frames);
3516
3517 out:
3518	rcu_read_unlock();
3519}
3520EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
3521
3522/* main receive path */
3523
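/*
 * Decide, per interface type, whether this frame should be processed on
 * the given interface at all, based on BSSID and address matching. A
 * "false" return means the frame is ignored for this interface (it may
 * still be handled on another virtual interface).
 */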
3524static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3525{
3526	struct ieee80211_sub_if_data *sdata = rx->sdata;
3527	struct sk_buff *skb = rx->skb;
3528	struct ieee80211_hdr *hdr = (void *)skb->data;
3529	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3530	u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
3531	int multicast = is_multicast_ether_addr(hdr->addr1);
3532
3533	switch (sdata->vif.type) {
3534	case NL80211_IFTYPE_STATION:
3535		if (!bssid && !sdata->u.mgd.use_4addr)
3536			return false;
3537		if (multicast)
3538			return true;
3539		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3540	case NL80211_IFTYPE_ADHOC:
3541		if (!bssid)
3542			return false;
3543		if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
3544		    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
3545			return false;
3546		if (ieee80211_is_beacon(hdr->frame_control))
3547			return true;
3548		if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
3549			return false;
3550		if (!multicast &&
3551		    !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3552			return false;
3553		if (!rx->sta) {
3554			int rate_idx;
3555			if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
3556				rate_idx = 0; /* TODO: HT/VHT rates */
3557			else
3558				rate_idx = status->rate_idx;
3559			ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
3560						 BIT(rate_idx));
3561		}
3562		return true;
3563	case NL80211_IFTYPE_OCB:
3564		if (!bssid)
3565			return false;
3566		if (!ieee80211_is_data_present(hdr->frame_control))
3567			return false;
3568		if (!is_broadcast_ether_addr(bssid))
3569			return false;
3570		if (!multicast &&
3571		    !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
3572			return false;
3573		if (!rx->sta) {
3574			int rate_idx;
3575			if (status->flag & RX_FLAG_HT)
3576				rate_idx = 0; /* TODO: HT rates */
3577			else
3578				rate_idx = status->rate_idx;
3579			ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
3580						BIT(rate_idx));
3581		}
3582		return true;
3583	case NL80211_IFTYPE_MESH_POINT:
3584		if (multicast)
3585			return true;
3586		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3587	case NL80211_IFTYPE_AP_VLAN:
3588	case NL80211_IFTYPE_AP:
3589		if (!bssid)
3590			return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3591
3592		if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
3593			/*
3594			 * Accept public action frames even when the
3595			 * BSSID doesn't match, this is used for P2P
3596			 * and location updates. Note that mac80211
3597			 * itself never looks at these frames.
3598			 */
3599			if (!multicast &&
3600			    !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3601				return false;
3602			if (ieee80211_is_public_action(hdr, skb->len))
3603				return true;
3604			return ieee80211_is_beacon(hdr->frame_control);
3605		}
3606
3607		if (!ieee80211_has_tods(hdr->frame_control)) {
3608			/* ignore data frames to TDLS-peers */
3609			if (ieee80211_is_data(hdr->frame_control))
3610				return false;
3611			/* ignore action frames to TDLS-peers */
3612			if (ieee80211_is_action(hdr->frame_control) &&
3613			    !is_broadcast_ether_addr(bssid) &&
3614			    !ether_addr_equal(bssid, hdr->addr1))
3615				return false;
3616		}
3617		return true;
3618	case NL80211_IFTYPE_WDS:
3619		if (bssid || !ieee80211_is_data(hdr->frame_control))
3620			return false;
3621		return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
3622	case NL80211_IFTYPE_P2P_DEVICE:
3623		return ieee80211_is_public_action(hdr, skb->len) ||
3624		       ieee80211_is_probe_req(hdr->frame_control) ||
3625		       ieee80211_is_probe_resp(hdr->frame_control) ||
3626		       ieee80211_is_beacon(hdr->frame_control);
3627	case NL80211_IFTYPE_NAN:
3628		/* Currently no frames on NAN interface are allowed */
3629		return false;
3630	default:
3631		break;
3632	}
3633
3634	WARN_ON_ONCE(1);
3635	return false;
3636}
3637
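/*
 * (Re)evaluate whether the fast-rx path can be used for this station and
 * build the ieee80211_fast_rx descriptor accordingly; the result replaces
 * sta->fast_rx under sta->lock, with the old descriptor freed via RCU.
 * Any condition the fast path cannot handle (4-address mode, software
 * powersave, TKIP/WEP, unauthorized station, ...) clears the assignment.
 */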
3638void ieee80211_check_fast_rx(struct sta_info *sta)
3639{
3640	struct ieee80211_sub_if_data *sdata = sta->sdata;
3641	struct ieee80211_local *local = sdata->local;
3642	struct ieee80211_key *key;
3643	struct ieee80211_fast_rx fastrx = {
3644		.dev = sdata->dev,
3645		.vif_type = sdata->vif.type,
3646		.control_port_protocol = sdata->control_port_protocol,
3647	}, *old, *new = NULL;
3648	bool assign = false;
3649
3650	/* use sparse to check that we don't return without updating */
3651	__acquire(check_fast_rx);
3652
3653	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
3654	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
3655	ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
3656	ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
3657
3658	fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
3659
3660	/* fast-rx doesn't do reordering */
3661	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
3662	    !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
3663		goto clear;
3664
3665	switch (sdata->vif.type) {
3666	case NL80211_IFTYPE_STATION:
3667		/* 4-addr is harder to deal with, later maybe */
3668		if (sdata->u.mgd.use_4addr)
3669			goto clear;
3670		/* software powersave is a huge mess, avoid all of it */
3671		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
3672			goto clear;
3673		if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
3674		    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
3675			goto clear;
3676		if (sta->sta.tdls) {
3677			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
3678			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
3679			fastrx.expected_ds_bits = 0;
3680		} else {
3681			fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
3682			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
3683			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
3684			fastrx.expected_ds_bits =
3685				cpu_to_le16(IEEE80211_FCTL_FROMDS);
3686		}
3687		break;
3688	case NL80211_IFTYPE_AP_VLAN:
3689	case NL80211_IFTYPE_AP:
3690		/* parallel-rx requires this, at least with calls to
3691		 * ieee80211_sta_ps_transition()
3692		 */
3693		if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
3694			goto clear;
3695		fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
3696		fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
3697		fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
3698
3699		fastrx.internal_forward =
3700			!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
3701			(sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
3702			 !sdata->u.vlan.sta);
3703		break;
3704	default:
3705		goto clear;
3706	}
3707
3708	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
3709		goto clear;
3710
3711	rcu_read_lock();
3712	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
3713	if (key) {
3714		switch (key->conf.cipher) {
3715		case WLAN_CIPHER_SUITE_TKIP:
3716			/* we don't want to deal with MMIC in fast-rx */
3717			goto clear_rcu;
3718		case WLAN_CIPHER_SUITE_CCMP:
3719		case WLAN_CIPHER_SUITE_CCMP_256:
3720		case WLAN_CIPHER_SUITE_GCMP:
3721		case WLAN_CIPHER_SUITE_GCMP_256:
3722			break;
3723		default:
3724			/* we also don't want to deal with WEP or cipher scheme
3725			 * since those require looking up the key idx in the
3726			 * frame, rather than assuming the PTK is used
3727			 * (we need to revisit this once we implement the real
3728			 * PTK index, which is now valid in the spec, but we
3729			 * haven't implemented that part yet)
3730			 */
3731			goto clear_rcu;
3732		}
3733
3734		fastrx.key = true;
3735		fastrx.icv_len = key->conf.icv_len;
3736	}
3737
3738	assign = true;
3739 clear_rcu:
3740	rcu_read_unlock();
3741 clear:
3742	__release(check_fast_rx);
3743
3744	if (assign)
3745		new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
3746
3747	spin_lock_bh(&sta->lock);
3748	old = rcu_dereference_protected(sta->fast_rx, true);
3749	rcu_assign_pointer(sta->fast_rx, new);
3750	spin_unlock_bh(&sta->lock);
3751
3752	if (old)
3753		kfree_rcu(old, rcu_head);
3754}
3755
3756void ieee80211_clear_fast_rx(struct sta_info *sta)
3757{
3758	struct ieee80211_fast_rx *old;
3759
3760	spin_lock_bh(&sta->lock);
3761	old = rcu_dereference_protected(sta->fast_rx, true);
3762	RCU_INIT_POINTER(sta->fast_rx, NULL);
3763	spin_unlock_bh(&sta->lock);
3764
3765	if (old)
3766		kfree_rcu(old, rcu_head);
3767}
3768
3769void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
3770{
3771	struct ieee80211_local *local = sdata->local;
3772	struct sta_info *sta;
3773
3774	lockdep_assert_held(&local->sta_mtx);
3775
3776	list_for_each_entry_rcu(sta, &local->sta_list, list) {
3777		if (sdata != sta->sdata &&
3778		    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
3779			continue;
3780		ieee80211_check_fast_rx(sta);
3781	}
3782}
3783
3784void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
3785{
3786	struct ieee80211_local *local = sdata->local;
3787
3788	mutex_lock(&local->sta_mtx);
3789	__ieee80211_check_fast_rx_iface(sdata);
3790	mutex_unlock(&local->sta_mtx);
3791}
3792
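/*
 * The fast data-RX path: perform the minimal set of checks and the
 * 802.11 -> ethernet header conversion without going through the full
 * handler chain. Returns false to punt the frame to the slowpath and
 * true when the frame was consumed (delivered, forwarded or dropped).
 */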
3793static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
3794				     struct ieee80211_fast_rx *fast_rx)
3795{
3796	struct sk_buff *skb = rx->skb;
3797	struct ieee80211_hdr *hdr = (void *)skb->data;
3798	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3799	struct sta_info *sta = rx->sta;
3800	int orig_len = skb->len;
3801	int snap_offs = ieee80211_hdrlen(hdr->frame_control);
3802	struct {
3803		u8 snap[sizeof(rfc1042_header)];
3804		__be16 proto;
3805	} *payload __aligned(2);
3806	struct {
3807		u8 da[ETH_ALEN];
3808		u8 sa[ETH_ALEN];
3809	} addrs __aligned(2);
3810	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
3811
3812	if (fast_rx->uses_rss)
3813		stats = this_cpu_ptr(sta->pcpu_rx_stats);
3814
3815	/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
3816	 * to a common data structure; drivers can implement that per queue
3817	 * but we don't have that information in mac80211
3818	 */
3819	if (!(status->flag & RX_FLAG_DUP_VALIDATED))
3820		return false;
3821
3822#define FAST_RX_CRYPT_FLAGS	(RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
3823
3824	/* If using encryption, we also need to have:
3825	 *  - PN_VALIDATED: similar, but the implementation is tricky
3826	 *  - DECRYPTED: necessary for PN_VALIDATED
3827	 */
3828	if (fast_rx->key &&
3829	    (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
3830		return false;
3831
3832	/* we don't deal with A-MSDU deaggregation here */
3833	if (status->rx_flags & IEEE80211_RX_AMSDU)
3834		return false;
3835
3836	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
3837		return false;
3838
3839	if (unlikely(ieee80211_is_frag(hdr)))
3840		return false;
3841
3842	/* Since our interface address cannot be multicast, this
3843	 * implicitly also rejects multicast frames without the
3844	 * explicit check.
3845	 *
3846	 * We shouldn't get any *data* frames not addressed to us
3847	 * (AP mode will accept multicast *management* frames), but
3848	 * punting here will make it go through the full checks in
3849	 * ieee80211_accept_frame().
3850	 */
3851	if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
3852		return false;
3853
3854	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
3855					      IEEE80211_FCTL_TODS)) !=
3856	    fast_rx->expected_ds_bits)
3857		goto drop;
3858
3859	/* assign the key to drop unencrypted frames (later)
3860	 * and strip the IV/MIC if necessary
3861	 */
3862	if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
3863		/* GCMP header length is the same */
3864		snap_offs += IEEE80211_CCMP_HDR_LEN;
3865	}
3866
3867	if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
3868		goto drop;
3869	payload = (void *)(skb->data + snap_offs);
3870
3871	if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
3872		return false;
3873
3874	/* Don't handle these here since they require special code.
3875	 * Accept AARP and IPX even though they should come with a
3876	 * bridge-tunnel header - but if we get them this way then
3877	 * there's little point in discarding them.
3878	 */
3879	if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
3880		     payload->proto == fast_rx->control_port_protocol))
3881		return false;
3882
3883	/* after this point, don't punt to the slowpath! */
3884
3885	if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
3886	    pskb_trim(skb, skb->len - fast_rx->icv_len))
3887		goto drop;
3888
3889	if (unlikely(fast_rx->sta_notify)) {
3890		ieee80211_sta_rx_notify(rx->sdata, hdr);
3891		fast_rx->sta_notify = false;
3892	}
3893
3894	/* statistics part of ieee80211_rx_h_sta_process() */
3895	stats->last_rx = jiffies;
3896	stats->last_rate = sta_stats_encode_rate(status);
3897
3898	stats->fragments++;
3899
3900	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
3901		stats->last_signal = status->signal;
3902		if (!fast_rx->uses_rss)
3903			ewma_signal_add(&sta->rx_stats_avg.signal,
3904					-status->signal);
3905	}
3906
3907	if (status->chains) {
3908		int i;
3909
3910		stats->chains = status->chains;
3911		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
3912			int signal = status->chain_signal[i];
3913
3914			if (!(status->chains & BIT(i)))
3915				continue;
3916
3917			stats->chain_signal_last[i] = signal;
3918			if (!fast_rx->uses_rss)
3919				ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
3920						-signal);
3921		}
3922	}
3923	/* end of statistics */
3924
3925	if (rx->key && !ieee80211_has_protected(hdr->frame_control))
3926		goto drop;
3927
3928	/* do the header conversion - first grab the addresses */
3929	ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
3930	ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
3931	/* remove the SNAP but leave the ethertype */
3932	skb_pull(skb, snap_offs + sizeof(rfc1042_header));
3933	/* push the addresses in front */
3934	memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
3935
3936	skb->dev = fast_rx->dev;
3937
3938	ieee80211_rx_stats(fast_rx->dev, skb->len);
3939
3940	/* The seqno index has the same property as needed
3941	 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
3942	 * for non-QoS-data frames. Here we know it's a data
3943	 * frame, so count MSDUs.
3944	 */
3945	u64_stats_update_begin(&stats->syncp);
3946	stats->msdu[rx->seqno_idx]++;
3947	stats->bytes += orig_len;
3948	u64_stats_update_end(&stats->syncp);
3949
3950	if (fast_rx->internal_forward) {
3951		struct sk_buff *xmit_skb = NULL;
3952		bool multicast = is_multicast_ether_addr(skb->data);
3953
3954		if (multicast) {
3955			xmit_skb = skb_copy(skb, GFP_ATOMIC);
3956		} else if (sta_info_get(rx->sdata, skb->data)) {
3957			xmit_skb = skb;
3958			skb = NULL;
3959		}
3960
3961		if (xmit_skb) {
3962			/*
3963			 * Send to wireless media and increase priority by 256
3964			 * to keep the received priority instead of
3965			 * reclassifying the frame (see cfg80211_classify8021d).
3966			 */
3967			xmit_skb->priority += 256;
3968			xmit_skb->protocol = htons(ETH_P_802_3);
3969			skb_reset_network_header(xmit_skb);
3970			skb_reset_mac_header(xmit_skb);
3971			dev_queue_xmit(xmit_skb);
3972		}
3973
3974		if (!skb)
3975			return true;
3976	}
3977
3978	/* deliver to local stack */
3979	skb->protocol = eth_type_trans(skb, fast_rx->dev);
3980	memset(skb->cb, 0, sizeof(skb->cb));
3981	if (rx->napi)
3982		napi_gro_receive(rx->napi, skb);
3983	else
3984		netif_receive_skb(skb);
3985
3986	return true;
3987 drop:
3988	dev_kfree_skb(skb);
3989	stats->dropped++;
3990	return true;
3991}
3992
3993/*
3994 * This function returns whether the SKB was
3995 * destined for RX processing, which, if consume
3996 * is true, is equivalent to whether the skb
3997 * was consumed.
3998 */
3999static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
4000					    struct sk_buff *skb, bool consume)
4001{
4002	struct ieee80211_local *local = rx->local;
4003	struct ieee80211_sub_if_data *sdata = rx->sdata;
4004
4005	rx->skb = skb;
4006
4007	/* See if we can do fast-rx; if we have to copy we already lost,
4008	 * so punt in that case. We should never have to deliver a data
4009	 * frame to multiple interfaces anyway.
4010	 *
4011	 * We skip the ieee80211_accept_frame() call and do the necessary
4012	 * checking inside ieee80211_invoke_fast_rx().
4013	 */
4014	if (consume && rx->sta) {
4015		struct ieee80211_fast_rx *fast_rx;
4016
4017		fast_rx = rcu_dereference(rx->sta->fast_rx);
4018		if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
4019			return true;
4020	}
4021
4022	if (!ieee80211_accept_frame(rx))
4023		return false;
4024
4025	if (!consume) {
4026		skb = skb_copy(skb, GFP_ATOMIC);
4027		if (!skb) {
4028			if (net_ratelimit())
4029				wiphy_debug(local->hw.wiphy,
4030					"failed to copy skb for %s\n",
4031					sdata->name);
4032			return true;
4033		}
4034
4035		rx->skb = skb;
4036	}
4037
4038	ieee80211_invoke_rx_handlers(rx);
4039	return true;
4040}
4041
4042/*
4043 * This is the actual Rx frames handler. As it belongs to the Rx path it must
4044 * be called with rcu_read_lock protection.
4045 */
4046static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
4047					 struct ieee80211_sta *pubsta,
4048					 struct sk_buff *skb,
4049					 struct napi_struct *napi)
4050{
4051	struct ieee80211_local *local = hw_to_local(hw);
4052	struct ieee80211_sub_if_data *sdata;
4053	struct ieee80211_hdr *hdr;
4054	__le16 fc;
4055	struct ieee80211_rx_data rx;
4056	struct ieee80211_sub_if_data *prev;
4057	struct rhlist_head *tmp;
4058	int err = 0;
4059
4060	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
4061	memset(&rx, 0, sizeof(rx));
4062	rx.skb = skb;
4063	rx.local = local;
4064	rx.napi = napi;
4065
4066	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
4067		I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4068
4069	if (ieee80211_is_mgmt(fc)) {
4070		/* drop frame if too short for header */
4071		if (skb->len < ieee80211_hdrlen(fc))
4072			err = -ENOBUFS;
4073		else
4074			err = skb_linearize(skb);
4075	} else {
4076		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
4077	}
4078
4079	if (err) {
4080		dev_kfree_skb(skb);
4081		return;
4082	}
4083
4084	hdr = (struct ieee80211_hdr *)skb->data;
4085	ieee80211_parse_qos(&rx);
4086	ieee80211_verify_alignment(&rx);
4087
4088	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
4089		     ieee80211_is_beacon(hdr->frame_control)))
4090		ieee80211_scan_rx(local, skb);
4091
4092	if (ieee80211_is_data(fc)) {
4093		struct sta_info *sta, *prev_sta;
4094
4095		if (pubsta) {
4096			rx.sta = container_of(pubsta, struct sta_info, sta);
4097			rx.sdata = rx.sta->sdata;
4098			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4099				return;
4100			goto out;
4101		}
4102
4103		prev_sta = NULL;
4104
4105		for_each_sta_info(local, hdr->addr2, sta, tmp) {
4106			if (!prev_sta) {
4107				prev_sta = sta;
4108				continue;
4109			}
4110
4111			rx.sta = prev_sta;
4112			rx.sdata = prev_sta->sdata;
4113			ieee80211_prepare_and_rx_handle(&rx, skb, false);
4114
4115			prev_sta = sta;
4116		}
4117
4118		if (prev_sta) {
4119			rx.sta = prev_sta;
4120			rx.sdata = prev_sta->sdata;
4121
4122			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4123				return;
4124			goto out;
4125		}
4126	}
4127
4128	prev = NULL;
4129
4130	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4131		if (!ieee80211_sdata_running(sdata))
4132			continue;
4133
4134		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
4135		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
4136			continue;
4137
4138		/*
4139		 * frame is destined for this interface, but if it's
4140		 * not also for the previous one we handle that after
4141		 * the loop to avoid copying the SKB once too much
4142		 */
4143
4144		if (!prev) {
4145			prev = sdata;
4146			continue;
4147		}
4148
4149		rx.sta = sta_info_get_bss(prev, hdr->addr2);
4150		rx.sdata = prev;
4151		ieee80211_prepare_and_rx_handle(&rx, skb, false);
4152
4153		prev = sdata;
4154	}
4155
4156	if (prev) {
4157		rx.sta = sta_info_get_bss(prev, hdr->addr2);
4158		rx.sdata = prev;
4159
4160		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4161			return;
4162	}
4163
4164 out:
4165	dev_kfree_skb(skb);
4166}
4167
4168/*
4169 * This is the receive path handler. It is called by a low level driver when an
4170 * 802.11 MPDU is received from the hardware.
4171 */
4172void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4173		       struct sk_buff *skb, struct napi_struct *napi)
4174{
4175	struct ieee80211_local *local = hw_to_local(hw);
4176	struct ieee80211_rate *rate = NULL;
4177	struct ieee80211_supported_band *sband;
4178	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4179
4180	WARN_ON_ONCE(softirq_count() == 0);
4181
4182	if (WARN_ON(status->band >= NUM_NL80211_BANDS))
4183		goto drop;
4184
4185	sband = local->hw.wiphy->bands[status->band];
4186	if (WARN_ON(!sband))
4187		goto drop;
4188
4189	/*
4190	 * If we're suspending, it is possible although not too likely
4191	 * that we'd be receiving frames after having already partially
4192	 * quiesced the stack. We can't process such frames then since
4193	 * that might, for example, cause stations to be added or other
4194	 * driver callbacks be invoked.
4195	 */
4196	if (unlikely(local->quiescing || local->suspended))
4197		goto drop;
4198
4199	/* We might be during a HW reconfig, prevent Rx for the same reason */
4200	if (unlikely(local->in_reconfig))
4201		goto drop;
4202
4203	/*
4204	 * The same happens when we're not even started,
4205	 * but that's worth a warning.
4206	 */
4207	if (WARN_ON(!local->started))
4208		goto drop;
4209
4210	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
4211		/*
4212		 * Validate the rate, unless a PLCP error means that
4213		 * we probably can't have a valid rate here anyway.
4214		 */
4215
4216		if (status->flag & RX_FLAG_HT) {
4217			/*
4218			 * rate_idx is MCS index, which can be [0-76]
4219			 * as documented on:
4220			 *
4221			 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
4222			 *
4223			 * Anything else would be some sort of driver or
4224			 * hardware error. The driver should catch hardware
4225			 * errors.
4226			 */
4227			if (WARN(status->rate_idx > 76,
4228				 "Rate marked as an HT rate but passed "
4229				 "status->rate_idx is not "
4230				 "an MCS index [0-76]: %d (0x%02x)\n",
4231				 status->rate_idx,
4232				 status->rate_idx))
4233				goto drop;
4234		} else if (status->flag & RX_FLAG_VHT) {
4235			if (WARN_ONCE(status->rate_idx > 9 ||
4236				      !status->vht_nss ||
4237				      status->vht_nss > 8,
4238				      "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
4239				      status->rate_idx, status->vht_nss))
4240				goto drop;
4241		} else {
4242			if (WARN_ON(status->rate_idx >= sband->n_bitrates))
4243				goto drop;
4244			rate = &sband->bitrates[status->rate_idx];
4245		}
4246	}
4247
4248	status->rx_flags = 0;
4249
4250	/*
4251	 * key references and virtual interfaces are protected using RCU
4252	 * and this requires that we are in a read-side RCU section during
4253	 * receive processing
4254	 */
4255	rcu_read_lock();
4256
4257	/*
4258	 * Frames with failed FCS/PLCP checksum are not returned,
4259	 * all other frames are returned without radiotap header
4260	 * if it was previously present.
4261	 * Also, frames with less than 16 bytes are dropped.
4262	 */
4263	skb = ieee80211_rx_monitor(local, skb, rate);
4264	if (!skb) {
4265		rcu_read_unlock();
4266		return;
4267	}
4268
4269	ieee80211_tpt_led_trig_rx(local,
4270			((struct ieee80211_hdr *)skb->data)->frame_control,
4271			skb->len);
4272
4273	__ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
4274
4275	rcu_read_unlock();
4276
4277	return;
4278 drop:
4279	kfree_skb(skb);
4280}
4281EXPORT_SYMBOL(ieee80211_rx_napi);
4282
4283/* This is a version of the rx handler that can be called from hard irq
4284 * context. Post the skb on the queue and schedule the tasklet */
4285void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
4286{
4287	struct ieee80211_local *local = hw_to_local(hw);
4288
4289	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
4290
4291	skb->pkt_type = IEEE80211_RX_MSG;
4292	skb_queue_tail(&local->skb_queue, skb);
4293	tasklet_schedule(&local->tasklet);
4294}
4295EXPORT_SYMBOL(ieee80211_rx_irqsafe);
v4.17
   1/*
   2 * Copyright 2002-2005, Instant802 Networks, Inc.
   3 * Copyright 2005-2006, Devicescape Software, Inc.
   4 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
   5 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
   6 * Copyright 2013-2014  Intel Mobile Communications GmbH
   7 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13
  14#include <linux/jiffies.h>
  15#include <linux/slab.h>
  16#include <linux/kernel.h>
  17#include <linux/skbuff.h>
  18#include <linux/netdevice.h>
  19#include <linux/etherdevice.h>
  20#include <linux/rcupdate.h>
  21#include <linux/export.h>
  22#include <linux/bitops.h>
  23#include <net/mac80211.h>
  24#include <net/ieee80211_radiotap.h>
  25#include <asm/unaligned.h>
  26
  27#include "ieee80211_i.h"
  28#include "driver-ops.h"
  29#include "led.h"
  30#include "mesh.h"
  31#include "wep.h"
  32#include "wpa.h"
  33#include "tkip.h"
  34#include "wme.h"
  35#include "rate.h"
  36
  37static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
  38{
  39	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
  40
  41	u64_stats_update_begin(&tstats->syncp);
  42	tstats->rx_packets++;
  43	tstats->rx_bytes += len;
  44	u64_stats_update_end(&tstats->syncp);
  45}
  46
  47static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
  48			       enum nl80211_iftype type)
  49{
  50	__le16 fc = hdr->frame_control;
  51
  52	if (ieee80211_is_data(fc)) {
  53		if (len < 24) /* drop incorrect hdr len (data) */
  54			return NULL;
  55
  56		if (ieee80211_has_a4(fc))
  57			return NULL;
  58		if (ieee80211_has_tods(fc))
  59			return hdr->addr1;
  60		if (ieee80211_has_fromds(fc))
  61			return hdr->addr2;
  62
  63		return hdr->addr3;
  64	}
  65
  66	if (ieee80211_is_mgmt(fc)) {
  67		if (len < 24) /* drop incorrect hdr len (mgmt) */
  68			return NULL;
  69		return hdr->addr3;
  70	}
  71
  72	if (ieee80211_is_ctl(fc)) {
  73		if (ieee80211_is_pspoll(fc))
  74			return hdr->addr1;
  75
  76		if (ieee80211_is_back_req(fc)) {
  77			switch (type) {
  78			case NL80211_IFTYPE_STATION:
  79				return hdr->addr2;
  80			case NL80211_IFTYPE_AP:
  81			case NL80211_IFTYPE_AP_VLAN:
  82				return hdr->addr1;
  83			default:
  84				break; /* fall through to the return */
  85			}
  86		}
  87	}
  88
  89	return NULL;
  90}
  91
  92/*
  93 * monitor mode reception
  94 *
  95 * This function cleans up the SKB, i.e. it removes all the stuff
  96 * only useful for monitoring.
  97 */
  98static void remove_monitor_info(struct sk_buff *skb,
  99				unsigned int present_fcs_len,
 100				unsigned int rtap_vendor_space)
 101{
 102	if (present_fcs_len)
 103		__pskb_trim(skb, skb->len - present_fcs_len);
 104	__pskb_pull(skb, rtap_vendor_space);
 105}
 106
 107static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
 108				     unsigned int rtap_vendor_space)
 109{
 110	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 111	struct ieee80211_hdr *hdr;
 112
 113	hdr = (void *)(skb->data + rtap_vendor_space);
 114
 115	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
 116			    RX_FLAG_FAILED_PLCP_CRC |
 117			    RX_FLAG_ONLY_MONITOR))
 118		return true;
 119
 120	if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
 121		return true;
 122
 123	if (ieee80211_is_ctl(hdr->frame_control) &&
 124	    !ieee80211_is_pspoll(hdr->frame_control) &&
 125	    !ieee80211_is_back_req(hdr->frame_control))
 126		return true;
 127
 128	return false;
 129}
 130
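/*
 * Compute how much space the radiotap header that will be prepended for
 * monitor interfaces needs, based on which fields the RX status carries
 * (timestamp, signal, HT/VHT info, A-MPDU details, vendor data, ...).
 */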
 131static int
 132ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
 133			     struct ieee80211_rx_status *status,
 134			     struct sk_buff *skb)
 135{
 136	int len;
 137
 138	/* always present fields */
 139	len = sizeof(struct ieee80211_radiotap_header) + 8;
 140
 141	/* allocate extra bitmaps */
 142	if (status->chains)
 143		len += 4 * hweight8(status->chains);
 144
 145	if (ieee80211_have_rx_timestamp(status)) {
 146		len = ALIGN(len, 8);
 147		len += 8;
 148	}
 149	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
 150		len += 1;
 151
 152	/* antenna field, if we don't have per-chain info */
 153	if (!status->chains)
 154		len += 1;
 155
 156	/* padding for RX_FLAGS if necessary */
 157	len = ALIGN(len, 2);
 158
 159	if (status->encoding == RX_ENC_HT) /* HT info */
 160		len += 3;
 161
 162	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
 163		len = ALIGN(len, 4);
 164		len += 8;
 165	}
 166
 167	if (status->encoding == RX_ENC_VHT) {
 168		len = ALIGN(len, 2);
 169		len += 12;
 170	}
 171
 172	if (local->hw.radiotap_timestamp.units_pos >= 0) {
 173		len = ALIGN(len, 8);
 174		len += 12;
 175	}
 176
 177	if (status->chains) {
 178		/* antenna and antenna signal fields */
 179		len += 2 * hweight8(status->chains);
 180	}
 181
 182	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
 183		struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
 184
 185		/* vendor presence bitmap */
 186		len += 4;
 187		/* alignment for fixed 6-byte vendor data header */
 188		len = ALIGN(len, 2);
 189		/* vendor data header */
 190		len += 6;
 191		if (WARN_ON(rtap->align == 0))
 192			rtap->align = 1;
 193		len = ALIGN(len, rtap->align);
 194		len += rtap->len + rtap->pad;
 195	}
 196
 197	return len;
 198}
 199
 200static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
 201					 struct sk_buff *skb,
 202					 int rtap_vendor_space)
 203{
 204	struct {
 205		struct ieee80211_hdr_3addr hdr;
 206		u8 category;
 207		u8 action_code;
 208	} __packed action;
 209
 210	if (!sdata)
 211		return;
 212
 213	BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
 214
 215	if (skb->len < rtap_vendor_space + sizeof(action) +
 216		       VHT_MUMIMO_GROUPS_DATA_LEN)
 217		return;
 218
 219	if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
 220		return;
 221
 222	skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
 223
 224	if (!ieee80211_is_action(action.hdr.frame_control))
 225		return;
 226
 227	if (action.category != WLAN_CATEGORY_VHT)
 228		return;
 229
 230	if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
 231		return;
 232
 233	if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
 234		return;
 235
 236	skb = skb_copy(skb, GFP_ATOMIC);
 237	if (!skb)
 238		return;
 239
 240	skb_queue_tail(&sdata->skb_queue, skb);
 241	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 242}
 243
 244/*
 245 * ieee80211_add_rx_radiotap_header - add radiotap header
 246 *
 247 * add a radiotap header containing all the fields which the hardware provided.
 248 */
 249static void
 250ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 251				 struct sk_buff *skb,
 252				 struct ieee80211_rate *rate,
 253				 int rtap_len, bool has_fcs)
 254{
 255	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 256	struct ieee80211_radiotap_header *rthdr;
 257	unsigned char *pos;
 258	__le32 *it_present;
 259	u32 it_present_val;
 260	u16 rx_flags = 0;
 261	u16 channel_flags = 0;
 262	int mpdulen, chain;
 263	unsigned long chains = status->chains;
 264	struct ieee80211_vendor_radiotap rtap = {};
 265
 266	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
 267		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
 268		/* rtap.len and rtap.pad are undone immediately */
 269		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
 270	}
 271
 272	mpdulen = skb->len;
 273	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
 274		mpdulen += FCS_LEN;
 275
 276	rthdr = skb_push(skb, rtap_len);
 277	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
 278	it_present = &rthdr->it_present;
 279
 280	/* radiotap header, set always present flags */
 281	rthdr->it_len = cpu_to_le16(rtap_len);
 282	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
 283			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
 284			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
 285
 286	if (!status->chains)
 287		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
 288
 289	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
 290		it_present_val |=
 291			BIT(IEEE80211_RADIOTAP_EXT) |
 292			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
 293		put_unaligned_le32(it_present_val, it_present);
 294		it_present++;
 295		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
 296				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
 297	}
 298
 299	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
 300		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
 301				  BIT(IEEE80211_RADIOTAP_EXT);
 302		put_unaligned_le32(it_present_val, it_present);
 303		it_present++;
 304		it_present_val = rtap.present;
 305	}
 306
 307	put_unaligned_le32(it_present_val, it_present);
 308
 309	pos = (void *)(it_present + 1);
 310
 311	/* the order of the following fields is important */
 312
 313	/* IEEE80211_RADIOTAP_TSFT */
 314	if (ieee80211_have_rx_timestamp(status)) {
 315		/* padding */
 316		while ((pos - (u8 *)rthdr) & 7)
 317			*pos++ = 0;
 318		put_unaligned_le64(
 319			ieee80211_calculate_rx_timestamp(local, status,
 320							 mpdulen, 0),
 321			pos);
 322		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
 323		pos += 8;
 324	}
 325
 326	/* IEEE80211_RADIOTAP_FLAGS */
 327	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
 328		*pos |= IEEE80211_RADIOTAP_F_FCS;
 329	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
 330		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
 331	if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
 332		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
 333	pos++;
 334
 335	/* IEEE80211_RADIOTAP_RATE */
 336	if (!rate || status->encoding != RX_ENC_LEGACY) {
 337		/*
 338		 * Without rate information don't add it. If we have,
 339		 * MCS information is a separate field in radiotap,
 340		 * added below. The byte here is needed as padding
 341		 * for the channel though, so initialise it to 0.
 342		 */
 343		*pos = 0;
 344	} else {
 345		int shift = 0;
 346		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
 347		if (status->bw == RATE_INFO_BW_10)
 348			shift = 1;
 349		else if (status->bw == RATE_INFO_BW_5)
 350			shift = 2;
 351		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
 352	}
 353	pos++;
 354
 355	/* IEEE80211_RADIOTAP_CHANNEL */
 356	put_unaligned_le16(status->freq, pos);
 357	pos += 2;
 358	if (status->bw == RATE_INFO_BW_10)
 359		channel_flags |= IEEE80211_CHAN_HALF;
 360	else if (status->bw == RATE_INFO_BW_5)
 361		channel_flags |= IEEE80211_CHAN_QUARTER;
 362
 363	if (status->band == NL80211_BAND_5GHZ)
 364		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
 365	else if (status->encoding != RX_ENC_LEGACY)
 366		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
 367	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
 368		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
 369	else if (rate)
 370		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
 371	else
 372		channel_flags |= IEEE80211_CHAN_2GHZ;
 373	put_unaligned_le16(channel_flags, pos);
 374	pos += 2;
 375
 376	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
 377	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
 378	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
 379		*pos = status->signal;
 380		rthdr->it_present |=
 381			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
 382		pos++;
 383	}
 384
 385	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
 386
 387	if (!status->chains) {
 388		/* IEEE80211_RADIOTAP_ANTENNA */
 389		*pos = status->antenna;
 390		pos++;
 391	}
 392
 393	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
 394
 395	/* IEEE80211_RADIOTAP_RX_FLAGS */
 396	/* ensure 2 byte alignment for the 2 byte field as required */
 397	if ((pos - (u8 *)rthdr) & 1)
 398		*pos++ = 0;
 399	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
 400		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
 401	put_unaligned_le16(rx_flags, pos);
 402	pos += 2;
 403
 404	if (status->encoding == RX_ENC_HT) {
 405		unsigned int stbc;
 406
 407		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
 408		*pos++ = local->hw.radiotap_mcs_details;
 409		*pos = 0;
 410		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
 411			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
 412		if (status->bw == RATE_INFO_BW_40)
 413			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
 414		if (status->enc_flags & RX_ENC_FLAG_HT_GF)
 415			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
 416		if (status->enc_flags & RX_ENC_FLAG_LDPC)
 417			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
 418		stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
 419		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
 420		pos++;
 421		*pos++ = status->rate_idx;
 422	}
 423
 424	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
 425		u16 flags = 0;
 426
 427		/* ensure 4 byte alignment */
 428		while ((pos - (u8 *)rthdr) & 3)
 429			pos++;
 430		rthdr->it_present |=
 431			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
 432		put_unaligned_le32(status->ampdu_reference, pos);
 433		pos += 4;
 434		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
 435			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
 436		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
 437			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
 438		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
 439			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
 440		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
 441			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
 442		if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
 443			flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
 444		if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
 445			flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
 446		put_unaligned_le16(flags, pos);
 447		pos += 2;
 448		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
 449			*pos++ = status->ampdu_delimiter_crc;
 450		else
 451			*pos++ = 0;
 452		*pos++ = 0;
 453	}
 454
 455	if (status->encoding == RX_ENC_VHT) {
 456		u16 known = local->hw.radiotap_vht_details;
 457
 458		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
 459		put_unaligned_le16(known, pos);
 460		pos += 2;
 461		/* flags */
 462		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
 463			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
 464		/* in VHT, STBC is binary */
 465		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
 466			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
 467		if (status->enc_flags & RX_ENC_FLAG_BF)
 468			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
 469		pos++;
 470		/* bandwidth */
 471		switch (status->bw) {
 472		case RATE_INFO_BW_80:
 473			*pos++ = 4;
 474			break;
 475		case RATE_INFO_BW_160:
 476			*pos++ = 11;
 477			break;
 478		case RATE_INFO_BW_40:
 479			*pos++ = 1;
 480			break;
 481		default:
 482			*pos++ = 0;
 483		}
 484		/* MCS/NSS */
 485		*pos = (status->rate_idx << 4) | status->nss;
 486		pos += 4;
 487		/* coding field */
 488		if (status->enc_flags & RX_ENC_FLAG_LDPC)
 489			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
 490		pos++;
 491		/* group ID */
 492		pos++;
 493		/* partial_aid */
 494		pos += 2;
 495	}
 496
 497	if (local->hw.radiotap_timestamp.units_pos >= 0) {
 498		u16 accuracy = 0;
 499		u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
 500
 501		rthdr->it_present |=
 502			cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);
 503
 504		/* ensure 8 byte alignment */
 505		while ((pos - (u8 *)rthdr) & 7)
 506			pos++;
 507
 508		put_unaligned_le64(status->device_timestamp, pos);
 509		pos += sizeof(u64);
 510
 511		if (local->hw.radiotap_timestamp.accuracy >= 0) {
 512			accuracy = local->hw.radiotap_timestamp.accuracy;
 513			flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
 514		}
 515		put_unaligned_le16(accuracy, pos);
 516		pos += sizeof(u16);
 517
 518		*pos++ = local->hw.radiotap_timestamp.units_pos;
 519		*pos++ = flags;
 520	}
 521
 522	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
 523		*pos++ = status->chain_signal[chain];
 524		*pos++ = chain;
 525	}
 526
 527	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
 528		/* ensure 2 byte alignment for the vendor field as required */
 529		if ((pos - (u8 *)rthdr) & 1)
 530			*pos++ = 0;
 531		*pos++ = rtap.oui[0];
 532		*pos++ = rtap.oui[1];
 533		*pos++ = rtap.oui[2];
 534		*pos++ = rtap.subns;
 535		put_unaligned_le16(rtap.len, pos);
 536		pos += 2;
 537		/* align the actual payload as requested */
 538		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
 539			*pos++ = 0;
 540		/* data (and possible padding) already follows */
 541	}
 542}
 543
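/*
 * Prepare an skb for delivery to a monitor interface: either reuse the
 * original skb (when it is only needed for monitoring) or copy it, make
 * sure there is enough headroom, and prepend the radiotap header.
 */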
 544static struct sk_buff *
 545ieee80211_make_monitor_skb(struct ieee80211_local *local,
 546			   struct sk_buff **origskb,
 547			   struct ieee80211_rate *rate,
 548			   int rtap_vendor_space, bool use_origskb)
 549{
 550	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
 551	int rt_hdrlen, needed_headroom;
 552	struct sk_buff *skb;
 553
 554	/* room for the radiotap header based on driver features */
 555	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
 556	needed_headroom = rt_hdrlen - rtap_vendor_space;
 557
 558	if (use_origskb) {
 559		/* only need to expand headroom if necessary */
 560		skb = *origskb;
 561		*origskb = NULL;
 562
 563		/*
 564		 * This shouldn't trigger often because most devices have an
 565		 * RX header they pull before we get here, and that should
 566		 * be big enough for our radiotap information. We should
 567		 * probably export the length to drivers so that we can have
 568		 * them allocate enough headroom to start with.
 569		 */
 570		if (skb_headroom(skb) < needed_headroom &&
 571		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
 572			dev_kfree_skb(skb);
 573			return NULL;
 574		}
 575	} else {
 576		/*
 577		 * Need to make a copy and possibly remove radiotap header
 578		 * and FCS from the original.
 579		 */
 580		skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
 581
 582		if (!skb)
 583			return NULL;
 584	}
 585
 586	/* prepend radiotap information */
 587	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
 588
 589	skb_reset_mac_header(skb);
 590	skb->ip_summed = CHECKSUM_UNNECESSARY;
 591	skb->pkt_type = PACKET_OTHERHOST;
 592	skb->protocol = htons(ETH_P_802_2);
 593
 594	return skb;
 595}
 596
 597/*
 598 * This function copies a received frame to all monitor interfaces and
 599 * returns a cleaned-up SKB that no longer includes the FCS nor the
 600 * radiotap header the driver might have added.
 601 */
 602static struct sk_buff *
 603ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 604		     struct ieee80211_rate *rate)
 605{
 606	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
 607	struct ieee80211_sub_if_data *sdata;
 608	struct sk_buff *monskb = NULL;
 609	int present_fcs_len = 0;
 610	unsigned int rtap_vendor_space = 0;
 611	struct ieee80211_sub_if_data *monitor_sdata =
 612		rcu_dereference(local->monitor_sdata);
 613	bool only_monitor = false;
 614
 615	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
 616		struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;
 617
 618		rtap_vendor_space = sizeof(*rtap) + rtap->len + rtap->pad;
 619	}
 620
 621	/*
 622	 * First, we may need to make a copy of the skb because
 623	 *  (1) we need to modify it for radiotap (if not present), and
 624	 *  (2) the other RX handlers will modify the skb we got.
 625	 *
 626	 * We don't need to, of course, if we aren't going to return
 627	 * the SKB because it has a bad FCS/PLCP checksum.
 628	 */
 629
 630	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
 631		if (unlikely(origskb->len <= FCS_LEN)) {
 632			/* driver bug */
 633			WARN_ON(1);
 634			dev_kfree_skb(origskb);
 635			return NULL;
 636		}
 637		present_fcs_len = FCS_LEN;
 638	}
 639
 640	/* ensure hdr->frame_control and vendor radiotap data are in skb head */
 641	if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) {
 642		dev_kfree_skb(origskb);
 643		return NULL;
 644	}
 645
 646	only_monitor = should_drop_frame(origskb, present_fcs_len,
 647					 rtap_vendor_space);
 648
 649	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
 650		if (only_monitor) {
 651			dev_kfree_skb(origskb);
 652			return NULL;
 653		}
 654
 655		remove_monitor_info(origskb, present_fcs_len,
 656				    rtap_vendor_space);
 657		return origskb;
 658	}
 659
 660	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
 661
 662	list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
 663		bool last_monitor = list_is_last(&sdata->u.mntr.list,
 664						 &local->mon_list);
 665
 666		if (!monskb)
 667			monskb = ieee80211_make_monitor_skb(local, &origskb,
 668							    rate,
 669							    rtap_vendor_space,
 670							    only_monitor &&
 671							    last_monitor);
 672
 673		if (monskb) {
 674			struct sk_buff *skb;
 675
 676			if (last_monitor) {
 677				skb = monskb;
 678				monskb = NULL;
 679			} else {
 680				skb = skb_clone(monskb, GFP_ATOMIC);
 681			}
 682
 683			if (skb) {
 684				skb->dev = sdata->dev;
 685				ieee80211_rx_stats(skb->dev, skb->len);
 686				netif_receive_skb(skb);
 687			}
 688		}
 689
 690		if (last_monitor)
 691			break;
 692	}
 693
 694	/* this happens if last_monitor was erroneously false */
 695	dev_kfree_skb(monskb);
 696
 697	/* ditto */
 698	if (!origskb)
 699		return NULL;
 700
 701	remove_monitor_info(origskb, present_fcs_len, rtap_vendor_space);
 702	return origskb;
 703}
 704
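/*
 * Derive the TID, sequence number index and security index from the QoS
 * control field (or the non-QoS defaults) and stash them in the RX data;
 * skb->priority is set from the TID as well.
 */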
 705static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
 706{
 707	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 708	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
 709	int tid, seqno_idx, security_idx;
 710
 711	/* does the frame have a qos control field? */
 712	if (ieee80211_is_data_qos(hdr->frame_control)) {
 713		u8 *qc = ieee80211_get_qos_ctl(hdr);
 714		/* frame has qos control */
 715		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 716		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
 717			status->rx_flags |= IEEE80211_RX_AMSDU;
 718
 719		seqno_idx = tid;
 720		security_idx = tid;
 721	} else {
 722		/*
 723		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
 724		 *
 725		 *	Sequence numbers for management frames, QoS data
 726		 *	frames with a broadcast/multicast address in the
 727		 *	Address 1 field, and all non-QoS data frames sent
 728		 *	by QoS STAs are assigned using an additional single
 729		 *	modulo-4096 counter, [...]
 730		 *
 731		 * We also use that counter for non-QoS STAs.
 732		 */
 733		seqno_idx = IEEE80211_NUM_TIDS;
 734		security_idx = 0;
 735		if (ieee80211_is_mgmt(hdr->frame_control))
 736			security_idx = IEEE80211_NUM_TIDS;
 737		tid = 0;
 738	}
 739
 740	rx->seqno_idx = seqno_idx;
 741	rx->security_idx = security_idx;
 742	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
 743	 * For now, set skb->priority to 0 for other cases. */
 744	rx->skb->priority = (tid > 7) ? 0 : tid;
 745}
 746
 747/**
 748 * DOC: Packet alignment
 749 *
 750 * Drivers always need to pass packets that are aligned to two-byte boundaries
 751 * to the stack.
 752 *
 753 * Additionally, they should, if possible, align the payload data in a way that
 754 * guarantees that the contained IP header is aligned to a four-byte
 755 * boundary. In the case of regular frames, this simply means aligning the
 756 * payload to a four-byte boundary (because either the IP header is directly
 757 * contained, or IV/RFC1042 headers that have a length divisible by four are
 758 * in front of it).  If the payload data is not properly aligned and the
 759 * architecture doesn't support efficient unaligned operations, mac80211
 760 * will align the data.
 761 *
 762 * With A-MSDU frames, however, the payload data address must yield two modulo
 763 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 764 * push the IP header further back to a multiple of four again. Thankfully, the
 765 * specs were sane enough this time around to require padding each A-MSDU
 766 * subframe to a length that is a multiple of four.
 767 *
 768 * Padding like Atheros hardware adds which is between the 802.11 header and
 769 * the payload is not supported, the driver is required to move the 802.11
 770 * header to be directly in front of the payload in that case.
 771 */
 772static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
 773{
 774#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 775	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
 776#endif
 777}
 778
 779
 780/* rx handlers */
 781
 782static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
 783{
 784	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 785
 786	if (is_multicast_ether_addr(hdr->addr1))
 787		return 0;
 788
 789	return ieee80211_is_robust_mgmt_frame(skb);
 790}
 791
 792
 793static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
 794{
 795	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 796
 797	if (!is_multicast_ether_addr(hdr->addr1))
 798		return 0;
 799
 800	return ieee80211_is_robust_mgmt_frame(skb);
 801}
 802
 803
 804/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
 805static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
 806{
 807	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
 808	struct ieee80211_mmie *mmie;
 809	struct ieee80211_mmie_16 *mmie16;
 810
 811	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
 812		return -1;
 813
 814	if (!ieee80211_is_robust_mgmt_frame(skb))
 815		return -1; /* not a robust management frame */
 816
 817	mmie = (struct ieee80211_mmie *)
 818		(skb->data + skb->len - sizeof(*mmie));
 819	if (mmie->element_id == WLAN_EID_MMIE &&
 820	    mmie->length == sizeof(*mmie) - 2)
 821		return le16_to_cpu(mmie->key_id);
 822
 823	mmie16 = (struct ieee80211_mmie_16 *)
 824		(skb->data + skb->len - sizeof(*mmie16));
 825	if (skb->len >= 24 + sizeof(*mmie16) &&
 826	    mmie16->element_id == WLAN_EID_MMIE &&
 827	    mmie16->length == sizeof(*mmie16) - 2)
 828		return le16_to_cpu(mmie16->key_id);
 829
 830	return -1;
 831}
 832
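/*
 * Extract the key index for a vendor cipher scheme frame; the offset,
 * mask and shift describing where the index lives in the security
 * header come from the driver-provided cipher scheme descriptor.
 */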
 833static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
 834				  struct sk_buff *skb)
 835{
 836	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 837	__le16 fc;
 838	int hdrlen;
 839	u8 keyid;
 840
 841	fc = hdr->frame_control;
 842	hdrlen = ieee80211_hdrlen(fc);
 843
 844	if (skb->len < hdrlen + cs->hdr_len)
 845		return -EINVAL;
 846
 847	skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
 848	keyid &= cs->key_idx_mask;
 849	keyid >>= cs->key_idx_shift;
 850
 851	return keyid;
 852}
 853
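/*
 * Mesh-specific acceptance checks: drop frames that loop back to us and,
 * for peers without an established mesh peering, only let through the
 * management frames needed to set one up (or beacons/probes/auth).
 */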
 854static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
 855{
 856	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 857	char *dev_addr = rx->sdata->vif.addr;
 858
 859	if (ieee80211_is_data(hdr->frame_control)) {
 860		if (is_multicast_ether_addr(hdr->addr1)) {
 861			if (ieee80211_has_tods(hdr->frame_control) ||
 862			    !ieee80211_has_fromds(hdr->frame_control))
 863				return RX_DROP_MONITOR;
 864			if (ether_addr_equal(hdr->addr3, dev_addr))
 865				return RX_DROP_MONITOR;
 866		} else {
 867			if (!ieee80211_has_a4(hdr->frame_control))
 868				return RX_DROP_MONITOR;
 869			if (ether_addr_equal(hdr->addr4, dev_addr))
 870				return RX_DROP_MONITOR;
 871		}
 872	}
 873
 874	/* If there is not an established peer link and this is not a peer link
 875	 * establishment frame, beacon or probe, drop the frame.
 876	 */
 877
 878	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
 879		struct ieee80211_mgmt *mgmt;
 880
 881		if (!ieee80211_is_mgmt(hdr->frame_control))
 882			return RX_DROP_MONITOR;
 883
 884		if (ieee80211_is_action(hdr->frame_control)) {
 885			u8 category;
 886
 887			/* make sure category field is present */
 888			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
 889				return RX_DROP_MONITOR;
 890
 891			mgmt = (struct ieee80211_mgmt *)hdr;
 892			category = mgmt->u.action.category;
 893			if (category != WLAN_CATEGORY_MESH_ACTION &&
 894			    category != WLAN_CATEGORY_SELF_PROTECTED)
 895				return RX_DROP_MONITOR;
 896			return RX_CONTINUE;
 897		}
 898
 899		if (ieee80211_is_probe_req(hdr->frame_control) ||
 900		    ieee80211_is_probe_resp(hdr->frame_control) ||
 901		    ieee80211_is_beacon(hdr->frame_control) ||
 902		    ieee80211_is_auth(hdr->frame_control))
 903			return RX_CONTINUE;
 904
 905		return RX_DROP_MONITOR;
 906	}
 907
 908	return RX_CONTINUE;
 909}
 910
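/*
 * A reorder slot is "ready" for release when it was reported as filtered
 * by the driver or when it holds a complete (A-)MSDU, i.e. the last
 * queued fragment does not have the "more A-MSDU frames" flag set.
 */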
 911static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
 912					      int index)
 913{
 914	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
 915	struct sk_buff *tail = skb_peek_tail(frames);
 916	struct ieee80211_rx_status *status;
 917
 918	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
 919		return true;
 920
 921	if (!tail)
 922		return false;
 923
 924	status = IEEE80211_SKB_RXCB(tail);
 925	if (status->flag & RX_FLAG_AMSDU_MORE)
 926		return false;
 927
 928	return true;
 929}
 930
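/*
 * Release one slot of the reorder buffer: move its frames (if the slot
 * is ready) to the caller's list, clear its filtered bit and advance the
 * head sequence number. Incomplete slots are purged instead of released.
 */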
 931static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
 932					    struct tid_ampdu_rx *tid_agg_rx,
 933					    int index,
 934					    struct sk_buff_head *frames)
 935{
 936	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
 937	struct sk_buff *skb;
 938	struct ieee80211_rx_status *status;
 939
 940	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 941
 942	if (skb_queue_empty(skb_list))
 943		goto no_frame;
 944
 945	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
 946		__skb_queue_purge(skb_list);
 947		goto no_frame;
 948	}
 949
 950	/* release frames from the reorder ring buffer */
 951	tid_agg_rx->stored_mpdu_num--;
 952	while ((skb = __skb_dequeue(skb_list))) {
 953		status = IEEE80211_SKB_RXCB(skb);
 954		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
 955		__skb_queue_tail(frames, skb);
 956	}
 957
 958no_frame:
 959	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
 960	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
 961}
 962
 963static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
 964					     struct tid_ampdu_rx *tid_agg_rx,
 965					     u16 head_seq_num,
 966					     struct sk_buff_head *frames)
 967{
 968	int index;
 969
 970	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 971
 972	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
 973		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
 974		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
 975						frames);
 976	}
 977}
 978
 979/*
 980 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 981 * the skb was added to the buffer longer than this time ago, the earlier
 982 * frames that have not yet been received are assumed to be lost and the skb
 983 * can be released for processing. This may also release other skb's from the
 984 * reorder buffer if there are no additional gaps between the frames.
 985 *
 986 * Callers must hold tid_agg_rx->reorder_lock.
 987 */
 988#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
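/*
 * Note that HZ / 10 always corresponds to roughly 100 ms of real time,
 * e.g. 100 jiffies with HZ == 1000 or 25 jiffies with HZ == 250.
 */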
 989
 990static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 991					  struct tid_ampdu_rx *tid_agg_rx,
 992					  struct sk_buff_head *frames)
 993{
 994	int index, i, j;
 995
 996	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 997
 998	/* release the buffer until the next missing frame */
 999	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1000	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
1001	    tid_agg_rx->stored_mpdu_num) {
1002		/*
1003		 * No buffers ready to be released, but check whether any
1004		 * frames in the reorder buffer have timed out.
1005		 */
1006		int skipped = 1;
1007		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
1008		     j = (j + 1) % tid_agg_rx->buf_size) {
1009			if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
1010				skipped++;
1011				continue;
1012			}
1013			if (skipped &&
1014			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
1015					HT_RX_REORDER_BUF_TIMEOUT))
1016				goto set_release_timer;
1017
1018			/* don't leave incomplete A-MSDUs around */
1019			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
1020			     i = (i + 1) % tid_agg_rx->buf_size)
1021				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
1022
1023			ht_dbg_ratelimited(sdata,
1024					   "release an RX reorder frame due to timeout on earlier frames\n");
1025			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
1026							frames);
1027
1028			/*
1029			 * Increment the head seq# also for the skipped slots.
1030			 */
1031			tid_agg_rx->head_seq_num =
1032				(tid_agg_rx->head_seq_num +
1033				 skipped) & IEEE80211_SN_MASK;
1034			skipped = 0;
1035		}
1036	} else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1037		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1038						frames);
1039		index =	tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1040	}
1041
1042	if (tid_agg_rx->stored_mpdu_num) {
1043		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1044
1045		for (; j != (index - 1) % tid_agg_rx->buf_size;
1046		     j = (j + 1) % tid_agg_rx->buf_size) {
1047			if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
1048				break;
1049		}
1050
1051 set_release_timer:
1052
1053		if (!tid_agg_rx->removed)
1054			mod_timer(&tid_agg_rx->reorder_timer,
1055				  tid_agg_rx->reorder_time[j] + 1 +
1056				  HT_RX_REORDER_BUF_TIMEOUT);
1057	} else {
1058		del_timer(&tid_agg_rx->reorder_timer);
1059	}
1060}
1061
1062/*
1063 * As this function belongs to the RX path it must be under
1064 * rcu_read_lock protection. It returns false if the frame
1065 * can be processed immediately, true if it was consumed.
1066 */
1067static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1068					     struct tid_ampdu_rx *tid_agg_rx,
1069					     struct sk_buff *skb,
1070					     struct sk_buff_head *frames)
1071{
1072	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1073	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1074	u16 sc = le16_to_cpu(hdr->seq_ctrl);
1075	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
1076	u16 head_seq_num, buf_size;
1077	int index;
1078	bool ret = true;
1079
1080	spin_lock(&tid_agg_rx->reorder_lock);
1081
1082	/*
1083	 * Offloaded BA sessions have no known starting sequence number, so pick
1084	 * one from the first frame received for this TID after BA was started.
1085	 */
1086	if (unlikely(tid_agg_rx->auto_seq)) {
1087		tid_agg_rx->auto_seq = false;
1088		tid_agg_rx->ssn = mpdu_seq_num;
1089		tid_agg_rx->head_seq_num = mpdu_seq_num;
1090	}
1091
1092	buf_size = tid_agg_rx->buf_size;
1093	head_seq_num = tid_agg_rx->head_seq_num;
1094
1095	/*
1096	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1097	 * be reordered.
1098	 */
1099	if (unlikely(!tid_agg_rx->started)) {
1100		if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1101			ret = false;
1102			goto out;
1103		}
1104		tid_agg_rx->started = true;
1105	}
1106
1107	/* frame with out of date sequence number */
1108	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1109		dev_kfree_skb(skb);
1110		goto out;
1111	}
1112
1113	/*
1114	 * If the frame's sequence number exceeds our buffering window
1115	 * size, release some previous frames to make room for this one.
1116	 */
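	/*
	 * Illustrative example: with head_seq_num == 100 and buf_size == 64
	 * the window covers SNs 100..163. An incoming MPDU with SN 170 lies
	 * outside it, so the head is moved to sn_inc(sn_sub(170, 64)) == 107,
	 * all stored frames with SN below 107 are released, and the new
	 * frame is later stored at slot 170 % 64 == 42.
	 */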
1117	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1118		head_seq_num = ieee80211_sn_inc(
1119				ieee80211_sn_sub(mpdu_seq_num, buf_size));
1120		/* release stored frames up to new head to stack */
1121		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1122						 head_seq_num, frames);
1123	}
1124
1125	/* Now the new frame is always in the range of the reordering buffer */
1126
1127	index = mpdu_seq_num % tid_agg_rx->buf_size;
1128
1129	/* check if we already stored this frame */
1130	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1131		dev_kfree_skb(skb);
1132		goto out;
1133	}
1134
1135	/*
1136	 * If the current MPDU is in the right order and nothing else
1137	 * is stored we can process it directly, no need to buffer it.
1138	 * If it is first but there's something stored, we may be able
1139	 * to release frames after this one.
1140	 */
1141	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1142	    tid_agg_rx->stored_mpdu_num == 0) {
1143		if (!(status->flag & RX_FLAG_AMSDU_MORE))
1144			tid_agg_rx->head_seq_num =
1145				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1146		ret = false;
1147		goto out;
1148	}
1149
1150	/* put the frame in the reordering buffer */
1151	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1152	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1153		tid_agg_rx->reorder_time[index] = jiffies;
1154		tid_agg_rx->stored_mpdu_num++;
1155		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1156	}
1157
1158 out:
1159	spin_unlock(&tid_agg_rx->reorder_lock);
1160	return ret;
1161}
1162
1163/* Reorder MPDUs from A-MPDUs, keeping them in a buffer. MPDUs that
1164 * need no reordering, and any released by the reorder logic, are
1165 * appended to the frames queue.
1166 */
1167static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1168				       struct sk_buff_head *frames)
1169{
1170	struct sk_buff *skb = rx->skb;
1171	struct ieee80211_local *local = rx->local;
1172	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1173	struct sta_info *sta = rx->sta;
1174	struct tid_ampdu_rx *tid_agg_rx;
1175	u16 sc;
1176	u8 tid, ack_policy;
1177
1178	if (!ieee80211_is_data_qos(hdr->frame_control) ||
1179	    is_multicast_ether_addr(hdr->addr1))
1180		goto dont_reorder;
1181
1182	/*
1183	 * filter the QoS data rx stream according to
1184	 * STA/TID and check if this STA/TID is on aggregation
1185	 */
1186
1187	if (!sta)
1188		goto dont_reorder;
1189
1190	ack_policy = *ieee80211_get_qos_ctl(hdr) &
1191		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1192	tid = ieee80211_get_tid(hdr);
1193
1194	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
1195	if (!tid_agg_rx) {
1196		if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1197		    !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1198		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1199			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1200					     WLAN_BACK_RECIPIENT,
1201					     WLAN_REASON_QSTA_REQUIRE_SETUP);
1202		goto dont_reorder;
1203	}
1204
1205	/* qos null data frames are excluded */
1206	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1207		goto dont_reorder;
1208
1209	/* not part of a BA session */
1210	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1211	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
1212		goto dont_reorder;
1213
1214	/* new, potentially un-ordered, ampdu frame - process it */
1215
1216	/* reset session timer */
1217	if (tid_agg_rx->timeout)
1218		tid_agg_rx->last_rx = jiffies;
1219
1220	/* if this mpdu is fragmented - terminate rx aggregation session */
1221	sc = le16_to_cpu(hdr->seq_ctrl);
1222	if (sc & IEEE80211_SCTL_FRAG) {
1223		skb_queue_tail(&rx->sdata->skb_queue, skb);
1224		ieee80211_queue_work(&local->hw, &rx->sdata->work);
1225		return;
1226	}
1227
1228	/*
1229	 * No locking needed -- we will only ever process one
1230	 * RX packet at a time, and thus own tid_agg_rx. All
1231	 * other code manipulating it needs to (and does) make
1232	 * sure that we cannot get to it any more before doing
1233	 * anything with it.
1234	 */
1235	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1236					     frames))
1237		return;
1238
1239 dont_reorder:
1240	__skb_queue_tail(frames, skb);
1241}
1242
1243static ieee80211_rx_result debug_noinline
1244ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1245{
1246	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1247	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1248
1249	if (status->flag & RX_FLAG_DUP_VALIDATED)
1250		return RX_CONTINUE;
1251
1252	/*
1253	 * Drop duplicate 802.11 retransmissions
1254	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
1255	 */
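	/*
	 * The duplicate check below compares the raw little-endian seq_ctrl
	 * field (sequence and fragment number together) against the last
	 * value seen from this station, and only frames with the retry bit
	 * set are treated as duplicates.
	 */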
1256
1257	if (rx->skb->len < 24)
1258		return RX_CONTINUE;
1259
1260	if (ieee80211_is_ctl(hdr->frame_control) ||
1261	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
1262	    is_multicast_ether_addr(hdr->addr1))
1263		return RX_CONTINUE;
1264
1265	if (!rx->sta)
1266		return RX_CONTINUE;
1267
1268	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1269		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1270		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1271		rx->sta->rx_stats.num_duplicates++;
1272		return RX_DROP_UNUSABLE;
1273	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1274		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
1275	}
1276
1277	return RX_CONTINUE;
1278}
1279
1280static ieee80211_rx_result debug_noinline
1281ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1282{
1283	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1284
1285	/* Drop disallowed frame classes based on STA auth/assoc state;
1286	 * IEEE 802.11, Chap 5.5.
1287	 *
1288	 * mac80211 filters only based on association state, i.e. it drops
1289	 * Class 3 frames from not associated stations. hostapd sends
1290	 * deauth/disassoc frames when needed. In addition, hostapd is
1291	 * responsible for filtering on both auth and assoc states.
1292	 */
1293
1294	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1295		return ieee80211_rx_mesh_check(rx);
1296
1297	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1298		      ieee80211_is_pspoll(hdr->frame_control)) &&
1299		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1300		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
1301		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1302		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1303		/*
1304		 * accept port control frames from the AP even when it's not
1305		 * yet marked ASSOC to prevent a race where we don't set the
1306		 * assoc bit quickly enough before it sends the first frame
1307		 */
1308		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1309		    ieee80211_is_data_present(hdr->frame_control)) {
1310			unsigned int hdrlen;
1311			__be16 ethertype;
1312
1313			hdrlen = ieee80211_hdrlen(hdr->frame_control);
1314
1315			if (rx->skb->len < hdrlen + 8)
1316				return RX_DROP_MONITOR;
1317
1318			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
1319			if (ethertype == rx->sdata->control_port_protocol)
1320				return RX_CONTINUE;
1321		}
1322
1323		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1324		    cfg80211_rx_spurious_frame(rx->sdata->dev,
1325					       hdr->addr2,
1326					       GFP_ATOMIC))
1327			return RX_DROP_UNUSABLE;
1328
1329		return RX_DROP_MONITOR;
1330	}
1331
1332	return RX_CONTINUE;
1333}
1334
1335
1336static ieee80211_rx_result debug_noinline
1337ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1338{
1339	struct ieee80211_local *local;
1340	struct ieee80211_hdr *hdr;
1341	struct sk_buff *skb;
1342
1343	local = rx->local;
1344	skb = rx->skb;
1345	hdr = (struct ieee80211_hdr *) skb->data;
1346
1347	if (!local->pspolling)
1348		return RX_CONTINUE;
1349
1350	if (!ieee80211_has_fromds(hdr->frame_control))
1351		/* this is not from AP */
1352		return RX_CONTINUE;
1353
1354	if (!ieee80211_is_data(hdr->frame_control))
1355		return RX_CONTINUE;
1356
1357	if (!ieee80211_has_moredata(hdr->frame_control)) {
1358		/* AP has no more frames buffered for us */
1359		local->pspolling = false;
1360		return RX_CONTINUE;
1361	}
1362
1363	/* more data bit is set, let's request a new frame from the AP */
1364	ieee80211_send_pspoll(local, rx->sdata);
1365
1366	return RX_CONTINUE;
1367}
1368
1369static void sta_ps_start(struct sta_info *sta)
1370{
1371	struct ieee80211_sub_if_data *sdata = sta->sdata;
1372	struct ieee80211_local *local = sdata->local;
1373	struct ps_data *ps;
1374	int tid;
1375
1376	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1377	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1378		ps = &sdata->bss->ps;
1379	else
1380		return;
1381
1382	atomic_inc(&ps->num_sta_ps);
1383	set_sta_flag(sta, WLAN_STA_PS_STA);
1384	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1385		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1386	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1387	       sta->sta.addr, sta->sta.aid);
1388
1389	ieee80211_clear_fast_xmit(sta);
1390
1391	if (!sta->sta.txq[0])
1392		return;
1393
1394	for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
1395		if (txq_has_queue(sta->sta.txq[tid]))
1396			set_bit(tid, &sta->txq_buffered_tids);
1397		else
1398			clear_bit(tid, &sta->txq_buffered_tids);
1399	}
1400}
1401
1402static void sta_ps_end(struct sta_info *sta)
1403{
1404	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1405	       sta->sta.addr, sta->sta.aid);
1406
1407	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1408		/*
1409		 * Clear the flag only if the other one is still set
1410		 * so that the TX path won't start TX'ing new frames
1411		 * directly. In the case that the driver flag isn't
1412		 * set, ieee80211_sta_ps_deliver_wakeup() will clear it.
1413		 */
1414		clear_sta_flag(sta, WLAN_STA_PS_STA);
1415		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1416		       sta->sta.addr, sta->sta.aid);
1417		return;
1418	}
1419
1420	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1421	clear_sta_flag(sta, WLAN_STA_PS_STA);
1422	ieee80211_sta_ps_deliver_wakeup(sta);
1423}
1424
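/*
 * Used by drivers that handle station powersave in the device/firmware
 * (AP_LINK_PS) to keep mac80211's view of the station's PS state in
 * sync; returns -EINVAL if the requested state is already set.
 */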
1425int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1426{
1427	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1428	bool in_ps;
1429
1430	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1431
1432	/* Don't let the same PS state be set twice */
1433	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1434	if ((start && in_ps) || (!start && !in_ps))
1435		return -EINVAL;
1436
1437	if (start)
1438		sta_ps_start(sta);
1439	else
1440		sta_ps_end(sta);
1441
1442	return 0;
1443}
1444EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1445
1446void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
1447{
1448	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1449
1450	if (test_sta_flag(sta, WLAN_STA_SP))
1451		return;
1452
1453	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1454		ieee80211_sta_ps_deliver_poll_response(sta);
1455	else
1456		set_sta_flag(sta, WLAN_STA_PSPOLL);
1457}
1458EXPORT_SYMBOL(ieee80211_sta_pspoll);
1459
1460void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1461{
1462	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1463	int ac = ieee80211_ac_from_tid(tid);
1464
1465	/*
1466	 * If this AC is not trigger-enabled do nothing unless the
1467	 * driver is calling us after it already checked.
1468	 *
1469	 * NB: This could/should check a separate bitmap of trigger-
1470	 * enabled queues, but for now we only implement uAPSD w/o
1471	 * TSPEC changes to the ACs, so they're always the same.
1472	 */
1473	if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1474	    tid != IEEE80211_NUM_TIDS)
1475		return;
1476
1477	/* if we are in a service period, do nothing */
1478	if (test_sta_flag(sta, WLAN_STA_SP))
1479		return;
1480
1481	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1482		ieee80211_sta_ps_deliver_uapsd(sta);
1483	else
1484		set_sta_flag(sta, WLAN_STA_UAPSD);
1485}
1486EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
1487
1488static ieee80211_rx_result debug_noinline
1489ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1490{
1491	struct ieee80211_sub_if_data *sdata = rx->sdata;
1492	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1493	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1494
1495	if (!rx->sta)
1496		return RX_CONTINUE;
1497
1498	if (sdata->vif.type != NL80211_IFTYPE_AP &&
1499	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1500		return RX_CONTINUE;
1501
1502	/*
1503	 * The device handles station powersave, so don't do anything about
1504	 * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
1505	 * to mac80211 since the device already handles them).
1506	 */
1507	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1508		return RX_CONTINUE;
1509
1510	/*
1511	 * Don't do anything if the station isn't already asleep. In
1512	 * the uAPSD case, the station will probably be marked asleep,
1513	 * in the PS-Poll case the station must be confused ...
1514	 */
1515	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1516		return RX_CONTINUE;
1517
1518	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1519		ieee80211_sta_pspoll(&rx->sta->sta);
1520
1521		/* Free PS Poll skb here instead of returning RX_DROP that would
1522		 * count as a dropped frame. */
1523		dev_kfree_skb(rx->skb);
1524
1525		return RX_QUEUED;
1526	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1527		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1528		   ieee80211_has_pm(hdr->frame_control) &&
1529		   (ieee80211_is_data_qos(hdr->frame_control) ||
1530		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1531		u8 tid = ieee80211_get_tid(hdr);
1532
1533		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
1534	}
1535
1536	return RX_CONTINUE;
1537}
1538
1539static ieee80211_rx_result debug_noinline
1540ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1541{
1542	struct sta_info *sta = rx->sta;
1543	struct sk_buff *skb = rx->skb;
1544	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1545	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1546	int i;
1547
1548	if (!sta)
1549		return RX_CONTINUE;
1550
1551	/*
1552	 * Update last_rx only for IBSS packets which are for the current
1553	 * BSSID and for stations already AUTHORIZED to avoid keeping the
1554	 * current IBSS network alive in cases where other STAs start
1555	 * using a different BSSID. This will also give the station another
1556	 * chance to restart the authentication/authorization in case
1557	 * something went wrong the first time.
1558	 */
1559	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1560		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1561						NL80211_IFTYPE_ADHOC);
1562		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1563		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1564			sta->rx_stats.last_rx = jiffies;
1565			if (ieee80211_is_data(hdr->frame_control) &&
1566			    !is_multicast_ether_addr(hdr->addr1))
1567				sta->rx_stats.last_rate =
1568					sta_stats_encode_rate(status);
1569		}
1570	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1571		sta->rx_stats.last_rx = jiffies;
1572	} else if (!is_multicast_ether_addr(hdr->addr1)) {
1573		/*
1574		 * Mesh beacons will update last_rx if they are found to match
1575		 * the current local configuration when processed.
1576		 */
1577		sta->rx_stats.last_rx = jiffies;
1578		if (ieee80211_is_data(hdr->frame_control))
1579			sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1580	}
1581
1582	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1583		ieee80211_sta_rx_notify(rx->sdata, hdr);
1584
1585	sta->rx_stats.fragments++;
1586
1587	u64_stats_update_begin(&rx->sta->rx_stats.syncp);
1588	sta->rx_stats.bytes += rx->skb->len;
1589	u64_stats_update_end(&rx->sta->rx_stats.syncp);
1590
1591	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1592		sta->rx_stats.last_signal = status->signal;
1593		ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
1594	}
1595
1596	if (status->chains) {
1597		sta->rx_stats.chains = status->chains;
1598		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1599			int signal = status->chain_signal[i];
1600
1601			if (!(status->chains & BIT(i)))
1602				continue;
1603
1604			sta->rx_stats.chain_signal_last[i] = signal;
1605			ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
1606					-signal);
1607		}
1608	}
1609
1610	/*
1611	 * Change STA power saving mode only at the end of a frame
1612	 * exchange sequence, and only for a data or management
1613	 * frame as specified in IEEE 802.11-2016 11.2.3.2
1614	 */
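	/*
	 * The Power Management bit announces the mode the station will be
	 * in after this frame exchange: PM set means it is about to doze,
	 * PM clear means it is (staying) awake.
	 */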
1615	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1616	    !ieee80211_has_morefrags(hdr->frame_control) &&
1617	    (ieee80211_is_mgmt(hdr->frame_control) ||
1618	     ieee80211_is_data(hdr->frame_control)) &&
1619	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1620	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1621	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1622		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1623			if (!ieee80211_has_pm(hdr->frame_control))
1624				sta_ps_end(sta);
1625		} else {
1626			if (ieee80211_has_pm(hdr->frame_control))
1627				sta_ps_start(sta);
1628		}
1629	}
1630
1631	/* mesh power save support */
1632	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1633		ieee80211_mps_rx_h_sta_process(sta, hdr);
1634
1635	/*
1636	 * Drop (qos-)data::nullfunc frames silently, since they
1637	 * are used only to control station power saving mode.
1638	 */
1639	if (ieee80211_is_nullfunc(hdr->frame_control) ||
1640	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1641		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1642
1643		/*
1644		 * If we receive a 4-addr nullfunc frame from a STA
1645		 * that was not moved to a 4-addr STA vlan yet, send
1646		 * the event to userspace and, for older hostapd, drop
1647		 * the frame to the monitor interface.
1648		 */
1649		if (ieee80211_has_a4(hdr->frame_control) &&
1650		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1651		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1652		      !rx->sdata->u.vlan.sta))) {
1653			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1654				cfg80211_rx_unexpected_4addr_frame(
1655					rx->sdata->dev, sta->sta.addr,
1656					GFP_ATOMIC);
1657			return RX_DROP_MONITOR;
1658		}
1659		/*
1660		 * Update counter and free packet here to avoid
1661		 * counting this as a dropped packet.
1662		 */
1663		sta->rx_stats.packets++;
1664		dev_kfree_skb(rx->skb);
1665		return RX_QUEUED;
1666	}
1667
1668	return RX_CONTINUE;
1669} /* ieee80211_rx_h_sta_process */
1670
1671static ieee80211_rx_result debug_noinline
1672ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1673{
1674	struct sk_buff *skb = rx->skb;
1675	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1676	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1677	int keyidx;
1678	int hdrlen;
1679	ieee80211_rx_result result = RX_DROP_UNUSABLE;
1680	struct ieee80211_key *sta_ptk = NULL;
1681	int mmie_keyidx = -1;
1682	__le16 fc;
1683	const struct ieee80211_cipher_scheme *cs = NULL;
1684
1685	/*
1686	 * Key selection 101
1687	 *
1688	 * There are four types of keys:
1689	 *  - GTK (group keys)
1690	 *  - IGTK (group keys for management frames)
1691	 *  - PTK (pairwise keys)
1692	 *  - STK (station-to-station pairwise keys)
1693	 *
1694	 * When selecting a key, we have to distinguish between multicast
1695	 * (including broadcast) and unicast frames; the latter can only
1696	 * use PTKs and STKs while the former always use GTKs and IGTKs.
1697	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
1698	 * unicast frames can also use key indices like GTKs. Hence, if we
1699	 * don't have a PTK/STK we check the key index for a WEP key.
1700	 *
1701	 * Note that in a regular BSS, multicast frames are sent by the
1702	 * AP only, associated stations unicast the frame to the AP first
1703	 * which then multicasts it on their behalf.
1704	 *
1705	 * There is also a slight problem in IBSS mode: GTKs are negotiated
1706	 * with each station; that is something we don't currently handle.
1707	 * The spec seems to expect that one negotiates the same key with
1708	 * every station but there's no such requirement; VLANs could be
1709	 * possible.
1710	 */
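	/*
	 * In short, the selection below proceeds as follows:
	 *  - unicast frame and a PTK/STK exists: use it
	 *  - MMIE present on an unprotected frame: use the BIP/IGTK key
	 *  - otherwise unprotected: only remember a candidate key so the
	 *    frame can be dropped later if protection was expected
	 *  - otherwise: look up a GTK (per-station first for multicast) or
	 *    a WEP default key by the key index found in the frame
	 */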
1711
1712	/* start without a key */
1713	rx->key = NULL;
1714	fc = hdr->frame_control;
1715
1716	if (rx->sta) {
1717		int keyid = rx->sta->ptk_idx;
1718
1719		if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
1720			cs = rx->sta->cipher_scheme;
1721			keyid = ieee80211_get_cs_keyid(cs, rx->skb);
1722			if (unlikely(keyid < 0))
1723				return RX_DROP_UNUSABLE;
1724		}
1725		sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
1726	}
1727
1728	if (!ieee80211_has_protected(fc))
1729		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
1730
1731	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
1732		rx->key = sta_ptk;
1733		if ((status->flag & RX_FLAG_DECRYPTED) &&
1734		    (status->flag & RX_FLAG_IV_STRIPPED))
1735			return RX_CONTINUE;
1736		/* Skip decryption if the frame is not protected. */
1737		if (!ieee80211_has_protected(fc))
1738			return RX_CONTINUE;
1739	} else if (mmie_keyidx >= 0) {
1740		/* Broadcast/multicast robust management frame / BIP */
1741		if ((status->flag & RX_FLAG_DECRYPTED) &&
1742		    (status->flag & RX_FLAG_IV_STRIPPED))
1743			return RX_CONTINUE;
1744
1745		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
1746		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1747			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1748		if (rx->sta) {
1749			if (ieee80211_is_group_privacy_action(skb) &&
1750			    test_sta_flag(rx->sta, WLAN_STA_MFP))
1751				return RX_DROP_MONITOR;
1752
1753			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
1754		}
1755		if (!rx->key)
1756			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
1757	} else if (!ieee80211_has_protected(fc)) {
1758		/*
1759		 * The frame was not protected, so skip decryption. However, we
1760		 * need to set rx->key if there is a key that could have been
1761		 * used so that the frame may be dropped if encryption would
1762		 * have been expected.
1763		 */
1764		struct ieee80211_key *key = NULL;
1765		struct ieee80211_sub_if_data *sdata = rx->sdata;
1766		int i;
1767
1768		if (ieee80211_is_mgmt(fc) &&
1769		    is_multicast_ether_addr(hdr->addr1) &&
1770		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
1771			rx->key = key;
1772		else {
1773			if (rx->sta) {
1774				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1775					key = rcu_dereference(rx->sta->gtk[i]);
1776					if (key)
1777						break;
1778				}
1779			}
1780			if (!key) {
1781				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1782					key = rcu_dereference(sdata->keys[i]);
1783					if (key)
1784						break;
1785				}
1786			}
1787			if (key)
1788				rx->key = key;
1789		}
1790		return RX_CONTINUE;
1791	} else {
1792		u8 keyid;
1793
1794		/*
1795		 * The device doesn't give us the IV so we won't be
1796		 * able to look up the key. That's ok though, we
1797		 * don't need to decrypt the frame, we just won't
1798		 * be able to keep statistics accurate.
1799		 * Except for key threshold notifications, should
1800		 * we somehow allow the driver to tell us which key
1801		 * the hardware used if this flag is set?
1802		 */
1803		if ((status->flag & RX_FLAG_DECRYPTED) &&
1804		    (status->flag & RX_FLAG_IV_STRIPPED))
1805			return RX_CONTINUE;
1806
1807		hdrlen = ieee80211_hdrlen(fc);
1808
1809		if (cs) {
1810			keyidx = ieee80211_get_cs_keyid(cs, rx->skb);
1811
1812			if (unlikely(keyidx < 0))
1813				return RX_DROP_UNUSABLE;
1814		} else {
1815			if (rx->skb->len < 8 + hdrlen)
1816				return RX_DROP_UNUSABLE; /* TODO: count this? */
1817			/*
1818			 * no need to call ieee80211_wep_get_keyidx,
1819			 * it verifies a bunch of things we've done already
1820			 */
1821			skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
1822			keyidx = keyid >> 6;
1823		}
1824
1825		/* check per-station GTK first, if multicast packet */
1826		if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1827			rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1828
1829		/* if not found, try default key */
1830		if (!rx->key) {
1831			rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1832
1833			/*
1834			 * RSNA-protected unicast frames should always be
1835			 * sent with pairwise or station-to-station keys,
1836			 * but for WEP we allow using a key index as well.
1837			 */
1838			if (rx->key &&
1839			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1840			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1841			    !is_multicast_ether_addr(hdr->addr1))
1842				rx->key = NULL;
1843		}
1844	}
1845
1846	if (rx->key) {
1847		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1848			return RX_DROP_MONITOR;
1849
1850		/* TODO: add threshold stuff again */
1851	} else {
1852		return RX_DROP_MONITOR;
1853	}
1854
1855	switch (rx->key->conf.cipher) {
1856	case WLAN_CIPHER_SUITE_WEP40:
1857	case WLAN_CIPHER_SUITE_WEP104:
1858		result = ieee80211_crypto_wep_decrypt(rx);
1859		break;
1860	case WLAN_CIPHER_SUITE_TKIP:
1861		result = ieee80211_crypto_tkip_decrypt(rx);
1862		break;
1863	case WLAN_CIPHER_SUITE_CCMP:
1864		result = ieee80211_crypto_ccmp_decrypt(
1865			rx, IEEE80211_CCMP_MIC_LEN);
1866		break;
1867	case WLAN_CIPHER_SUITE_CCMP_256:
1868		result = ieee80211_crypto_ccmp_decrypt(
1869			rx, IEEE80211_CCMP_256_MIC_LEN);
1870		break;
1871	case WLAN_CIPHER_SUITE_AES_CMAC:
1872		result = ieee80211_crypto_aes_cmac_decrypt(rx);
1873		break;
1874	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
1875		result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
1876		break;
1877	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
1878	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
1879		result = ieee80211_crypto_aes_gmac_decrypt(rx);
1880		break;
1881	case WLAN_CIPHER_SUITE_GCMP:
1882	case WLAN_CIPHER_SUITE_GCMP_256:
1883		result = ieee80211_crypto_gcmp_decrypt(rx);
1884		break;
1885	default:
1886		result = ieee80211_crypto_hw_decrypt(rx);
1887	}
1888
1889	/* the hdr variable is invalid after the decrypt handlers */
1890
1891	/* either the frame has been decrypted or will be dropped */
1892	status->flag |= RX_FLAG_DECRYPTED;
1893
1894	return result;
1895}
1896
1897static inline struct ieee80211_fragment_entry *
1898ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1899			 unsigned int frag, unsigned int seq, int rx_queue,
1900			 struct sk_buff **skb)
1901{
1902	struct ieee80211_fragment_entry *entry;
1903
1904	entry = &sdata->fragments[sdata->fragment_next++];
1905	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1906		sdata->fragment_next = 0;
1907
1908	if (!skb_queue_empty(&entry->skb_list))
1909		__skb_queue_purge(&entry->skb_list);
1910
1911	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1912	*skb = NULL;
1913	entry->first_frag_time = jiffies;
1914	entry->seq = seq;
1915	entry->rx_queue = rx_queue;
1916	entry->last_frag = frag;
1917	entry->check_sequential_pn = false;
1918	entry->extra_len = 0;
1919
1920	return entry;
1921}
1922
1923static inline struct ieee80211_fragment_entry *
1924ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1925			  unsigned int frag, unsigned int seq,
1926			  int rx_queue, struct ieee80211_hdr *hdr)
1927{
1928	struct ieee80211_fragment_entry *entry;
1929	int i, idx;
1930
1931	idx = sdata->fragment_next;
1932	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1933		struct ieee80211_hdr *f_hdr;
1934
1935		idx--;
1936		if (idx < 0)
1937			idx = IEEE80211_FRAGMENT_MAX - 1;
1938
1939		entry = &sdata->fragments[idx];
1940		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1941		    entry->rx_queue != rx_queue ||
1942		    entry->last_frag + 1 != frag)
1943			continue;
1944
1945		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1946
1947		/*
1948		 * Check ftype and addresses are equal, else check next fragment
1949		 */
1950		if (((hdr->frame_control ^ f_hdr->frame_control) &
1951		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1952		    !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
1953		    !ether_addr_equal(hdr->addr2, f_hdr->addr2))
1954			continue;
1955
1956		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1957			__skb_queue_purge(&entry->skb_list);
1958			continue;
1959		}
1960		return entry;
1961	}
1962
1963	return NULL;
1964}
1965
1966static ieee80211_rx_result debug_noinline
1967ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1968{
1969	struct ieee80211_hdr *hdr;
1970	u16 sc;
1971	__le16 fc;
1972	unsigned int frag, seq;
1973	struct ieee80211_fragment_entry *entry;
1974	struct sk_buff *skb;
1975
1976	hdr = (struct ieee80211_hdr *)rx->skb->data;
1977	fc = hdr->frame_control;
1978
1979	if (ieee80211_is_ctl(fc))
1980		return RX_CONTINUE;
1981
1982	sc = le16_to_cpu(hdr->seq_ctrl);
1983	frag = sc & IEEE80211_SCTL_FRAG;
1984
1985	if (is_multicast_ether_addr(hdr->addr1)) {
1986		I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
1987		goto out_no_led;
1988	}
1989
1990	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
1991		goto out;
1992
1993	I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1994
1995	if (skb_linearize(rx->skb))
1996		return RX_DROP_UNUSABLE;
1997
1998	/*
1999	 *  skb_linearize() might change the skb->data and
2000	 *  previously cached variables (in this case, hdr) need to
2001	 *  be refreshed with the new data.
2002	 */
2003	hdr = (struct ieee80211_hdr *)rx->skb->data;
2004	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2005
2006	if (frag == 0) {
2007		/* This is the first fragment of a new frame. */
2008		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
2009						 rx->seqno_idx, &(rx->skb));
2010		if (rx->key &&
2011		    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
2012		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
2013		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
2014		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
2015		    ieee80211_has_protected(fc)) {
2016			int queue = rx->security_idx;
2017
2018			/* Store CCMP/GCMP PN so that we can verify that the
2019			 * next fragment has a sequential PN value.
2020			 */
2021			entry->check_sequential_pn = true;
2022			memcpy(entry->last_pn,
2023			       rx->key->u.ccmp.rx_pn[queue],
2024			       IEEE80211_CCMP_PN_LEN);
2025			BUILD_BUG_ON(offsetof(struct ieee80211_key,
2026					      u.ccmp.rx_pn) !=
2027				     offsetof(struct ieee80211_key,
2028					      u.gcmp.rx_pn));
2029			BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2030				     sizeof(rx->key->u.gcmp.rx_pn[queue]));
2031			BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
2032				     IEEE80211_GCMP_PN_LEN);
2033		}
2034		return RX_QUEUED;
2035	}
2036
2037	/* This is a fragment for a frame that should already be pending in
2038	 * the fragment cache. Add this fragment to the end of the pending entry.
2039	 */
2040	entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
2041					  rx->seqno_idx, hdr);
2042	if (!entry) {
2043		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2044		return RX_DROP_MONITOR;
2045	}
2046
2047	/* "The receiver shall discard MSDUs and MMPDUs whose constituent
2048	 *  MPDU PN values are not incrementing in steps of 1."
2049	 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
2050	 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2051	 */
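	/*
	 * The PN is kept as a big-endian byte array; the increment below
	 * starts at the last byte and carries towards the first, e.g.
	 * 00 00 00 00 01 ff + 1 == 00 00 00 00 02 00.
	 */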
2052	if (entry->check_sequential_pn) {
2053		int i;
2054		u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2055		int queue;
2056
2057		if (!rx->key ||
2058		    (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
2059		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
2060		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
2061		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
2062			return RX_DROP_UNUSABLE;
2063		memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
2064		for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2065			pn[i]++;
2066			if (pn[i])
2067				break;
2068		}
2069		queue = rx->security_idx;
2070		rpn = rx->key->u.ccmp.rx_pn[queue];
2071		if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2072			return RX_DROP_UNUSABLE;
2073		memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2074	}
2075
2076	skb_pull(rx->skb, ieee80211_hdrlen(fc));
2077	__skb_queue_tail(&entry->skb_list, rx->skb);
2078	entry->last_frag = frag;
2079	entry->extra_len += rx->skb->len;
2080	if (ieee80211_has_morefrags(fc)) {
2081		rx->skb = NULL;
2082		return RX_QUEUED;
2083	}
2084
2085	rx->skb = __skb_dequeue(&entry->skb_list);
2086	if (skb_tailroom(rx->skb) < entry->extra_len) {
2087		I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2088		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2089					      GFP_ATOMIC))) {
2090			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2091			__skb_queue_purge(&entry->skb_list);
2092			return RX_DROP_UNUSABLE;
2093		}
2094	}
2095	while ((skb = __skb_dequeue(&entry->skb_list))) {
2096		skb_put_data(rx->skb, skb->data, skb->len);
2097		dev_kfree_skb(skb);
2098	}
2099
2100 out:
2101	ieee80211_led_rx(rx->local);
2102 out_no_led:
2103	if (rx->sta)
2104		rx->sta->rx_stats.packets++;
2105	return RX_CONTINUE;
2106}
2107
2108static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2109{
2110	if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2111		return -EACCES;
2112
2113	return 0;
2114}
2115
2116static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2117{
2118	struct sk_buff *skb = rx->skb;
2119	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2120
2121	/*
2122	 * Pass through unencrypted frames if the hardware has
2123	 * decrypted them already.
2124	 */
2125	if (status->flag & RX_FLAG_DECRYPTED)
2126		return 0;
2127
2128	/* Drop unencrypted frames if key is set. */
2129	if (unlikely(!ieee80211_has_protected(fc) &&
2130		     !ieee80211_is_nullfunc(fc) &&
2131		     ieee80211_is_data(fc) && rx->key))
2132		return -EACCES;
2133
2134	return 0;
2135}
2136
2137static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2138{
2139	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2140	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2141	__le16 fc = hdr->frame_control;
2142
2143	/*
2144	 * Pass through unencrypted frames if the hardware has
2145	 * decrypted them already.
2146	 */
2147	if (status->flag & RX_FLAG_DECRYPTED)
2148		return 0;
2149
2150	if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2151		if (unlikely(!ieee80211_has_protected(fc) &&
2152			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
2153			     rx->key)) {
2154			if (ieee80211_is_deauth(fc) ||
2155			    ieee80211_is_disassoc(fc))
2156				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2157							     rx->skb->data,
2158							     rx->skb->len);
2159			return -EACCES;
2160		}
2161		/* BIP does not use Protected field, so need to check MMIE */
2162		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2163			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2164			if (ieee80211_is_deauth(fc) ||
2165			    ieee80211_is_disassoc(fc))
2166				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2167							     rx->skb->data,
2168							     rx->skb->len);
2169			return -EACCES;
2170		}
2171		/*
2172		 * When using MFP, Action frames are not allowed prior to
2173		 * having configured keys.
2174		 */
2175		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2176			     ieee80211_is_robust_mgmt_frame(rx->skb)))
2177			return -EACCES;
2178	}
2179
2180	return 0;
2181}
2182
2183static int
2184__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2185{
2186	struct ieee80211_sub_if_data *sdata = rx->sdata;
2187	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2188	bool check_port_control = false;
2189	struct ethhdr *ehdr;
2190	int ret;
2191
2192	*port_control = false;
2193	if (ieee80211_has_a4(hdr->frame_control) &&
2194	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2195		return -1;
2196
2197	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2198	    !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2199
2200		if (!sdata->u.mgd.use_4addr)
2201			return -1;
2202		else
2203			check_port_control = true;
2204	}
2205
2206	if (is_multicast_ether_addr(hdr->addr1) &&
2207	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2208		return -1;
2209
2210	ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2211	if (ret < 0)
2212		return ret;
2213
2214	ehdr = (struct ethhdr *) rx->skb->data;
2215	if (ehdr->h_proto == rx->sdata->control_port_protocol)
2216		*port_control = true;
2217	else if (check_port_control)
2218		return -1;
2219
2220	return 0;
2221}
2222
2223/*
2224 * requires that rx->skb is a frame with an ethernet header
2225 */
2226static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2227{
2228	static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2229		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
2230	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2231
2232	/*
2233	 * Allow EAPOL frames to us/the PAE group address regardless
2234	 * of whether the frame was encrypted or not.
2235	 */
2236	if (ehdr->h_proto == rx->sdata->control_port_protocol &&
2237	    (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
2238	     ether_addr_equal(ehdr->h_dest, pae_group_addr)))
2239		return true;
2240
2241	if (ieee80211_802_1x_port_control(rx) ||
2242	    ieee80211_drop_unencrypted(rx, fc))
2243		return false;
2244
2245	return true;
2246}
2247
2248static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2249						 struct ieee80211_rx_data *rx)
2250{
2251	struct ieee80211_sub_if_data *sdata = rx->sdata;
2252	struct net_device *dev = sdata->dev;
2253
2254	if (unlikely((skb->protocol == sdata->control_port_protocol ||
2255		      skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
2256		     sdata->control_port_over_nl80211)) {
2257		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2258		bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2259		struct ethhdr *ehdr = eth_hdr(skb);
2260
2261		cfg80211_rx_control_port(dev, skb->data, skb->len,
2262					 ehdr->h_source,
2263					 be16_to_cpu(skb->protocol), noencrypt);
2264		dev_kfree_skb(skb);
2265	} else {
2266		/* deliver to local stack */
2267		if (rx->napi)
2268			napi_gro_receive(rx->napi, skb);
2269		else
2270			netif_receive_skb(skb);
2271	}
2272}
2273
2274/*
2275 * requires that rx->skb is a frame with an ethernet header
2276 */
2277static void
2278ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2279{
2280	struct ieee80211_sub_if_data *sdata = rx->sdata;
2281	struct net_device *dev = sdata->dev;
2282	struct sk_buff *skb, *xmit_skb;
2283	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2284	struct sta_info *dsta;
2285
2286	skb = rx->skb;
2287	xmit_skb = NULL;
2288
2289	ieee80211_rx_stats(dev, skb->len);
2290
2291	if (rx->sta) {
2292		/* The seqno index has the same property as needed
2293		 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2294		 * for non-QoS-data frames. Here we know it's a data
2295		 * frame, so count MSDUs.
2296		 */
2297		u64_stats_update_begin(&rx->sta->rx_stats.syncp);
2298		rx->sta->rx_stats.msdu[rx->seqno_idx]++;
2299		u64_stats_update_end(&rx->sta->rx_stats.syncp);
2300	}
2301
2302	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2303	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2304	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2305	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2306		if (is_multicast_ether_addr(ehdr->h_dest) &&
2307		    ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2308			/*
2309			 * send multicast frames both to higher layers in the
2310			 * local net stack and back to the wireless medium
2311			 */
2312			xmit_skb = skb_copy(skb, GFP_ATOMIC);
2313			if (!xmit_skb)
2314				net_info_ratelimited("%s: failed to clone multicast frame\n",
2315						    dev->name);
2316		} else if (!is_multicast_ether_addr(ehdr->h_dest)) {
2317			dsta = sta_info_get(sdata, skb->data);
2318			if (dsta) {
2319				/*
2320				 * The destination station is associated to
2321				 * this AP (in this VLAN), so send the frame
2322				 * directly to it and do not pass it to local
2323				 * net stack.
2324				 */
2325				xmit_skb = skb;
2326				skb = NULL;
2327			}
2328		}
2329	}
2330
2331#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2332	if (skb) {
2333		/* 'align' will only take the values 0 or 2 here since all
2334		 * frames are required to be aligned to 2-byte boundaries
2335		 * when being passed to mac80211; the code here works just
2336		 * as well if that isn't true, but mac80211 assumes it can
2337		 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2338		 */
2339		int align;
2340
2341		align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
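		/*
		 * For example, if skb->data sits at an address that is
		 * 0 mod 4, the payload behind the 14-byte ethernet header
		 * starts at 2 mod 4; align == 2 and the memmove below
		 * shifts everything down two bytes so the network header
		 * becomes 4-byte aligned.
		 */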
2342		if (align) {
2343			if (WARN_ON(skb_headroom(skb) < 3)) {
2344				dev_kfree_skb(skb);
2345				skb = NULL;
2346			} else {
2347				u8 *data = skb->data;
2348				size_t len = skb_headlen(skb);
2349				skb->data -= align;
2350				memmove(skb->data, data, len);
2351				skb_set_tail_pointer(skb, len);
2352			}
2353		}
2354	}
2355#endif
2356
2357	if (skb) {
2358		skb->protocol = eth_type_trans(skb, dev);
2359		memset(skb->cb, 0, sizeof(skb->cb));
2360
2361		ieee80211_deliver_skb_to_local_stack(skb, rx);
2362	}
2363
2364	if (xmit_skb) {
2365		/*
2366		 * Send to wireless media and increase priority by 256 to
2367		 * keep the received priority instead of reclassifying
2368		 * the frame (see cfg80211_classify8021d).
2369		 */
2370		xmit_skb->priority += 256;
2371		xmit_skb->protocol = htons(ETH_P_802_3);
2372		skb_reset_network_header(xmit_skb);
2373		skb_reset_mac_header(xmit_skb);
2374		dev_queue_xmit(xmit_skb);
2375	}
2376}
2377
2378static ieee80211_rx_result debug_noinline
2379__ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
2380{
2381	struct net_device *dev = rx->sdata->dev;
2382	struct sk_buff *skb = rx->skb;
2383	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2384	__le16 fc = hdr->frame_control;
2385	struct sk_buff_head frame_list;
2386	struct ethhdr ethhdr;
2387	const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
2388
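	/*
	 * check_da/check_sa point into ethhdr (filled in further below) and
	 * are handed to ieee80211_amsdu_to_8023s() so it can validate the
	 * inner DA/SA of each A-MSDU subframe against the outer header; a
	 * NULL pointer disables the respective check, depending on the
	 * interface type and, for stations, on whether the sender is a
	 * TDLS peer.
	 */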
2389	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2390		check_da = NULL;
2391		check_sa = NULL;
2392	} else switch (rx->sdata->vif.type) {
2393		case NL80211_IFTYPE_AP:
2394		case NL80211_IFTYPE_AP_VLAN:
2395			check_da = NULL;
2396			break;
2397		case NL80211_IFTYPE_STATION:
2398			if (!rx->sta ||
2399			    !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2400				check_sa = NULL;
2401			break;
2402		case NL80211_IFTYPE_MESH_POINT:
2403			check_sa = NULL;
2404			break;
2405		default:
2406			break;
2407	}
2408
2409	skb->dev = dev;
2410	__skb_queue_head_init(&frame_list);
2411
2412	if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2413					  rx->sdata->vif.addr,
2414					  rx->sdata->vif.type,
2415					  data_offset))
2416		return RX_DROP_UNUSABLE;
2417
2418	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2419				 rx->sdata->vif.type,
2420				 rx->local->hw.extra_tx_headroom,
2421				 check_da, check_sa);
2422
2423	while (!skb_queue_empty(&frame_list)) {
2424		rx->skb = __skb_dequeue(&frame_list);
2425
2426		if (!ieee80211_frame_allowed(rx, fc)) {
2427			dev_kfree_skb(rx->skb);
2428			continue;
2429		}
2430
2431		ieee80211_deliver_skb(rx);
2432	}
2433
2434	return RX_QUEUED;
2435}
2436
2437static ieee80211_rx_result debug_noinline
2438ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2439{
2440	struct sk_buff *skb = rx->skb;
2441	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2442	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2443	__le16 fc = hdr->frame_control;
2444
2445	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2446		return RX_CONTINUE;
2447
2448	if (unlikely(!ieee80211_is_data(fc)))
2449		return RX_CONTINUE;
2450
2451	if (unlikely(!ieee80211_is_data_present(fc)))
2452		return RX_DROP_MONITOR;
2453
2454	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2455		switch (rx->sdata->vif.type) {
2456		case NL80211_IFTYPE_AP_VLAN:
2457			if (!rx->sdata->u.vlan.sta)
2458				return RX_DROP_UNUSABLE;
2459			break;
2460		case NL80211_IFTYPE_STATION:
2461			if (!rx->sdata->u.mgd.use_4addr)
2462				return RX_DROP_UNUSABLE;
2463			break;
2464		default:
2465			return RX_DROP_UNUSABLE;
2466		}
2467	}
2468
2469	if (is_multicast_ether_addr(hdr->addr1))
2470		return RX_DROP_UNUSABLE;
2471
2472	return __ieee80211_rx_h_amsdu(rx, 0);
2473}
2474
2475#ifdef CONFIG_MAC80211_MESH
2476static ieee80211_rx_result
2477ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2478{
2479	struct ieee80211_hdr *fwd_hdr, *hdr;
2480	struct ieee80211_tx_info *info;
2481	struct ieee80211s_hdr *mesh_hdr;
2482	struct sk_buff *skb = rx->skb, *fwd_skb;
2483	struct ieee80211_local *local = rx->local;
2484	struct ieee80211_sub_if_data *sdata = rx->sdata;
2485	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2486	u16 ac, q, hdrlen;
2487
2488	hdr = (struct ieee80211_hdr *) skb->data;
2489	hdrlen = ieee80211_hdrlen(hdr->frame_control);
2490
2491	/* make sure fixed part of mesh header is there, also checks skb len */
2492	if (!pskb_may_pull(rx->skb, hdrlen + 6))
2493		return RX_DROP_MONITOR;
2494
2495	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2496
2497	/* make sure full mesh header is there, also checks skb len */
2498	if (!pskb_may_pull(rx->skb,
2499			   hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
2500		return RX_DROP_MONITOR;
2501
2502	/* reload pointers */
2503	hdr = (struct ieee80211_hdr *) skb->data;
2504	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2505
2506	if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2507		return RX_DROP_MONITOR;
2508
2509	/* frame is in RMC, don't forward */
2510	if (ieee80211_is_data(hdr->frame_control) &&
2511	    is_multicast_ether_addr(hdr->addr1) &&
2512	    mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
2513		return RX_DROP_MONITOR;
2514
2515	if (!ieee80211_is_data(hdr->frame_control))
2516		return RX_CONTINUE;
2517
2518	if (!mesh_hdr->ttl)
2519		return RX_DROP_MONITOR;
2520
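	/*
	 * The mesh address extension (AE) subfield carries the addresses of
	 * proxied (external) stations; use them to learn or refresh the MPP
	 * (mesh proxy path) entry pointing at the mesh STA proxying them.
	 */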
2521	if (mesh_hdr->flags & MESH_FLAGS_AE) {
2522		struct mesh_path *mppath;
2523		char *proxied_addr;
2524		char *mpp_addr;
2525
2526		if (is_multicast_ether_addr(hdr->addr1)) {
2527			mpp_addr = hdr->addr3;
2528			proxied_addr = mesh_hdr->eaddr1;
2529		} else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
2530			    MESH_FLAGS_AE_A5_A6) {
2531			/* has_a4 already checked in ieee80211_rx_mesh_check */
2532			mpp_addr = hdr->addr4;
2533			proxied_addr = mesh_hdr->eaddr2;
2534		} else {
2535			return RX_DROP_MONITOR;
2536		}
2537
2538		rcu_read_lock();
2539		mppath = mpp_path_lookup(sdata, proxied_addr);
2540		if (!mppath) {
2541			mpp_path_add(sdata, proxied_addr, mpp_addr);
2542		} else {
2543			spin_lock_bh(&mppath->state_lock);
2544			if (!ether_addr_equal(mppath->mpp, mpp_addr))
2545				memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
2546			mppath->exp_time = jiffies;
2547			spin_unlock_bh(&mppath->state_lock);
2548		}
2549		rcu_read_unlock();
2550	}
2551
2552	/* Frame has reached destination.  Don't forward */
2553	if (!is_multicast_ether_addr(hdr->addr1) &&
2554	    ether_addr_equal(sdata->vif.addr, hdr->addr3))
2555		return RX_CONTINUE;
2556
2557	ac = ieee80211_select_queue_80211(sdata, skb, hdr);
2558	q = sdata->vif.hw_queue[ac];
2559	if (ieee80211_queue_stopped(&local->hw, q)) {
2560		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
2561		return RX_DROP_MONITOR;
2562	}
2563	skb_set_queue_mapping(skb, q);
2564
2565	if (!--mesh_hdr->ttl) {
2566		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
2567		goto out;
2568	}
2569
2570	if (!ifmsh->mshcfg.dot11MeshForwarding)
2571		goto out;
2572
2573	fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2574				       sdata->encrypt_headroom, 0, GFP_ATOMIC);
2575	if (!fwd_skb)
2576		goto out;
2577
2578	fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
2579	fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
2580	info = IEEE80211_SKB_CB(fwd_skb);
2581	memset(info, 0, sizeof(*info));
2582	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
2583	info->control.vif = &rx->sdata->vif;
2584	info->control.jiffies = jiffies;
2585	if (is_multicast_ether_addr(fwd_hdr->addr1)) {
2586		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2587		memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2588		/* update power mode indication when forwarding */
2589		ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2590	} else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2591		/* mesh power mode flags updated in mesh_nexthop_lookup */
2592		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2593	} else {
2594		/* unable to resolve next hop */
2595		mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2596				   fwd_hdr->addr3, 0,
2597				   WLAN_REASON_MESH_PATH_NOFORWARD,
2598				   fwd_hdr->addr2);
2599		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2600		kfree_skb(fwd_skb);
2601		return RX_DROP_MONITOR;
2602	}
2603
2604	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2605	ieee80211_add_pending_skb(local, fwd_skb);
2606 out:
2607	if (is_multicast_ether_addr(hdr->addr1))
2608		return RX_CONTINUE;
2609	return RX_DROP_MONITOR;
2610}
2611#endif
2612
2613static ieee80211_rx_result debug_noinline
2614ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2615{
2616	struct ieee80211_sub_if_data *sdata = rx->sdata;
2617	struct ieee80211_local *local = rx->local;
2618	struct net_device *dev = sdata->dev;
2619	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2620	__le16 fc = hdr->frame_control;
2621	bool port_control;
2622	int err;
2623
2624	if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2625		return RX_CONTINUE;
2626
2627	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2628		return RX_DROP_MONITOR;
2629
2630	/*
2631	 * Send unexpected-4addr-frame event to hostapd. For older versions,
2632	 * also drop the frame to cooked monitor interfaces.
2633	 */
2634	if (ieee80211_has_a4(hdr->frame_control) &&
2635	    sdata->vif.type == NL80211_IFTYPE_AP) {
2636		if (rx->sta &&
2637		    !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2638			cfg80211_rx_unexpected_4addr_frame(
2639				rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2640		return RX_DROP_MONITOR;
2641	}
2642
2643	err = __ieee80211_data_to_8023(rx, &port_control);
2644	if (unlikely(err))
2645		return RX_DROP_UNUSABLE;
2646
2647	if (!ieee80211_frame_allowed(rx, fc))
2648		return RX_DROP_MONITOR;
2649
2650	/* directly handle TDLS channel switch requests/responses */
2651	if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
2652						cpu_to_be16(ETH_P_TDLS))) {
2653		struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
2654
2655		if (pskb_may_pull(rx->skb,
2656				  offsetof(struct ieee80211_tdls_data, u)) &&
2657		    tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
2658		    tf->category == WLAN_CATEGORY_TDLS &&
2659		    (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
2660		     tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
2661			skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
2662			schedule_work(&local->tdls_chsw_work);
2663			if (rx->sta)
2664				rx->sta->rx_stats.packets++;
2665
2666			return RX_QUEUED;
2667		}
2668	}
2669
2670	if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2671	    unlikely(port_control) && sdata->bss) {
2672		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2673				     u.ap);
2674		dev = sdata->dev;
2675		rx->sdata = sdata;
2676	}
2677
2678	rx->skb->dev = dev;
2679
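	/*
	 * Re-arm the dynamic powersave timer on unicast traffic when
	 * mac80211 (rather than the hardware) implements dynamic PS
	 * and we are neither scanning nor off-channel.
	 */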
2680	if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
2681	    local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2682	    !is_multicast_ether_addr(
2683		    ((struct ethhdr *)rx->skb->data)->h_dest) &&
2684	    (!local->scanning &&
2685	     !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
2686		mod_timer(&local->dynamic_ps_timer, jiffies +
2687			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2688
2689	ieee80211_deliver_skb(rx);
2690
2691	return RX_QUEUED;
2692}
2693
2694static ieee80211_rx_result debug_noinline
2695ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
2696{
2697	struct sk_buff *skb = rx->skb;
2698	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2699	struct tid_ampdu_rx *tid_agg_rx;
2700	u16 start_seq_num;
2701	u16 tid;
2702
2703	if (likely(!ieee80211_is_ctl(bar->frame_control)))
2704		return RX_CONTINUE;
2705
2706	if (ieee80211_is_back_req(bar->frame_control)) {
2707		struct {
2708			__le16 control, start_seq_num;
2709		} __packed bar_data;
2710		struct ieee80211_event event = {
2711			.type = BAR_RX_EVENT,
2712		};
2713
2714		if (!rx->sta)
2715			return RX_DROP_MONITOR;
2716
2717		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2718				  &bar_data, sizeof(bar_data)))
2719			return RX_DROP_MONITOR;
2720
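		/*
		 * In the BAR Control field the TID occupies the top
		 * four bits (hence the shift by 12); the starting
		 * sequence number sits in bits 4-15 of the SSC
		 * (hence the shift by 4 below).
		 */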
2721		tid = le16_to_cpu(bar_data.control) >> 12;
2722
2723		if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
2724		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
2725			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
2726					     WLAN_BACK_RECIPIENT,
2727					     WLAN_REASON_QSTA_REQUIRE_SETUP);
2728
2729		tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2730		if (!tid_agg_rx)
2731			return RX_DROP_MONITOR;
2732
2733		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2734		event.u.ba.tid = tid;
2735		event.u.ba.ssn = start_seq_num;
2736		event.u.ba.sta = &rx->sta->sta;
2737
2738		/* reset session timer */
2739		if (tid_agg_rx->timeout)
2740			mod_timer(&tid_agg_rx->session_timer,
2741				  TU_TO_EXP_TIME(tid_agg_rx->timeout));
2742
2743		spin_lock(&tid_agg_rx->reorder_lock);
2744		/* release stored frames up to start of BAR */
2745		ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
2746						 start_seq_num, frames);
2747		spin_unlock(&tid_agg_rx->reorder_lock);
2748
2749		drv_event_callback(rx->local, rx->sdata, &event);
2750
2751		kfree_skb(skb);
2752		return RX_QUEUED;
2753	}
2754
2755	/*
2756	 * After this point, we only want management frames,
2757	 * so we can drop all remaining control frames to
2758	 * cooked monitor interfaces.
2759	 */
2760	return RX_DROP_MONITOR;
2761}
2762
2763static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2764					   struct ieee80211_mgmt *mgmt,
2765					   size_t len)
2766{
2767	struct ieee80211_local *local = sdata->local;
2768	struct sk_buff *skb;
2769	struct ieee80211_mgmt *resp;
2770
2771	if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
2772		/* Not to own unicast address */
2773		return;
2774	}
2775
2776	if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
2777	    !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
2778		/* Not from the current AP or not associated yet. */
2779		return;
2780	}
2781
2782	if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2783		/* Too short SA Query request frame */
2784		return;
2785	}
2786
2787	skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2788	if (skb == NULL)
2789		return;
2790
2791	skb_reserve(skb, local->hw.extra_tx_headroom);
2792	resp = skb_put_zero(skb, 24);
2793	memcpy(resp->da, mgmt->sa, ETH_ALEN);
2794	memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2795	memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2796	resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2797					  IEEE80211_STYPE_ACTION);
2798	skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2799	resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2800	resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2801	memcpy(resp->u.action.u.sa_query.trans_id,
2802	       mgmt->u.action.u.sa_query.trans_id,
2803	       WLAN_SA_QUERY_TR_ID_LEN);
2804
2805	ieee80211_tx_skb(sdata, skb);
2806}
2807
2808static ieee80211_rx_result debug_noinline
2809ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2810{
2811	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2812	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2813
2814	/*
2815	 * From here on, look only at management frames.
2816	 * Data and control frames are already handled,
2817	 * and unknown (reserved) frames are useless.
2818	 */
2819	if (rx->skb->len < 24)
2820		return RX_DROP_MONITOR;
2821
2822	if (!ieee80211_is_mgmt(mgmt->frame_control))
2823		return RX_DROP_MONITOR;
2824
2825	if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2826	    ieee80211_is_beacon(mgmt->frame_control) &&
2827	    !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2828		int sig = 0;
2829
2830		if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
2831		    !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
2832			sig = status->signal;
2833
2834		cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2835					    rx->skb->data, rx->skb->len,
2836					    status->freq, sig);
2837		rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2838	}
2839
2840	if (ieee80211_drop_unencrypted_mgmt(rx))
2841		return RX_DROP_UNUSABLE;
2842
2843	return RX_CONTINUE;
2844}
2845
2846static ieee80211_rx_result debug_noinline
2847ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2848{
2849	struct ieee80211_local *local = rx->local;
2850	struct ieee80211_sub_if_data *sdata = rx->sdata;
2851	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2852	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2853	int len = rx->skb->len;
2854
2855	if (!ieee80211_is_action(mgmt->frame_control))
2856		return RX_CONTINUE;
2857
2858	/* drop too small frames */
2859	if (len < IEEE80211_MIN_ACTION_SIZE)
2860		return RX_DROP_UNUSABLE;
2861
2862	if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
2863	    mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
2864	    mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
2865		return RX_DROP_UNUSABLE;
2866
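	/*
	 * The switch below funnels into three labels:
	 *   invalid - mark the frame as malformed so that later
	 *             handlers can return it to the sender
	 *   handled - fully processed here, free the skb
	 *   queue   - defer to the interface work item
	 */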
2867	switch (mgmt->u.action.category) {
2868	case WLAN_CATEGORY_HT:
2869		/* reject HT action frames from stations not supporting HT */
2870		if (!rx->sta->sta.ht_cap.ht_supported)
2871			goto invalid;
2872
2873		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2874		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2875		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2876		    sdata->vif.type != NL80211_IFTYPE_AP &&
2877		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
2878			break;
2879
2880		/* verify action & smps_control/chanwidth are present */
2881		if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2882			goto invalid;
2883
2884		switch (mgmt->u.action.u.ht_smps.action) {
2885		case WLAN_HT_ACTION_SMPS: {
2886			struct ieee80211_supported_band *sband;
2887			enum ieee80211_smps_mode smps_mode;
2888			struct sta_opmode_info sta_opmode = {};
2889
2890			/* convert to HT capability */
2891			switch (mgmt->u.action.u.ht_smps.smps_control) {
2892			case WLAN_HT_SMPS_CONTROL_DISABLED:
2893				smps_mode = IEEE80211_SMPS_OFF;
2894				break;
2895			case WLAN_HT_SMPS_CONTROL_STATIC:
2896				smps_mode = IEEE80211_SMPS_STATIC;
2897				break;
2898			case WLAN_HT_SMPS_CONTROL_DYNAMIC:
2899				smps_mode = IEEE80211_SMPS_DYNAMIC;
2900				break;
2901			default:
2902				goto invalid;
2903			}
2904
2905			/* if no change do nothing */
2906			if (rx->sta->sta.smps_mode == smps_mode)
2907				goto handled;
2908			rx->sta->sta.smps_mode = smps_mode;
2909			sta_opmode.smps_mode =
2910				ieee80211_smps_mode_to_smps_mode(smps_mode);
2911			sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
2912
2913			sband = rx->local->hw.wiphy->bands[status->band];
2914
2915			rate_control_rate_update(local, sband, rx->sta,
2916						 IEEE80211_RC_SMPS_CHANGED);
2917			cfg80211_sta_opmode_change_notify(sdata->dev,
2918							  rx->sta->addr,
2919							  &sta_opmode,
2920							  GFP_KERNEL);
2921			goto handled;
2922		}
2923		case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
2924			struct ieee80211_supported_band *sband;
2925			u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
2926			enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
2927			struct sta_opmode_info sta_opmode = {};
2928
2929			/* If it doesn't support 40 MHz it can't change ... */
2930			if (!(rx->sta->sta.ht_cap.cap &
2931					IEEE80211_HT_CAP_SUP_WIDTH_20_40))
2932				goto handled;
2933
2934			if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
2935				max_bw = IEEE80211_STA_RX_BW_20;
2936			else
2937				max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
2938
2939			/* set cur_max_bandwidth and recalc sta bw */
2940			rx->sta->cur_max_bandwidth = max_bw;
2941			new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
2942
2943			if (rx->sta->sta.bandwidth == new_bw)
2944				goto handled;
2945
2946			rx->sta->sta.bandwidth = new_bw;
2947			sband = rx->local->hw.wiphy->bands[status->band];
2948			sta_opmode.bw =
2949				ieee80211_sta_rx_bw_to_chan_width(rx->sta);
2950			sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
2951
2952			rate_control_rate_update(local, sband, rx->sta,
2953						 IEEE80211_RC_BW_CHANGED);
2954			cfg80211_sta_opmode_change_notify(sdata->dev,
2955							  rx->sta->addr,
2956							  &sta_opmode,
2957							  GFP_KERNEL);
2958			goto handled;
2959		}
2960		default:
2961			goto invalid;
2962		}
2963
2964		break;
2965	case WLAN_CATEGORY_PUBLIC:
2966		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2967			goto invalid;
2968		if (sdata->vif.type != NL80211_IFTYPE_STATION)
2969			break;
2970		if (!rx->sta)
2971			break;
2972		if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
2973			break;
2974		if (mgmt->u.action.u.ext_chan_switch.action_code !=
2975				WLAN_PUB_ACTION_EXT_CHANSW_ANN)
2976			break;
2977		if (len < offsetof(struct ieee80211_mgmt,
2978				   u.action.u.ext_chan_switch.variable))
2979			goto invalid;
2980		goto queue;
2981	case WLAN_CATEGORY_VHT:
2982		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2983		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2984		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2985		    sdata->vif.type != NL80211_IFTYPE_AP &&
2986		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
2987			break;
2988
2989		/* verify action code is present */
2990		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2991			goto invalid;
2992
2993		switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
2994		case WLAN_VHT_ACTION_OPMODE_NOTIF: {
2995			/* verify opmode is present */
2996			if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2997				goto invalid;
2998			goto queue;
2999		}
3000		case WLAN_VHT_ACTION_GROUPID_MGMT: {
3001			if (len < IEEE80211_MIN_ACTION_SIZE + 25)
3002				goto invalid;
3003			goto queue;
3004		}
3005		default:
3006			break;
3007		}
3008		break;
3009	case WLAN_CATEGORY_BACK:
3010		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3011		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3012		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3013		    sdata->vif.type != NL80211_IFTYPE_AP &&
3014		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
3015			break;
3016
3017		/* verify action_code is present */
3018		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3019			break;
3020
3021		switch (mgmt->u.action.u.addba_req.action_code) {
3022		case WLAN_ACTION_ADDBA_REQ:
3023			if (len < (IEEE80211_MIN_ACTION_SIZE +
3024				   sizeof(mgmt->u.action.u.addba_req)))
3025				goto invalid;
3026			break;
3027		case WLAN_ACTION_ADDBA_RESP:
3028			if (len < (IEEE80211_MIN_ACTION_SIZE +
3029				   sizeof(mgmt->u.action.u.addba_resp)))
3030				goto invalid;
3031			break;
3032		case WLAN_ACTION_DELBA:
3033			if (len < (IEEE80211_MIN_ACTION_SIZE +
3034				   sizeof(mgmt->u.action.u.delba)))
3035				goto invalid;
3036			break;
3037		default:
3038			goto invalid;
3039		}
3040
3041		goto queue;
3042	case WLAN_CATEGORY_SPECTRUM_MGMT:
3043		/* verify action_code is present */
3044		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3045			break;
3046
3047		switch (mgmt->u.action.u.measurement.action_code) {
3048		case WLAN_ACTION_SPCT_MSR_REQ:
3049			if (status->band != NL80211_BAND_5GHZ)
3050				break;
3051
3052			if (len < (IEEE80211_MIN_ACTION_SIZE +
3053				   sizeof(mgmt->u.action.u.measurement)))
3054				break;
3055
3056			if (sdata->vif.type != NL80211_IFTYPE_STATION)
3057				break;
3058
3059			ieee80211_process_measurement_req(sdata, mgmt, len);
3060			goto handled;
3061		case WLAN_ACTION_SPCT_CHL_SWITCH: {
3062			u8 *bssid;
3063			if (len < (IEEE80211_MIN_ACTION_SIZE +
3064				   sizeof(mgmt->u.action.u.chan_switch)))
3065				break;
3066
3067			if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3068			    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3069			    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3070				break;
3071
3072			if (sdata->vif.type == NL80211_IFTYPE_STATION)
3073				bssid = sdata->u.mgd.bssid;
3074			else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
3075				bssid = sdata->u.ibss.bssid;
3076			else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3077				bssid = mgmt->sa;
3078			else
3079				break;
3080
3081			if (!ether_addr_equal(mgmt->bssid, bssid))
3082				break;
3083
3084			goto queue;
3085			}
3086		}
3087		break;
3088	case WLAN_CATEGORY_SA_QUERY:
3089		if (len < (IEEE80211_MIN_ACTION_SIZE +
3090			   sizeof(mgmt->u.action.u.sa_query)))
3091			break;
3092
3093		switch (mgmt->u.action.u.sa_query.action) {
3094		case WLAN_ACTION_SA_QUERY_REQUEST:
3095			if (sdata->vif.type != NL80211_IFTYPE_STATION)
3096				break;
3097			ieee80211_process_sa_query_req(sdata, mgmt, len);
3098			goto handled;
3099		}
3100		break;
3101	case WLAN_CATEGORY_SELF_PROTECTED:
3102		if (len < (IEEE80211_MIN_ACTION_SIZE +
3103			   sizeof(mgmt->u.action.u.self_prot.action_code)))
3104			break;
3105
3106		switch (mgmt->u.action.u.self_prot.action_code) {
3107		case WLAN_SP_MESH_PEERING_OPEN:
3108		case WLAN_SP_MESH_PEERING_CLOSE:
3109		case WLAN_SP_MESH_PEERING_CONFIRM:
3110			if (!ieee80211_vif_is_mesh(&sdata->vif))
3111				goto invalid;
3112			if (sdata->u.mesh.user_mpm)
3113				/* userspace handles this frame */
3114				break;
3115			goto queue;
3116		case WLAN_SP_MGK_INFORM:
3117		case WLAN_SP_MGK_ACK:
3118			if (!ieee80211_vif_is_mesh(&sdata->vif))
3119				goto invalid;
3120			break;
3121		}
3122		break;
3123	case WLAN_CATEGORY_MESH_ACTION:
3124		if (len < (IEEE80211_MIN_ACTION_SIZE +
3125			   sizeof(mgmt->u.action.u.mesh_action.action_code)))
3126			break;
3127
3128		if (!ieee80211_vif_is_mesh(&sdata->vif))
3129			break;
3130		if (mesh_action_is_path_sel(mgmt) &&
3131		    !mesh_path_sel_is_hwmp(sdata))
3132			break;
3133		goto queue;
3134	}
3135
3136	return RX_CONTINUE;
3137
3138 invalid:
3139	status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3140	/* will return in the next handlers */
3141	return RX_CONTINUE;
3142
3143 handled:
3144	if (rx->sta)
3145		rx->sta->rx_stats.packets++;
3146	dev_kfree_skb(rx->skb);
3147	return RX_QUEUED;
3148
3149 queue:
3150	skb_queue_tail(&sdata->skb_queue, rx->skb);
3151	ieee80211_queue_work(&local->hw, &sdata->work);
3152	if (rx->sta)
3153		rx->sta->rx_stats.packets++;
3154	return RX_QUEUED;
3155}
3156
3157static ieee80211_rx_result debug_noinline
3158ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3159{
3160	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3161	int sig = 0;
3162
3163	/* skip known-bad action frames and return them in the next handler */
3164	if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3165		return RX_CONTINUE;
3166
3167	/*
3168	 * Getting here means the kernel doesn't know how to handle
3169	 * it, but maybe userspace does ... include returned frames
3170	 * so userspace can register for those and learn whether
3171	 * frames it transmitted were processed or returned.
3172	 */
3173
3174	if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3175	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3176		sig = status->signal;
3177
3178	if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
3179			     rx->skb->data, rx->skb->len, 0)) {
3180		if (rx->sta)
3181			rx->sta->rx_stats.packets++;
3182		dev_kfree_skb(rx->skb);
3183		return RX_QUEUED;
3184	}
3185
3186	return RX_CONTINUE;
3187}
3188
3189static ieee80211_rx_result debug_noinline
3190ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
3191{
3192	struct ieee80211_local *local = rx->local;
3193	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3194	struct sk_buff *nskb;
3195	struct ieee80211_sub_if_data *sdata = rx->sdata;
3196	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3197
3198	if (!ieee80211_is_action(mgmt->frame_control))
3199		return RX_CONTINUE;
3200
3201	/*
3202	 * For AP mode, hostapd is responsible for handling any action
3203	 * frames that we didn't handle, including returning unknown
3204	 * ones. For all other modes we will return them to the sender,
3205	 * setting the 0x80 bit in the action category, as required by
3206	 * 802.11-2012 9.24.4.
3207	 * Newer versions of hostapd shall also use the management frame
3208	 * registration mechanisms, but older ones still use cooked
3209	 * monitor interfaces so push all frames there.
3210	 */
3211	if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
3212	    (sdata->vif.type == NL80211_IFTYPE_AP ||
3213	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
3214		return RX_DROP_MONITOR;
3215
3216	if (is_multicast_ether_addr(mgmt->da))
3217		return RX_DROP_MONITOR;
3218
3219	/* do not return rejected action frames */
3220	if (mgmt->u.action.category & 0x80)
3221		return RX_DROP_UNUSABLE;
3222
3223	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
3224			       GFP_ATOMIC);
3225	if (nskb) {
3226		struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
3227
3228		nmgmt->u.action.category |= 0x80;
3229		memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
3230		memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
3231
3232		memset(nskb->cb, 0, sizeof(nskb->cb));
3233
3234		if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
3235			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
3236
3237			info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
3238				      IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
3239				      IEEE80211_TX_CTL_NO_CCK_RATE;
3240			if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
3241				info->hw_queue =
3242					local->hw.offchannel_tx_hw_queue;
3243		}
3244
3245		__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
3246					    status->band);
3247	}
3248	dev_kfree_skb(rx->skb);
3249	return RX_QUEUED;
3250}
3251
3252static ieee80211_rx_result debug_noinline
3253ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
3254{
3255	struct ieee80211_sub_if_data *sdata = rx->sdata;
3256	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3257	__le16 stype;
3258
3259	stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
3260
3261	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
3262	    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3263	    sdata->vif.type != NL80211_IFTYPE_OCB &&
3264	    sdata->vif.type != NL80211_IFTYPE_STATION)
3265		return RX_DROP_MONITOR;
3266
3267	switch (stype) {
3268	case cpu_to_le16(IEEE80211_STYPE_AUTH):
3269	case cpu_to_le16(IEEE80211_STYPE_BEACON):
3270	case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
3271		/* process for all: mesh, mlme, ibss */
3272		break;
3273	case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
3274	case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
3275	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
3276	case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
3277		if (is_multicast_ether_addr(mgmt->da) &&
3278		    !is_broadcast_ether_addr(mgmt->da))
3279			return RX_DROP_MONITOR;
3280
3281		/* process only for station */
3282		if (sdata->vif.type != NL80211_IFTYPE_STATION)
3283			return RX_DROP_MONITOR;
3284		break;
3285	case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
3286		/* process only for ibss and mesh */
3287		if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3288		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3289			return RX_DROP_MONITOR;
3290		break;
3291	default:
3292		return RX_DROP_MONITOR;
3293	}
3294
3295	/* queue up frame and kick off work to process it */
3296	skb_queue_tail(&sdata->skb_queue, rx->skb);
3297	ieee80211_queue_work(&rx->local->hw, &sdata->work);
3298	if (rx->sta)
3299		rx->sta->rx_stats.packets++;
3300
3301	return RX_QUEUED;
3302}
3303
3304static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
3305					struct ieee80211_rate *rate)
3306{
3307	struct ieee80211_sub_if_data *sdata;
3308	struct ieee80211_local *local = rx->local;
3309	struct sk_buff *skb = rx->skb, *skb2;
3310	struct net_device *prev_dev = NULL;
3311	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3312	int needed_headroom;
3313
3314	/*
3315	 * If cooked monitor has been processed already, then
3316	 * don't do it again. If not, set the flag.
3317	 */
3318	if (rx->flags & IEEE80211_RX_CMNTR)
3319		goto out_free_skb;
3320	rx->flags |= IEEE80211_RX_CMNTR;
3321
3322	/* If there are no cooked monitor interfaces, just free the SKB */
3323	if (!local->cooked_mntrs)
3324		goto out_free_skb;
3325
3326	/* radiotap vendor data has long since been removed at this point */
3327	status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
3328	/* room for the radiotap header based on driver features */
3329	needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
3330
3331	if (skb_headroom(skb) < needed_headroom &&
3332	    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
3333		goto out_free_skb;
3334
3335	/* prepend radiotap information */
3336	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
3337					 false);
3338
3339	skb_reset_mac_header(skb);
3340	skb->ip_summed = CHECKSUM_UNNECESSARY;
3341	skb->pkt_type = PACKET_OTHERHOST;
3342	skb->protocol = htons(ETH_P_802_2);
3343
3344	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
3345		if (!ieee80211_sdata_running(sdata))
3346			continue;
3347
3348		if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
3349		    !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
3350			continue;
3351
3352		if (prev_dev) {
3353			skb2 = skb_clone(skb, GFP_ATOMIC);
3354			if (skb2) {
3355				skb2->dev = prev_dev;
3356				netif_receive_skb(skb2);
3357			}
3358		}
3359
3360		prev_dev = sdata->dev;
3361		ieee80211_rx_stats(sdata->dev, skb->len);
3362	}
3363
3364	if (prev_dev) {
3365		skb->dev = prev_dev;
3366		netif_receive_skb(skb);
3367		return;
3368	}
3369
3370 out_free_skb:
3371	dev_kfree_skb(skb);
3372}
3373
3374static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
3375					 ieee80211_rx_result res)
3376{
3377	switch (res) {
3378	case RX_DROP_MONITOR:
3379		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3380		if (rx->sta)
3381			rx->sta->rx_stats.dropped++;
3382		/* fall through */
3383	case RX_CONTINUE: {
3384		struct ieee80211_rate *rate = NULL;
3385		struct ieee80211_supported_band *sband;
3386		struct ieee80211_rx_status *status;
3387
3388		status = IEEE80211_SKB_RXCB((rx->skb));
3389
3390		sband = rx->local->hw.wiphy->bands[status->band];
3391		if (!(status->encoding == RX_ENC_HT) &&
3392		    !(status->encoding == RX_ENC_VHT))
3393			rate = &sband->bitrates[status->rate_idx];
3394
3395		ieee80211_rx_cooked_monitor(rx, rate);
3396		break;
3397		}
3398	case RX_DROP_UNUSABLE:
3399		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3400		if (rx->sta)
3401			rx->sta->rx_stats.dropped++;
3402		dev_kfree_skb(rx->skb);
3403		break;
3404	case RX_QUEUED:
3405		I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
3406		break;
3407	}
3408}
3409
3410static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
3411				  struct sk_buff_head *frames)
3412{
3413	ieee80211_rx_result res = RX_DROP_MONITOR;
3414	struct sk_buff *skb;
3415
3416#define CALL_RXH(rxh)			\
3417	do {				\
3418		res = rxh(rx);		\
3419		if (res != RX_CONTINUE)	\
3420			goto rxh_next;  \
3421	} while (0)
3422
3423	/* Lock here to avoid hitting all of the data used in the RX
3424	 * path (e.g. key data, station data, ...) concurrently when
3425	 * a frame is released from the reorder buffer due to timeout
3426	 * from the timer, potentially concurrently with RX from the
3427	 * driver.
3428	 */
3429	spin_lock_bh(&rx->local->rx_path_lock);
3430
3431	while ((skb = __skb_dequeue(frames))) {
3432		/*
3433		 * all the other fields are valid across frames
3434		 * that belong to an aMPDU since they are on the
3435		 * same TID from the same station
3436		 */
3437		rx->skb = skb;
3438
3439		CALL_RXH(ieee80211_rx_h_check_more_data);
3440		CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
3441		CALL_RXH(ieee80211_rx_h_sta_process);
3442		CALL_RXH(ieee80211_rx_h_decrypt);
3443		CALL_RXH(ieee80211_rx_h_defragment);
3444		CALL_RXH(ieee80211_rx_h_michael_mic_verify);
3445		/* must be after MMIC verify so header is counted in MPDU mic */
3446#ifdef CONFIG_MAC80211_MESH
3447		if (ieee80211_vif_is_mesh(&rx->sdata->vif))
3448			CALL_RXH(ieee80211_rx_h_mesh_fwding);
3449#endif
3450		CALL_RXH(ieee80211_rx_h_amsdu);
3451		CALL_RXH(ieee80211_rx_h_data);
3452
3453		/* special treatment -- needs the queue */
3454		res = ieee80211_rx_h_ctrl(rx, frames);
3455		if (res != RX_CONTINUE)
3456			goto rxh_next;
3457
3458		CALL_RXH(ieee80211_rx_h_mgmt_check);
3459		CALL_RXH(ieee80211_rx_h_action);
3460		CALL_RXH(ieee80211_rx_h_userspace_mgmt);
3461		CALL_RXH(ieee80211_rx_h_action_return);
3462		CALL_RXH(ieee80211_rx_h_mgmt);
3463
3464 rxh_next:
3465		ieee80211_rx_handlers_result(rx, res);
3466
3467#undef CALL_RXH
3468	}
3469
3470	spin_unlock_bh(&rx->local->rx_path_lock);
3471}
3472
3473static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
3474{
3475	struct sk_buff_head reorder_release;
3476	ieee80211_rx_result res = RX_DROP_MONITOR;
3477
3478	__skb_queue_head_init(&reorder_release);
3479
3480#define CALL_RXH(rxh)			\
3481	do {				\
3482		res = rxh(rx);		\
3483		if (res != RX_CONTINUE)	\
3484			goto rxh_next;  \
3485	} while (0)
3486
3487	CALL_RXH(ieee80211_rx_h_check_dup);
3488	CALL_RXH(ieee80211_rx_h_check);
3489
3490	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
3491
3492	ieee80211_rx_handlers(rx, &reorder_release);
3493	return;
3494
3495 rxh_next:
3496	ieee80211_rx_handlers_result(rx, res);
3497
3498#undef CALL_RXH
3499}
3500
3501/*
3502 * This function makes calls into the RX path, therefore
3503 * it has to be invoked under RCU read lock.
3504 */
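/*
 * A typical caller (shown only for illustration) is the reorder
 * release timer, which does roughly:
 *
 *	rcu_read_lock();
 *	ieee80211_release_reorder_timeout(sta, tid);
 *	rcu_read_unlock();
 */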
3505void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
3506{
3507	struct sk_buff_head frames;
3508	struct ieee80211_rx_data rx = {
3509		.sta = sta,
3510		.sdata = sta->sdata,
3511		.local = sta->local,
3512		/* This is OK -- must be QoS data frame */
3513		.security_idx = tid,
3514		.seqno_idx = tid,
3515		.napi = NULL, /* must be NULL to not have races */
3516	};
3517	struct tid_ampdu_rx *tid_agg_rx;
3518
3519	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3520	if (!tid_agg_rx)
3521		return;
3522
3523	__skb_queue_head_init(&frames);
3524
3525	spin_lock(&tid_agg_rx->reorder_lock);
3526	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3527	spin_unlock(&tid_agg_rx->reorder_lock);
3528
3529	if (!skb_queue_empty(&frames)) {
3530		struct ieee80211_event event = {
3531			.type = BA_FRAME_TIMEOUT,
3532			.u.ba.tid = tid,
3533			.u.ba.sta = &sta->sta,
3534		};
3535		drv_event_callback(rx.local, rx.sdata, &event);
3536	}
3537
3538	ieee80211_rx_handlers(&rx, &frames);
3539}
3540
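/*
 * Driver-facing helper (exported below): when the device filtered out
 * part of a BA reorder window it reports the new SSN, a bitmap of the
 * filtered MPDUs and how many MPDUs it actually received; mac80211
 * then releases and/or marks the corresponding reorder buffer slots.
 */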
3541void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
3542					  u16 ssn, u64 filtered,
3543					  u16 received_mpdus)
3544{
3545	struct sta_info *sta;
3546	struct tid_ampdu_rx *tid_agg_rx;
3547	struct sk_buff_head frames;
3548	struct ieee80211_rx_data rx = {
3549		/* This is OK -- must be QoS data frame */
3550		.security_idx = tid,
3551		.seqno_idx = tid,
3552	};
3553	int i, diff;
3554
3555	if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
3556		return;
3557
3558	__skb_queue_head_init(&frames);
3559
3560	sta = container_of(pubsta, struct sta_info, sta);
3561
3562	rx.sta = sta;
3563	rx.sdata = sta->sdata;
3564	rx.local = sta->local;
3565
3566	rcu_read_lock();
3567	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3568	if (!tid_agg_rx)
3569		goto out;
3570
3571	spin_lock_bh(&tid_agg_rx->reorder_lock);
3572
3573	if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
3574		int release;
3575
3576		/* release all frames in the reorder buffer */
3577		release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
3578			   IEEE80211_SN_MODULO;
3579		ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
3580						 release, &frames);
3581		/* update ssn to match received ssn */
3582		tid_agg_rx->head_seq_num = ssn;
3583	} else {
3584		ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
3585						 &frames);
3586	}
3587
3588	/* handle the case that received ssn is behind the mac ssn.
3589	 * it can be tid_agg_rx->buf_size behind and still be valid */
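	/*
	 * Worked example: head_seq_num = 12, ssn = 10, buf_size = 64
	 * gives diff = 2, so the filtered bitmap is shifted right by
	 * two and ssn is advanced to line up with head_seq_num.
	 */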
3590	diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
3591	if (diff >= tid_agg_rx->buf_size) {
3592		tid_agg_rx->reorder_buf_filtered = 0;
3593		goto release;
3594	}
3595	filtered = filtered >> diff;
3596	ssn += diff;
3597
3598	/* update bitmap */
3599	for (i = 0; i < tid_agg_rx->buf_size; i++) {
3600		int index = (ssn + i) % tid_agg_rx->buf_size;
3601
3602		tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
3603		if (filtered & BIT_ULL(i))
3604			tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
3605	}
3606
3607	/* now process also frames that the filter marking released */
3608	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3609
3610release:
3611	spin_unlock_bh(&tid_agg_rx->reorder_lock);
3612
3613	ieee80211_rx_handlers(&rx, &frames);
3614
3615 out:
3616	rcu_read_unlock();
3617}
3618EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
3619
3620/* main receive path */
3621
3622static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3623{
3624	struct ieee80211_sub_if_data *sdata = rx->sdata;
3625	struct sk_buff *skb = rx->skb;
3626	struct ieee80211_hdr *hdr = (void *)skb->data;
3627	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3628	u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
3629	bool multicast = is_multicast_ether_addr(hdr->addr1);
3630
3631	switch (sdata->vif.type) {
3632	case NL80211_IFTYPE_STATION:
3633		if (!bssid && !sdata->u.mgd.use_4addr)
3634			return false;
3635		if (multicast)
3636			return true;
3637		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3638	case NL80211_IFTYPE_ADHOC:
3639		if (!bssid)
3640			return false;
3641		if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
3642		    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
3643			return false;
3644		if (ieee80211_is_beacon(hdr->frame_control))
3645			return true;
3646		if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
3647			return false;
3648		if (!multicast &&
3649		    !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3650			return false;
3651		if (!rx->sta) {
3652			int rate_idx;
3653			if (status->encoding != RX_ENC_LEGACY)
3654				rate_idx = 0; /* TODO: HT/VHT rates */
3655			else
3656				rate_idx = status->rate_idx;
3657			ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
3658						 BIT(rate_idx));
3659		}
3660		return true;
3661	case NL80211_IFTYPE_OCB:
3662		if (!bssid)
3663			return false;
3664		if (!ieee80211_is_data_present(hdr->frame_control))
3665			return false;
3666		if (!is_broadcast_ether_addr(bssid))
3667			return false;
3668		if (!multicast &&
3669		    !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
3670			return false;
3671		if (!rx->sta) {
3672			int rate_idx;
3673			if (status->encoding != RX_ENC_LEGACY)
3674				rate_idx = 0; /* TODO: HT rates */
3675			else
3676				rate_idx = status->rate_idx;
3677			ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
3678						BIT(rate_idx));
3679		}
3680		return true;
3681	case NL80211_IFTYPE_MESH_POINT:
3682		if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
3683			return false;
3684		if (multicast)
3685			return true;
3686		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3687	case NL80211_IFTYPE_AP_VLAN:
3688	case NL80211_IFTYPE_AP:
3689		if (!bssid)
3690			return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3691
3692		if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
3693			/*
3694			 * Accept public action frames even when the
3695			 * BSSID doesn't match, this is used for P2P
3696			 * and location updates. Note that mac80211
3697			 * itself never looks at these frames.
3698			 */
3699			if (!multicast &&
3700			    !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3701				return false;
3702			if (ieee80211_is_public_action(hdr, skb->len))
3703				return true;
3704			return ieee80211_is_beacon(hdr->frame_control);
3705		}
3706
3707		if (!ieee80211_has_tods(hdr->frame_control)) {
3708			/* ignore data frames to TDLS-peers */
3709			if (ieee80211_is_data(hdr->frame_control))
3710				return false;
3711			/* ignore action frames to TDLS-peers */
3712			if (ieee80211_is_action(hdr->frame_control) &&
3713			    !is_broadcast_ether_addr(bssid) &&
3714			    !ether_addr_equal(bssid, hdr->addr1))
3715				return false;
3716		}
3717
3718		/*
3719		 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
3720		 * the BSSID - we've checked that already but may have accepted
3721		 * the wildcard (ff:ff:ff:ff:ff:ff).
3722		 *
3723		 * It also says:
3724		 *	The BSSID of the Data frame is determined as follows:
3725		 *	a) If the STA is contained within an AP or is associated
3726		 *	   with an AP, the BSSID is the address currently in use
3727		 *	   by the STA contained in the AP.
3728		 *
3729		 * So we should not accept data frames with an address that's
3730		 * multicast.
3731		 *
3732		 * Accepting it also opens a security problem because stations
3733		 * could encrypt it with the GTK and inject traffic that way.
3734		 */
3735		if (ieee80211_is_data(hdr->frame_control) && multicast)
3736			return false;
3737
3738		return true;
3739	case NL80211_IFTYPE_WDS:
3740		if (bssid || !ieee80211_is_data(hdr->frame_control))
3741			return false;
3742		return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
3743	case NL80211_IFTYPE_P2P_DEVICE:
3744		return ieee80211_is_public_action(hdr, skb->len) ||
3745		       ieee80211_is_probe_req(hdr->frame_control) ||
3746		       ieee80211_is_probe_resp(hdr->frame_control) ||
3747		       ieee80211_is_beacon(hdr->frame_control);
3748	case NL80211_IFTYPE_NAN:
3749		/* Currently no frames on NAN interface are allowed */
3750		return false;
3751	default:
3752		break;
3753	}
3754
3755	WARN_ON_ONCE(1);
3756	return false;
3757}
3758
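/*
 * (Re)evaluate whether the fast-rx path may be used for this station;
 * it is only enabled when, among other things, the interface type,
 * powersave configuration and pairwise cipher allow the shortcut,
 * otherwise sta->fast_rx is cleared and frames go through the regular
 * RX handlers.
 */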
3759void ieee80211_check_fast_rx(struct sta_info *sta)
3760{
3761	struct ieee80211_sub_if_data *sdata = sta->sdata;
3762	struct ieee80211_local *local = sdata->local;
3763	struct ieee80211_key *key;
3764	struct ieee80211_fast_rx fastrx = {
3765		.dev = sdata->dev,
3766		.vif_type = sdata->vif.type,
3767		.control_port_protocol = sdata->control_port_protocol,
3768	}, *old, *new = NULL;
3769	bool assign = false;
3770
3771	/* use sparse to check that we don't return without updating */
3772	__acquire(check_fast_rx);
3773
3774	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
3775	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
3776	ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
3777	ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
3778
3779	fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
3780
3781	/* fast-rx doesn't do reordering */
3782	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
3783	    !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
3784		goto clear;
3785
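	/*
	 * The da_offs/sa_offs values below follow the 802.11 address
	 * rules: with FromDS set (AP to STA) DA is addr1 and SA is
	 * addr3, with ToDS set (STA to AP) DA is addr3 and SA is
	 * addr2, and in the 4-address case SA moves to addr4.
	 */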
3786	switch (sdata->vif.type) {
3787	case NL80211_IFTYPE_STATION:
3788		if (sta->sta.tdls) {
3789			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
3790			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
3791			fastrx.expected_ds_bits = 0;
3792		} else {
3793			fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
3794			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
3795			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
3796			fastrx.expected_ds_bits =
3797				cpu_to_le16(IEEE80211_FCTL_FROMDS);
3798		}
3799
3800		if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
3801			fastrx.expected_ds_bits |=
3802				cpu_to_le16(IEEE80211_FCTL_TODS);
3803			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
3804			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
3805		}
3806
3807		if (!sdata->u.mgd.powersave)
3808			break;
3809
3810		/* software powersave is a huge mess, avoid all of it */
3811		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
3812			goto clear;
3813		if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
3814		    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
3815			goto clear;
3816		break;
3817	case NL80211_IFTYPE_AP_VLAN:
3818	case NL80211_IFTYPE_AP:
3819		/* parallel-rx requires this, at least with calls to
3820		 * ieee80211_sta_ps_transition()
3821		 */
3822		if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
3823			goto clear;
3824		fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
3825		fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
3826		fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
3827
3828		fastrx.internal_forward =
3829			!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
3830			(sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
3831			 !sdata->u.vlan.sta);
3832
3833		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
3834		    sdata->u.vlan.sta) {
3835			fastrx.expected_ds_bits |=
3836				cpu_to_le16(IEEE80211_FCTL_FROMDS);
3837			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
3838			fastrx.internal_forward = 0;
3839		}
3840
3841		break;
3842	default:
3843		goto clear;
3844	}
3845
3846	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
3847		goto clear;
3848
3849	rcu_read_lock();
3850	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
3851	if (key) {
3852		switch (key->conf.cipher) {
3853		case WLAN_CIPHER_SUITE_TKIP:
3854			/* we don't want to deal with MMIC in fast-rx */
3855			goto clear_rcu;
3856		case WLAN_CIPHER_SUITE_CCMP:
3857		case WLAN_CIPHER_SUITE_CCMP_256:
3858		case WLAN_CIPHER_SUITE_GCMP:
3859		case WLAN_CIPHER_SUITE_GCMP_256:
3860			break;
3861		default:
3862			/* we also don't want to deal with WEP or cipher scheme
3863			 * since those require looking up the key idx in the
3864			 * frame, rather than assuming the PTK is used
3865			 * (we need to revisit this once we implement the real
3866			 * PTK index, which is now valid in the spec, but we
3867			 * haven't implemented that part yet)
3868			 */
3869			goto clear_rcu;
3870		}
3871
3872		fastrx.key = true;
3873		fastrx.icv_len = key->conf.icv_len;
3874	}
3875
3876	assign = true;
3877 clear_rcu:
3878	rcu_read_unlock();
3879 clear:
3880	__release(check_fast_rx);
3881
3882	if (assign)
3883		new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
3884
3885	spin_lock_bh(&sta->lock);
3886	old = rcu_dereference_protected(sta->fast_rx, true);
3887	rcu_assign_pointer(sta->fast_rx, new);
3888	spin_unlock_bh(&sta->lock);
3889
3890	if (old)
3891		kfree_rcu(old, rcu_head);
3892}
3893
3894void ieee80211_clear_fast_rx(struct sta_info *sta)
3895{
3896	struct ieee80211_fast_rx *old;
3897
3898	spin_lock_bh(&sta->lock);
3899	old = rcu_dereference_protected(sta->fast_rx, true);
3900	RCU_INIT_POINTER(sta->fast_rx, NULL);
3901	spin_unlock_bh(&sta->lock);
3902
3903	if (old)
3904		kfree_rcu(old, rcu_head);
3905}
3906
3907void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
3908{
3909	struct ieee80211_local *local = sdata->local;
3910	struct sta_info *sta;
3911
3912	lockdep_assert_held(&local->sta_mtx);
3913
3914	list_for_each_entry_rcu(sta, &local->sta_list, list) {
3915		if (sdata != sta->sdata &&
3916		    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
3917			continue;
3918		ieee80211_check_fast_rx(sta);
3919	}
3920}
3921
3922void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
3923{
3924	struct ieee80211_local *local = sdata->local;
3925
3926	mutex_lock(&local->sta_mtx);
3927	__ieee80211_check_fast_rx_iface(sdata);
3928	mutex_unlock(&local->sta_mtx);
3929}
3930
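/*
 * Fast-rx data path: bypasses the full handler chain for simple,
 * already-validated unicast data frames and converts them to 802.3
 * in place. Returns true if the skb was consumed here (delivered or
 * dropped), false to punt the frame back to the regular handlers.
 */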
3931static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
3932				     struct ieee80211_fast_rx *fast_rx)
3933{
3934	struct sk_buff *skb = rx->skb;
3935	struct ieee80211_hdr *hdr = (void *)skb->data;
3936	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3937	struct sta_info *sta = rx->sta;
3938	int orig_len = skb->len;
3939	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
3940	int snap_offs = hdrlen;
3941	struct {
3942		u8 snap[sizeof(rfc1042_header)];
3943		__be16 proto;
3944	} *payload __aligned(2);
3945	struct {
3946		u8 da[ETH_ALEN];
3947		u8 sa[ETH_ALEN];
3948	} addrs __aligned(2);
3949	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
3950
3951	if (fast_rx->uses_rss)
3952		stats = this_cpu_ptr(sta->pcpu_rx_stats);
3953
3954	/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
3955	 * to a common data structure; drivers can implement that per queue
3956	 * but we don't have that information in mac80211
3957	 */
3958	if (!(status->flag & RX_FLAG_DUP_VALIDATED))
3959		return false;
3960
3961#define FAST_RX_CRYPT_FLAGS	(RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
3962
3963	/* If using encryption, we also need to have:
3964	 *  - PN_VALIDATED: similar, but the implementation is tricky
3965	 *  - DECRYPTED: necessary for PN_VALIDATED
3966	 */
3967	if (fast_rx->key &&
3968	    (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
3969		return false;
3970
3971	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
3972		return false;
3973
3974	if (unlikely(ieee80211_is_frag(hdr)))
3975		return false;
3976
3977	/* Since our interface address cannot be multicast, this
3978	 * implicitly also rejects multicast frames without the
3979	 * explicit check.
3980	 *
3981	 * We shouldn't get any *data* frames not addressed to us
3982	 * (AP mode will accept multicast *management* frames), but
3983	 * punting here will make it go through the full checks in
3984	 * ieee80211_accept_frame().
3985	 */
3986	if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
3987		return false;
3988
3989	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
3990					      IEEE80211_FCTL_TODS)) !=
3991	    fast_rx->expected_ds_bits)
3992		return false;
3993
3994	/* assign the key to drop unencrypted frames (later)
3995	 * and strip the IV/MIC if necessary
3996	 */
3997	if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
3998		/* GCMP header length is the same */
3999		snap_offs += IEEE80211_CCMP_HDR_LEN;
4000	}
4001
4002	if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
4003		if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
4004			goto drop;
4005
4006		payload = (void *)(skb->data + snap_offs);
4007
4008		if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
4009			return false;
4010
4011		/* Don't handle these here since they require special code.
4012		 * Accept AARP and IPX even though they should come with a
4013		 * bridge-tunnel header - but if we get them this way then
4014		 * there's little point in discarding them.
4015		 */
4016		if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
4017			     payload->proto == fast_rx->control_port_protocol))
4018			return false;
4019	}
4020
4021	/* after this point, don't punt to the slowpath! */
4022
4023	if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
4024	    pskb_trim(skb, skb->len - fast_rx->icv_len))
4025		goto drop;
4026
4027	if (unlikely(fast_rx->sta_notify)) {
4028		ieee80211_sta_rx_notify(rx->sdata, hdr);
4029		fast_rx->sta_notify = false;
4030	}
4031
4032	/* statistics part of ieee80211_rx_h_sta_process() */
4033	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
4034		stats->last_signal = status->signal;
4035		if (!fast_rx->uses_rss)
4036			ewma_signal_add(&sta->rx_stats_avg.signal,
4037					-status->signal);
4038	}
4039
4040	if (status->chains) {
4041		int i;
4042
4043		stats->chains = status->chains;
4044		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
4045			int signal = status->chain_signal[i];
4046
4047			if (!(status->chains & BIT(i)))
4048				continue;
4049
4050			stats->chain_signal_last[i] = signal;
4051			if (!fast_rx->uses_rss)
4052				ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
4053						-signal);
4054		}
4055	}
4056	/* end of statistics */
4057
4058	if (rx->key && !ieee80211_has_protected(hdr->frame_control))
4059		goto drop;
4060
4061	if (status->rx_flags & IEEE80211_RX_AMSDU) {
4062		if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
4063		    RX_QUEUED)
4064			goto drop;
4065
4066		return true;
4067	}
4068
4069	stats->last_rx = jiffies;
4070	stats->last_rate = sta_stats_encode_rate(status);
4071
4072	stats->fragments++;
4073	stats->packets++;
4074
4075	/* do the header conversion - first grab the addresses */
4076	ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
4077	ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
4078	/* remove the SNAP but leave the ethertype */
4079	skb_pull(skb, snap_offs + sizeof(rfc1042_header));
4080	/* push the addresses in front */
4081	memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
4082
4083	skb->dev = fast_rx->dev;
4084
4085	ieee80211_rx_stats(fast_rx->dev, skb->len);
4086
4087	/* The seqno index has the same property as needed
4088	 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
4089	 * for non-QoS-data frames. Here we know it's a data
4090	 * frame, so count MSDUs.
4091	 */
4092	u64_stats_update_begin(&stats->syncp);
4093	stats->msdu[rx->seqno_idx]++;
4094	stats->bytes += orig_len;
4095	u64_stats_update_end(&stats->syncp);
4096
4097	if (fast_rx->internal_forward) {
4098		struct sk_buff *xmit_skb = NULL;
4099		bool multicast = is_multicast_ether_addr(skb->data);
4100
4101		if (multicast) {
4102			xmit_skb = skb_copy(skb, GFP_ATOMIC);
4103		} else if (sta_info_get(rx->sdata, skb->data)) {
4104			xmit_skb = skb;
4105			skb = NULL;
4106		}
4107
4108		if (xmit_skb) {
4109			/*
4110			 * Send to wireless media and increase priority by 256
4111			 * to keep the received priority instead of
4112			 * reclassifying the frame (see cfg80211_classify8021d).
4113			 */
4114			xmit_skb->priority += 256;
4115			xmit_skb->protocol = htons(ETH_P_802_3);
4116			skb_reset_network_header(xmit_skb);
4117			skb_reset_mac_header(xmit_skb);
4118			dev_queue_xmit(xmit_skb);
4119		}
4120
4121		if (!skb)
4122			return true;
4123	}
4124
4125	/* deliver to local stack */
4126	skb->protocol = eth_type_trans(skb, fast_rx->dev);
4127	memset(skb->cb, 0, sizeof(skb->cb));
4128	if (rx->napi)
4129		napi_gro_receive(rx->napi, skb);
4130	else
4131		netif_receive_skb(skb);
4132
4133	return true;
4134 drop:
4135	dev_kfree_skb(skb);
4136	stats->dropped++;
4137	return true;
4138}
4139
4140/*
4141 * This function returns whether the SKB was
4142 * destined for RX processing, which, if consume
4143 * is true, is equivalent to whether or not the
4144 * skb was consumed.
4145 */
4146static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
4147					    struct sk_buff *skb, bool consume)
4148{
4149	struct ieee80211_local *local = rx->local;
4150	struct ieee80211_sub_if_data *sdata = rx->sdata;
4151
4152	rx->skb = skb;
4153
4154	/* See if we can do fast-rx; if we have to copy we already lost,
4155	 * so punt in that case. We should never have to deliver a data
4156	 * frame to multiple interfaces anyway.
4157	 *
4158	 * We skip the ieee80211_accept_frame() call and do the necessary
4159	 * checking inside ieee80211_invoke_fast_rx().
4160	 */
4161	if (consume && rx->sta) {
4162		struct ieee80211_fast_rx *fast_rx;
4163
4164		fast_rx = rcu_dereference(rx->sta->fast_rx);
4165		if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
4166			return true;
4167	}
4168
4169	if (!ieee80211_accept_frame(rx))
4170		return false;
4171
4172	if (!consume) {
4173		skb = skb_copy(skb, GFP_ATOMIC);
4174		if (!skb) {
4175			if (net_ratelimit())
4176				wiphy_debug(local->hw.wiphy,
4177					"failed to copy skb for %s\n",
4178					sdata->name);
4179			return true;
4180		}
4181
4182		rx->skb = skb;
4183	}
4184
4185	ieee80211_invoke_rx_handlers(rx);
4186	return true;
4187}
4188
4189/*
4190 * This is the actual Rx frames handler. As it belongs to the Rx path, it
4191 * must be called with rcu_read_lock protection.
4192 */
4193static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
4194					 struct ieee80211_sta *pubsta,
4195					 struct sk_buff *skb,
4196					 struct napi_struct *napi)
4197{
4198	struct ieee80211_local *local = hw_to_local(hw);
4199	struct ieee80211_sub_if_data *sdata;
4200	struct ieee80211_hdr *hdr;
4201	__le16 fc;
4202	struct ieee80211_rx_data rx;
4203	struct ieee80211_sub_if_data *prev;
4204	struct rhlist_head *tmp;
4205	int err = 0;
4206
4207	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
4208	memset(&rx, 0, sizeof(rx));
4209	rx.skb = skb;
4210	rx.local = local;
4211	rx.napi = napi;
4212
4213	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
4214		I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4215
4216	if (ieee80211_is_mgmt(fc)) {
4217		/* drop frame if too short for header */
4218		if (skb->len < ieee80211_hdrlen(fc))
4219			err = -ENOBUFS;
4220		else
4221			err = skb_linearize(skb);
4222	} else {
4223		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
4224	}
4225
4226	if (err) {
4227		dev_kfree_skb(skb);
4228		return;
4229	}
4230
4231	hdr = (struct ieee80211_hdr *)skb->data;
4232	ieee80211_parse_qos(&rx);
4233	ieee80211_verify_alignment(&rx);
4234
4235	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
4236		     ieee80211_is_beacon(hdr->frame_control)))
4237		ieee80211_scan_rx(local, skb);
4238
4239	if (ieee80211_is_data(fc)) {
4240		struct sta_info *sta, *prev_sta;
4241
4242		if (pubsta) {
4243			rx.sta = container_of(pubsta, struct sta_info, sta);
4244			rx.sdata = rx.sta->sdata;
4245			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4246				return;
4247			goto out;
4248		}
4249
4250		prev_sta = NULL;
4251
4252		for_each_sta_info(local, hdr->addr2, sta, tmp) {
4253			if (!prev_sta) {
4254				prev_sta = sta;
4255				continue;
4256			}
4257
4258			rx.sta = prev_sta;
4259			rx.sdata = prev_sta->sdata;
4260			ieee80211_prepare_and_rx_handle(&rx, skb, false);
4261
4262			prev_sta = sta;
4263		}
4264
4265		if (prev_sta) {
4266			rx.sta = prev_sta;
4267			rx.sdata = prev_sta->sdata;
4268
4269			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4270				return;
4271			goto out;
4272		}
4273	}
4274
4275	prev = NULL;
4276
4277	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4278		if (!ieee80211_sdata_running(sdata))
4279			continue;
4280
4281		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
4282		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
4283			continue;
4284
4285		/*
4286		 * the frame is destined for this interface; each match is
4287		 * handled one iteration late (with a copy) so that the last
4288		 * one, after the loop, can consume the SKB without an extra copy
4289		 */
4290
4291		if (!prev) {
4292			prev = sdata;
4293			continue;
4294		}
4295
4296		rx.sta = sta_info_get_bss(prev, hdr->addr2);
4297		rx.sdata = prev;
4298		ieee80211_prepare_and_rx_handle(&rx, skb, false);
4299
4300		prev = sdata;
4301	}
4302
4303	if (prev) {
4304		rx.sta = sta_info_get_bss(prev, hdr->addr2);
4305		rx.sdata = prev;
4306
4307		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4308			return;
4309	}
4310
4311 out:
4312	dev_kfree_skb(skb);
4313}
4314
4315/*
4316 * This is the receive path handler. It is called by a low level driver when an
4317 * 802.11 MPDU is received from the hardware.
4318 */
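/*
 * A minimal driver-side sketch (hypothetical driver code, only to
 * illustrate the calling convention; real drivers fill in many more
 * ieee80211_rx_status fields):
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = NL80211_BAND_2GHZ;
 *	status->freq = 2412;
 *	status->rate_idx = 0;
 *	ieee80211_rx_napi(hw, NULL, skb, &priv->napi);
 */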
4319void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4320		       struct sk_buff *skb, struct napi_struct *napi)
4321{
4322	struct ieee80211_local *local = hw_to_local(hw);
4323	struct ieee80211_rate *rate = NULL;
4324	struct ieee80211_supported_band *sband;
4325	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4326
4327	WARN_ON_ONCE(softirq_count() == 0);
4328
4329	if (WARN_ON(status->band >= NUM_NL80211_BANDS))
4330		goto drop;
4331
4332	sband = local->hw.wiphy->bands[status->band];
4333	if (WARN_ON(!sband))
4334		goto drop;
4335
4336	/*
4337	 * If we're suspending, it is possible although not too likely
4338	 * that we'd be receiving frames after having already partially
4339	 * quiesced the stack. We can't process such frames then since
4340	 * that might, for example, cause stations to be added or other
4341	 * driver callbacks be invoked.
4342	 */
4343	if (unlikely(local->quiescing || local->suspended))
4344		goto drop;
4345
4346	/* We might be during a HW reconfig, prevent Rx for the same reason */
4347	if (unlikely(local->in_reconfig))
4348		goto drop;
4349
4350	/*
4351	 * The same happens when we're not even started,
4352	 * but that's worth a warning.
4353	 */
4354	if (WARN_ON(!local->started))
4355		goto drop;
4356
4357	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
4358		/*
4359		 * Validate the rate, unless a PLCP error means that
4360		 * we probably can't have a valid rate here anyway.
4361		 */
4362
4363		switch (status->encoding) {
4364		case RX_ENC_HT:
4365			/*
4366			 * rate_idx is MCS index, which can be [0-76]
4367			 * as documented on:
4368			 *
4369			 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
4370			 *
4371			 * Anything else would be some sort of driver or
4372			 * hardware error. The driver should catch hardware
4373			 * errors.
4374			 */
4375			if (WARN(status->rate_idx > 76,
4376				 "Rate marked as an HT rate but passed "
4377				 "status->rate_idx is not "
4378				 "an MCS index [0-76]: %d (0x%02x)\n",
4379				 status->rate_idx,
4380				 status->rate_idx))
4381				goto drop;
4382			break;
4383		case RX_ENC_VHT:
4384			if (WARN_ONCE(status->rate_idx > 9 ||
4385				      !status->nss ||
4386				      status->nss > 8,
4387				      "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
4388				      status->rate_idx, status->nss))
4389				goto drop;
4390			break;
4391		default:
4392			WARN_ON_ONCE(1);
4393			/* fall through */
4394		case RX_ENC_LEGACY:
4395			if (WARN_ON(status->rate_idx >= sband->n_bitrates))
4396				goto drop;
4397			rate = &sband->bitrates[status->rate_idx];
4398		}
4399	}
4400
4401	status->rx_flags = 0;
4402
4403	/*
4404	 * key references and virtual interfaces are protected using RCU
4405	 * and this requires that we are in a read-side RCU section during
4406	 * receive processing
4407	 */
4408	rcu_read_lock();
4409
4410	/*
4411	 * Frames with failed FCS/PLCP checksum are not returned,
4412	 * all other frames are returned without radiotap header
4413	 * if it was previously present.
4414	 * Also, frames with less than 16 bytes are dropped.
4415	 */
4416	skb = ieee80211_rx_monitor(local, skb, rate);
4417	if (!skb) {
4418		rcu_read_unlock();
4419		return;
4420	}
4421
4422	ieee80211_tpt_led_trig_rx(local,
4423			((struct ieee80211_hdr *)skb->data)->frame_control,
4424			skb->len);
4425
4426	__ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
4427
4428	rcu_read_unlock();
4429
4430	return;
4431 drop:
4432	kfree_skb(skb);
4433}
4434EXPORT_SYMBOL(ieee80211_rx_napi);
4435
4436/* This is a version of the rx handler that can be called from hard irq
4437 * context. Post the skb on the queue and schedule the tasklet */
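/*
 * Hypothetical usage from a driver's interrupt handler, shown only as
 * an illustration (drv_dequeue_rx_frame() is made up; a real driver
 * also fills in IEEE80211_SKB_RXCB(skb) first):
 *
 *	static irqreturn_t drv_isr(int irq, void *dev_id)
 *	{
 *		struct ieee80211_hw *hw = dev_id;
 *		struct sk_buff *skb = drv_dequeue_rx_frame(hw);
 *
 *		if (skb)
 *			ieee80211_rx_irqsafe(hw, skb);
 *		return IRQ_HANDLED;
 *	}
 */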
4438void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
4439{
4440	struct ieee80211_local *local = hw_to_local(hw);
4441
4442	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
4443
4444	skb->pkt_type = IEEE80211_RX_MSG;
4445	skb_queue_tail(&local->skb_queue, skb);
4446	tasklet_schedule(&local->tasklet);
4447}
4448EXPORT_SYMBOL(ieee80211_rx_irqsafe);