v4.10.11
 
   1/*
   2 * Copyright (c) 2008, 2009 open80211s Ltd.
   3 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License version 2 as
   7 * published by the Free Software Foundation.
   8 */
   9
  10#include <linux/slab.h>
  11#include <linux/etherdevice.h>
  12#include <asm/unaligned.h>
  13#include "wme.h"
  14#include "mesh.h"
  15
  16#define TEST_FRAME_LEN	8192
  17#define MAX_METRIC	0xffffffff
  18#define ARITH_SHIFT	8
  19
  20#define MAX_PREQ_QUEUE_LEN	64
  21
  22static void mesh_queue_preq(struct mesh_path *, u8);
  23
  24static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
  25{
  26	if (ae)
  27		offset += 6;
  28	return get_unaligned_le32(preq_elem + offset);
  29}
  30
  31static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
  32{
  33	if (ae)
  34		offset += 6;
  35	return get_unaligned_le16(preq_elem + offset);
  36}
  37
  38/* HWMP IE processing macros */
  39#define AE_F			(1<<6)
  40#define AE_F_SET(x)		(*x & AE_F)
  41#define PREQ_IE_FLAGS(x)	(*(x))
  42#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
  43#define PREQ_IE_TTL(x)		(*(x + 2))
  44#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
  45#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
  46#define PREQ_IE_ORIG_SN(x)	u32_field_get(x, 13, 0)
  47#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
  48#define PREQ_IE_METRIC(x) 	u32_field_get(x, 21, AE_F_SET(x))
  49#define PREQ_IE_TARGET_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
  50#define PREQ_IE_TARGET_ADDR(x) 	(AE_F_SET(x) ? x + 33 : x + 27)
  51#define PREQ_IE_TARGET_SN(x) 	u32_field_get(x, 33, AE_F_SET(x))
  52
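/* For reference, the PREQ element body these macros parse is laid out
 * as (octet offsets, no Address Extension): 0 flags, 1 hop count,
 * 2 TTL, 3-6 PREQ ID, 7-12 originator address, 13-16 originator SN,
 * 17-20 lifetime, 21-24 metric, 25 target count, 26 per-target flags,
 * 27-32 target address, 33-36 target SN -- 37 octets in total, which
 * matches the single-target, no-AE PREQs built and accepted below.
 * With the AE flag set, a 6-octet external address follows the
 * originator SN and the later offsets shift by 6.
 */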
  53
  54#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
  55#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
  56#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
  57#define PREP_IE_ORIG_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
  58#define PREP_IE_ORIG_SN(x)	u32_field_get(x, 27, AE_F_SET(x))
  59#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
  60#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
  61#define PREP_IE_TARGET_ADDR(x)	(x + 3)
  62#define PREP_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
  63
  64#define PERR_IE_TTL(x)		(*(x))
  65#define PERR_IE_TARGET_FLAGS(x)	(*(x + 2))
  66#define PERR_IE_TARGET_ADDR(x)	(x + 3)
  67#define PERR_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
  68#define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)
  69
  70#define MSEC_TO_TU(x) (x*1000/1024)
  71#define SN_GT(x, y) ((s32)(y - x) < 0)
  72#define SN_LT(x, y) ((s32)(x - y) < 0)
  73#define MAX_SANE_SN_DELTA 32
  74
  75static inline u32 SN_DELTA(u32 x, u32 y)
  76{
  77	return x >= y ? x - y : y - x;
  78}
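
/* A note on the helpers above: one TU (time unit) is 1024 microseconds,
 * hence MSEC_TO_TU() scales by 1000/1024.  SN_GT()/SN_LT() compare HWMP
 * sequence numbers modulo 2^32 through the sign of the difference, so
 * e.g. SN_GT(1, 0xffffffff) is true across a wraparound.  SN_DELTA() is
 * the plain absolute difference and is checked against
 * MAX_SANE_SN_DELTA to detect a peer that rebooted and restarted its
 * sequence number space.
 */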
  79
  80#define net_traversal_jiffies(s) \
  81	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
  82#define default_lifetime(s) \
  83	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
  84#define min_preq_int_jiff(s) \
  85	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
  86#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
  87#define disc_timeout_jiff(s) \
  88	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
  89#define root_path_confirmation_jiffies(s) \
  90	msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)
  91
  92enum mpath_frame_type {
  93	MPATH_PREQ = 0,
  94	MPATH_PREP,
  95	MPATH_PERR,
  96	MPATH_RANN
  97};
  98
  99static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 100
 101static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
 102				  const u8 *orig_addr, u32 orig_sn,
 103				  u8 target_flags, const u8 *target,
 104				  u32 target_sn, const u8 *da,
 105				  u8 hop_count, u8 ttl,
 106				  u32 lifetime, u32 metric, u32 preq_id,
 107				  struct ieee80211_sub_if_data *sdata)
 108{
 109	struct ieee80211_local *local = sdata->local;
 110	struct sk_buff *skb;
 111	struct ieee80211_mgmt *mgmt;
 112	u8 *pos, ie_len;
 113	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
 114		      sizeof(mgmt->u.action.u.mesh_action);
 115
 116	skb = dev_alloc_skb(local->tx_headroom +
 117			    hdr_len +
 118			    2 + 37); /* max HWMP IE */
 119	if (!skb)
 120		return -1;
 121	skb_reserve(skb, local->tx_headroom);
 122	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 123	memset(mgmt, 0, hdr_len);
 124	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 125					  IEEE80211_STYPE_ACTION);
 126
 127	memcpy(mgmt->da, da, ETH_ALEN);
 128	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 129	/* BSSID == SA */
 130	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 131	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
 132	mgmt->u.action.u.mesh_action.action_code =
 133					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
 134
 135	switch (action) {
 136	case MPATH_PREQ:
 137		mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
 138		ie_len = 37;
 139		pos = skb_put(skb, 2 + ie_len);
 140		*pos++ = WLAN_EID_PREQ;
 141		break;
 142	case MPATH_PREP:
 143		mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
 144		ie_len = 31;
 145		pos = skb_put(skb, 2 + ie_len);
 146		*pos++ = WLAN_EID_PREP;
 147		break;
 148	case MPATH_RANN:
 149		mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
 150		ie_len = sizeof(struct ieee80211_rann_ie);
 151		pos = skb_put(skb, 2 + ie_len);
 152		*pos++ = WLAN_EID_RANN;
 153		break;
 154	default:
 155		kfree_skb(skb);
 156		return -ENOTSUPP;
 157	}
 158	*pos++ = ie_len;
 159	*pos++ = flags;
 160	*pos++ = hop_count;
 161	*pos++ = ttl;
 162	if (action == MPATH_PREP) {
 163		memcpy(pos, target, ETH_ALEN);
 164		pos += ETH_ALEN;
 165		put_unaligned_le32(target_sn, pos);
 166		pos += 4;
 167	} else {
 168		if (action == MPATH_PREQ) {
 169			put_unaligned_le32(preq_id, pos);
 170			pos += 4;
 171		}
 172		memcpy(pos, orig_addr, ETH_ALEN);
 173		pos += ETH_ALEN;
 174		put_unaligned_le32(orig_sn, pos);
 175		pos += 4;
 176	}
 177	put_unaligned_le32(lifetime, pos); /* interval for RANN */
 178	pos += 4;
 179	put_unaligned_le32(metric, pos);
 180	pos += 4;
 181	if (action == MPATH_PREQ) {
 182		*pos++ = 1; /* destination count */
 183		*pos++ = target_flags;
 184		memcpy(pos, target, ETH_ALEN);
 185		pos += ETH_ALEN;
 186		put_unaligned_le32(target_sn, pos);
 187		pos += 4;
 188	} else if (action == MPATH_PREP) {
 189		memcpy(pos, orig_addr, ETH_ALEN);
 190		pos += ETH_ALEN;
 191		put_unaligned_le32(orig_sn, pos);
 192		pos += 4;
 193	}
 194
 195	ieee80211_tx_skb(sdata, skb);
 196	return 0;
 197}
 198
 199
 200/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 201 *  headroom in case the frame is encrypted. */
 202static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
 203		struct sk_buff *skb)
 204{
 205	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 206	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 207
 208	skb_reset_mac_header(skb);
 209	skb_reset_network_header(skb);
 210	skb_reset_transport_header(skb);
 211
 212	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
 213	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
 214	skb->priority = 7;
 215
 216	info->control.vif = &sdata->vif;
 217	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
 218	ieee80211_set_qos_hdr(sdata, skb);
 219	ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
 220}
 221
 222/**
 223 * mesh_path_error_tx - Sends a PERR mesh management frame
 224 *
 225 * @ttl: allowed remaining hops
 226 * @target: broken destination
 227 * @target_sn: SN of the broken destination
 228 * @target_rcode: reason code for this PERR
 229 * @ra: node this frame is addressed to
 230 * @sdata: local mesh subif
 231 *
 232 * Note: This function may be called with driver locks taken that the driver
 233 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 234 * frame directly but add it to the pending queue instead.
 235 */
 236int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 237		       u8 ttl, const u8 *target, u32 target_sn,
 238		       u16 target_rcode, const u8 *ra)
 239{
 240	struct ieee80211_local *local = sdata->local;
 241	struct sk_buff *skb;
 242	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 243	struct ieee80211_mgmt *mgmt;
 244	u8 *pos, ie_len;
 245	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
 246		      sizeof(mgmt->u.action.u.mesh_action);
 247
 248	if (time_before(jiffies, ifmsh->next_perr))
 249		return -EAGAIN;
 250
 251	skb = dev_alloc_skb(local->tx_headroom +
 252			    sdata->encrypt_headroom +
 253			    IEEE80211_ENCRYPT_TAILROOM +
 254			    hdr_len +
 255			    2 + 15 /* PERR IE */);
 256	if (!skb)
 257		return -1;
 258	skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom);
 259	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 260	memset(mgmt, 0, hdr_len);
 261	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 262					  IEEE80211_STYPE_ACTION);
 263
 264	memcpy(mgmt->da, ra, ETH_ALEN);
 265	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 266	/* BSSID == SA */
 267	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 268	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
 269	mgmt->u.action.u.mesh_action.action_code =
 270					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
 271	ie_len = 15;
 272	pos = skb_put(skb, 2 + ie_len);
 273	*pos++ = WLAN_EID_PERR;
 274	*pos++ = ie_len;
 275	/* ttl */
 276	*pos++ = ttl;
 277	/* number of destinations */
 278	*pos++ = 1;
 279	/* Flags field has AE bit only as defined in
 280	 * sec 8.4.2.117 IEEE802.11-2012
 281	 */
 282	*pos = 0;
 283	pos++;
 284	memcpy(pos, target, ETH_ALEN);
 285	pos += ETH_ALEN;
 286	put_unaligned_le32(target_sn, pos);
 287	pos += 4;
 288	put_unaligned_le16(target_rcode, pos);
 289
 290	/* see note in function header */
 291	prepare_frame_for_deferred_tx(sdata, skb);
 292	ifmsh->next_perr = TU_TO_EXP_TIME(
 293				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
 294	ieee80211_add_pending_skb(local, skb);
 295	return 0;
 296}
 297
 298void ieee80211s_update_metric(struct ieee80211_local *local,
 299		struct sta_info *sta, struct sk_buff *skb)
 300{
 301	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
 302	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 303	int failed;
 304
 305	if (!ieee80211_is_data(hdr->frame_control))
 306		return;
 307
 308	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 309
 310	/* moving average, scaled to 100 */
 311	sta->mesh->fail_avg =
 312		((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed);
 313	if (sta->mesh->fail_avg > 95)
 314		mesh_plink_broken(sta);
 315}
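
/* The update above is an exponentially weighted moving average kept in
 * percent: fail_avg becomes 0.8 * fail_avg + 20 * failed (the "+ 5"
 * only rounds the division), so a consistently failing link converges
 * towards 100.  Once the average exceeds 95 the peer link is declared
 * broken via mesh_plink_broken().
 */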
 316
 317static u32 airtime_link_metric_get(struct ieee80211_local *local,
 318				   struct sta_info *sta)
 319{
 320	struct rate_info rinfo;
 321	/* This should be adjusted for each device */
 322	int device_constant = 1 << ARITH_SHIFT;
 323	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
 324	int s_unit = 1 << ARITH_SHIFT;
 325	int rate, err;
 326	u32 tx_time, estimated_retx;
 327	u64 result;
 328
 329	/* Try to get rate based on HW/SW RC algorithm.
  330	 * Rate is returned in units of Kbps; correct this
  331	 * to comply with airtime calculation units.
  332	 * Round up in case we get rate < 100Kbps.
 333	 */
 334	rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100);
 335
 336	if (rate) {
 337		err = 0;
 338	} else {
 339		if (sta->mesh->fail_avg >= 100)
 340			return MAX_METRIC;
 341
 342		sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
 343		rate = cfg80211_calculate_bitrate(&rinfo);
 344		if (WARN_ON(!rate))
 345			return MAX_METRIC;
 346
 347		err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
 348	}
 349
 350	/* bitrate is in units of 100 Kbps, while we need rate in units of
 351	 * 1Mbps. This will be corrected on tx_time computation.
 352	 */
 353	tx_time = (device_constant + 10 * test_frame_len / rate);
 354	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
 355	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
 356	return (u32)result;
 357}
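
/* This implements the 802.11s airtime link metric,
 *	metric ~= (O + Bt / r) / (1 - ef),
 * in fixed point with ARITH_SHIFT (8) fractional bits: O is the device
 * constant, Bt the 8192-bit test frame and r the rate.  Illustrative
 * example for an error-free 1 Mbps link (rate = 10 in 100 Kbps units):
 *	tx_time        = 256 + 10 * (8192 << 8) / 10 = (1 + 8192) << 8
 *	estimated_retx = (1 << 16) / (256 - 0)        = 1 << 8
 *	result         = (tx_time * estimated_retx) >> 16 = 8193,
 * i.e. roughly the time in microseconds to send the test frame once.
 */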
 358
 359/**
 360 * hwmp_route_info_get - Update routing info to originator and transmitter
 361 *
 362 * @sdata: local mesh subif
 363 * @mgmt: mesh management frame
 364 * @hwmp_ie: hwmp information element (PREP or PREQ)
 365 * @action: type of hwmp ie
 366 *
 367 * This function updates the path routing information to the originator and the
 368 * transmitter of a HWMP PREQ or PREP frame.
 369 *
 370 * Returns: metric to frame originator or 0 if the frame should not be further
 371 * processed
 372 *
 373 * Notes: this function is the only place (besides user-provided info) where
 374 * path routing information is updated.
 375 */
 376static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
 377			       struct ieee80211_mgmt *mgmt,
 378			       const u8 *hwmp_ie, enum mpath_frame_type action)
 379{
 380	struct ieee80211_local *local = sdata->local;
 381	struct mesh_path *mpath;
 382	struct sta_info *sta;
 383	bool fresh_info;
 384	const u8 *orig_addr, *ta;
 385	u32 orig_sn, orig_metric;
 386	unsigned long orig_lifetime, exp_time;
 387	u32 last_hop_metric, new_metric;
 388	bool process = true;
 389
 390	rcu_read_lock();
 391	sta = sta_info_get(sdata, mgmt->sa);
 392	if (!sta) {
 393		rcu_read_unlock();
 394		return 0;
 395	}
 396
 397	last_hop_metric = airtime_link_metric_get(local, sta);
 398	/* Update and check originator routing info */
 399	fresh_info = true;
 400
 401	switch (action) {
 402	case MPATH_PREQ:
 403		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
 404		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
 405		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
 406		orig_metric = PREQ_IE_METRIC(hwmp_ie);
 407		break;
 408	case MPATH_PREP:
 409		/* Originator here refers to the MP that was the target in the
  410		 * Path Request. We diverge from the nomenclature in the draft
 411		 * so that we can easily use a single function to gather path
 412		 * information from both PREQ and PREP frames.
 413		 */
 414		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
 415		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
 416		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
 417		orig_metric = PREP_IE_METRIC(hwmp_ie);
 418		break;
 419	default:
 420		rcu_read_unlock();
 421		return 0;
 422	}
 423	new_metric = orig_metric + last_hop_metric;
 424	if (new_metric < orig_metric)
 425		new_metric = MAX_METRIC;
 426	exp_time = TU_TO_EXP_TIME(orig_lifetime);
 427
 428	if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
 429		/* This MP is the originator, we are not interested in this
 430		 * frame, except for updating transmitter's path info.
 431		 */
 432		process = false;
 433		fresh_info = false;
 434	} else {
 435		mpath = mesh_path_lookup(sdata, orig_addr);
 436		if (mpath) {
 437			spin_lock_bh(&mpath->state_lock);
 438			if (mpath->flags & MESH_PATH_FIXED)
 439				fresh_info = false;
 440			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
 441			    (mpath->flags & MESH_PATH_SN_VALID)) {
 442				if (SN_GT(mpath->sn, orig_sn) ||
 443				    (mpath->sn == orig_sn &&
 444				     new_metric >= mpath->metric)) {
 445					process = false;
 446					fresh_info = false;
 447				}
 448			} else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
 449				bool have_sn, newer_sn, bounced;
 450
 451				have_sn = mpath->flags & MESH_PATH_SN_VALID;
 452				newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
 453				bounced = have_sn &&
 454					  (SN_DELTA(orig_sn, mpath->sn) >
 455							MAX_SANE_SN_DELTA);
 456
 457				if (!have_sn || newer_sn) {
 458					/* if SN is newer than what we had
 459					 * then we can take it */;
 460				} else if (bounced) {
 461					/* if SN is way different than what
 462					 * we had then assume the other side
 463					 * rebooted or restarted */;
 464				} else {
 465					process = false;
 466					fresh_info = false;
 467				}
 468			}
 469		} else {
 470			mpath = mesh_path_add(sdata, orig_addr);
 471			if (IS_ERR(mpath)) {
 472				rcu_read_unlock();
 473				return 0;
 474			}
 475			spin_lock_bh(&mpath->state_lock);
 476		}
 477
 478		if (fresh_info) {
 479			mesh_path_assign_nexthop(mpath, sta);
 480			mpath->flags |= MESH_PATH_SN_VALID;
 481			mpath->metric = new_metric;
 482			mpath->sn = orig_sn;
 483			mpath->exp_time = time_after(mpath->exp_time, exp_time)
 484					  ?  mpath->exp_time : exp_time;
 485			mesh_path_activate(mpath);
 486			spin_unlock_bh(&mpath->state_lock);
 487			mesh_path_tx_pending(mpath);
  488		/* draft says preq_id should be saved too, but there does
  489		 * not seem to be any use for it, skipping for now
 490			 */
 491		} else
 492			spin_unlock_bh(&mpath->state_lock);
 493	}
 494
 495	/* Update and check transmitter routing info */
 496	ta = mgmt->sa;
 497	if (ether_addr_equal(orig_addr, ta))
 498		fresh_info = false;
 499	else {
 500		fresh_info = true;
 501
 502		mpath = mesh_path_lookup(sdata, ta);
 503		if (mpath) {
 504			spin_lock_bh(&mpath->state_lock);
 505			if ((mpath->flags & MESH_PATH_FIXED) ||
 506				((mpath->flags & MESH_PATH_ACTIVE) &&
 507					(last_hop_metric > mpath->metric)))
 508				fresh_info = false;
 509		} else {
 510			mpath = mesh_path_add(sdata, ta);
 511			if (IS_ERR(mpath)) {
 512				rcu_read_unlock();
 513				return 0;
 514			}
 515			spin_lock_bh(&mpath->state_lock);
 516		}
 517
 518		if (fresh_info) {
 519			mesh_path_assign_nexthop(mpath, sta);
 520			mpath->metric = last_hop_metric;
 521			mpath->exp_time = time_after(mpath->exp_time, exp_time)
 522					  ?  mpath->exp_time : exp_time;
 523			mesh_path_activate(mpath);
 524			spin_unlock_bh(&mpath->state_lock);
 525			mesh_path_tx_pending(mpath);
 526		} else
 527			spin_unlock_bh(&mpath->state_lock);
 528	}
 529
 530	rcu_read_unlock();
 531
 532	return process ? new_metric : 0;
 533}
 534
 535static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 536				    struct ieee80211_mgmt *mgmt,
 537				    const u8 *preq_elem, u32 orig_metric)
 538{
 539	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 540	struct mesh_path *mpath = NULL;
 541	const u8 *target_addr, *orig_addr;
 542	const u8 *da;
 543	u8 target_flags, ttl, flags;
 544	u32 orig_sn, target_sn, lifetime, target_metric = 0;
 545	bool reply = false;
 546	bool forward = true;
 547	bool root_is_gate;
 548
 549	/* Update target SN, if present */
 550	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
 551	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
 552	target_sn = PREQ_IE_TARGET_SN(preq_elem);
 553	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
 554	target_flags = PREQ_IE_TARGET_F(preq_elem);
 555	/* Proactive PREQ gate announcements */
 556	flags = PREQ_IE_FLAGS(preq_elem);
 557	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 558
 559	mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);
 560
 561	if (ether_addr_equal(target_addr, sdata->vif.addr)) {
 562		mhwmp_dbg(sdata, "PREQ is for us\n");
 563		forward = false;
 564		reply = true;
 565		target_metric = 0;
 566		if (time_after(jiffies, ifmsh->last_sn_update +
 567					net_traversal_jiffies(sdata)) ||
 568		    time_before(jiffies, ifmsh->last_sn_update)) {
 569			++ifmsh->sn;
 570			ifmsh->last_sn_update = jiffies;
 571		}
 572		target_sn = ifmsh->sn;
 573	} else if (is_broadcast_ether_addr(target_addr) &&
 574		   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
 575		rcu_read_lock();
 576		mpath = mesh_path_lookup(sdata, orig_addr);
 577		if (mpath) {
 578			if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
 579				reply = true;
 580				target_addr = sdata->vif.addr;
 581				target_sn = ++ifmsh->sn;
 582				target_metric = 0;
 583				ifmsh->last_sn_update = jiffies;
 584			}
 585			if (root_is_gate)
 586				mesh_path_add_gate(mpath);
 587		}
 588		rcu_read_unlock();
 589	} else {
 590		rcu_read_lock();
 591		mpath = mesh_path_lookup(sdata, target_addr);
 592		if (mpath) {
 593			if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
 594					SN_LT(mpath->sn, target_sn)) {
 595				mpath->sn = target_sn;
 596				mpath->flags |= MESH_PATH_SN_VALID;
 597			} else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
 598					(mpath->flags & MESH_PATH_ACTIVE)) {
 599				reply = true;
 600				target_metric = mpath->metric;
 601				target_sn = mpath->sn;
 602				/* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/
 603				target_flags |= IEEE80211_PREQ_TO_FLAG;
 604			}
 605		}
 606		rcu_read_unlock();
 607	}
 608
 609	if (reply) {
 610		lifetime = PREQ_IE_LIFETIME(preq_elem);
 611		ttl = ifmsh->mshcfg.element_ttl;
 612		if (ttl != 0) {
 613			mhwmp_dbg(sdata, "replying to the PREQ\n");
 614			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
 615					       orig_sn, 0, target_addr,
 616					       target_sn, mgmt->sa, 0, ttl,
 617					       lifetime, target_metric, 0,
 618					       sdata);
 619		} else {
 620			ifmsh->mshstats.dropped_frames_ttl++;
 621		}
 622	}
 623
 624	if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
 625		u32 preq_id;
 626		u8 hopcount;
 627
 628		ttl = PREQ_IE_TTL(preq_elem);
 629		lifetime = PREQ_IE_LIFETIME(preq_elem);
 630		if (ttl <= 1) {
 631			ifmsh->mshstats.dropped_frames_ttl++;
 632			return;
 633		}
 634		mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
 635		--ttl;
 636		preq_id = PREQ_IE_PREQ_ID(preq_elem);
 637		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
 638		da = (mpath && mpath->is_root) ?
 639			mpath->rann_snd_addr : broadcast_addr;
 640
 641		if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
 642			target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
 643			target_sn = PREQ_IE_TARGET_SN(preq_elem);
 644		}
 645
 646		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
 647				       orig_sn, target_flags, target_addr,
 648				       target_sn, da, hopcount, ttl, lifetime,
 649				       orig_metric, preq_id, sdata);
 650		if (!is_multicast_ether_addr(da))
 651			ifmsh->mshstats.fwded_unicast++;
 652		else
 653			ifmsh->mshstats.fwded_mcast++;
 654		ifmsh->mshstats.fwded_frames++;
 655	}
 656}
 657
 658
 659static inline struct sta_info *
 660next_hop_deref_protected(struct mesh_path *mpath)
 661{
 662	return rcu_dereference_protected(mpath->next_hop,
 663					 lockdep_is_held(&mpath->state_lock));
 664}
 665
 666
 667static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
 668				    struct ieee80211_mgmt *mgmt,
 669				    const u8 *prep_elem, u32 metric)
 670{
 671	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 672	struct mesh_path *mpath;
 673	const u8 *target_addr, *orig_addr;
 674	u8 ttl, hopcount, flags;
 675	u8 next_hop[ETH_ALEN];
 676	u32 target_sn, orig_sn, lifetime;
 677
 678	mhwmp_dbg(sdata, "received PREP from %pM\n",
 679		  PREP_IE_TARGET_ADDR(prep_elem));
 680
 681	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
 682	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 683		/* destination, no forwarding required */
 684		return;
 685
 686	if (!ifmsh->mshcfg.dot11MeshForwarding)
 687		return;
 688
 689	ttl = PREP_IE_TTL(prep_elem);
 690	if (ttl <= 1) {
 691		sdata->u.mesh.mshstats.dropped_frames_ttl++;
 692		return;
 693	}
 694
 695	rcu_read_lock();
 696	mpath = mesh_path_lookup(sdata, orig_addr);
 697	if (mpath)
 698		spin_lock_bh(&mpath->state_lock);
 699	else
 700		goto fail;
 701	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
 702		spin_unlock_bh(&mpath->state_lock);
 703		goto fail;
 704	}
 705	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
 706	spin_unlock_bh(&mpath->state_lock);
 707	--ttl;
 708	flags = PREP_IE_FLAGS(prep_elem);
 709	lifetime = PREP_IE_LIFETIME(prep_elem);
 710	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
 711	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
 712	target_sn = PREP_IE_TARGET_SN(prep_elem);
 713	orig_sn = PREP_IE_ORIG_SN(prep_elem);
 714
 715	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0,
 716			       target_addr, target_sn, next_hop, hopcount,
 717			       ttl, lifetime, metric, 0, sdata);
 718	rcu_read_unlock();
 719
 720	sdata->u.mesh.mshstats.fwded_unicast++;
 721	sdata->u.mesh.mshstats.fwded_frames++;
 722	return;
 723
 724fail:
 725	rcu_read_unlock();
 726	sdata->u.mesh.mshstats.dropped_frames_no_route++;
 727}
 728
 729static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
 730				    struct ieee80211_mgmt *mgmt,
 731				    const u8 *perr_elem)
 732{
 733	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 734	struct mesh_path *mpath;
 735	u8 ttl;
 736	const u8 *ta, *target_addr;
 737	u32 target_sn;
 738	u16 target_rcode;
 739
 740	ta = mgmt->sa;
 741	ttl = PERR_IE_TTL(perr_elem);
 742	if (ttl <= 1) {
 743		ifmsh->mshstats.dropped_frames_ttl++;
 744		return;
 745	}
 746	ttl--;
 747	target_addr = PERR_IE_TARGET_ADDR(perr_elem);
 748	target_sn = PERR_IE_TARGET_SN(perr_elem);
 749	target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
 750
 751	rcu_read_lock();
 752	mpath = mesh_path_lookup(sdata, target_addr);
 753	if (mpath) {
 754		struct sta_info *sta;
 755
 756		spin_lock_bh(&mpath->state_lock);
 757		sta = next_hop_deref_protected(mpath);
 758		if (mpath->flags & MESH_PATH_ACTIVE &&
 759		    ether_addr_equal(ta, sta->sta.addr) &&
 760		    !(mpath->flags & MESH_PATH_FIXED) &&
 761		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
 762		    SN_GT(target_sn, mpath->sn)  || target_sn == 0)) {
 763			mpath->flags &= ~MESH_PATH_ACTIVE;
 764			if (target_sn != 0)
 765				mpath->sn = target_sn;
 766			else
 767				mpath->sn += 1;
 768			spin_unlock_bh(&mpath->state_lock);
 769			if (!ifmsh->mshcfg.dot11MeshForwarding)
 770				goto endperr;
 771			mesh_path_error_tx(sdata, ttl, target_addr,
 772					   target_sn, target_rcode,
 773					   broadcast_addr);
 774		} else
 775			spin_unlock_bh(&mpath->state_lock);
 776	}
 777endperr:
 778	rcu_read_unlock();
 779}
 780
 781static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 782				    struct ieee80211_mgmt *mgmt,
 783				    const struct ieee80211_rann_ie *rann)
 784{
 785	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 786	struct ieee80211_local *local = sdata->local;
 787	struct sta_info *sta;
 788	struct mesh_path *mpath;
 789	u8 ttl, flags, hopcount;
 790	const u8 *orig_addr;
 791	u32 orig_sn, metric, metric_txsta, interval;
 792	bool root_is_gate;
 793
 794	ttl = rann->rann_ttl;
 795	flags = rann->rann_flags;
 796	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 797	orig_addr = rann->rann_addr;
 798	orig_sn = le32_to_cpu(rann->rann_seq);
 799	interval = le32_to_cpu(rann->rann_interval);
 800	hopcount = rann->rann_hopcount;
 801	hopcount++;
 802	metric = le32_to_cpu(rann->rann_metric);
 803
 804	/*  Ignore our own RANNs */
 805	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 806		return;
 807
 808	mhwmp_dbg(sdata,
 809		  "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
 810		  orig_addr, mgmt->sa, root_is_gate);
 811
 812	rcu_read_lock();
 813	sta = sta_info_get(sdata, mgmt->sa);
 814	if (!sta) {
 815		rcu_read_unlock();
 816		return;
 817	}
 818
 819	metric_txsta = airtime_link_metric_get(local, sta);
 820
 821	mpath = mesh_path_lookup(sdata, orig_addr);
 822	if (!mpath) {
 823		mpath = mesh_path_add(sdata, orig_addr);
 824		if (IS_ERR(mpath)) {
 825			rcu_read_unlock();
 826			sdata->u.mesh.mshstats.dropped_frames_no_route++;
 827			return;
 828		}
 829	}
 830
 831	if (!(SN_LT(mpath->sn, orig_sn)) &&
 832	    !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
 833		rcu_read_unlock();
 834		return;
 835	}
 836
 837	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
 838	     (time_after(jiffies, mpath->last_preq_to_root +
 839				  root_path_confirmation_jiffies(sdata)) ||
 840	     time_before(jiffies, mpath->last_preq_to_root))) &&
 841	     !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
 842		mhwmp_dbg(sdata,
 843			  "time to refresh root mpath %pM\n",
 844			  orig_addr);
 845		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
 846		mpath->last_preq_to_root = jiffies;
 847	}
 848
 849	mpath->sn = orig_sn;
 850	mpath->rann_metric = metric + metric_txsta;
 851	mpath->is_root = true;
  852	/* Record the RANN sender's address so we can send individually
  853	 * addressed PREQs destined for the root mesh STA */
 854	memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
 855
 856	if (root_is_gate)
 857		mesh_path_add_gate(mpath);
 858
 859	if (ttl <= 1) {
 860		ifmsh->mshstats.dropped_frames_ttl++;
 861		rcu_read_unlock();
 862		return;
 863	}
 864	ttl--;
 865
 866	if (ifmsh->mshcfg.dot11MeshForwarding) {
 867		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
 868				       orig_sn, 0, NULL, 0, broadcast_addr,
 869				       hopcount, ttl, interval,
 870				       metric + metric_txsta, 0, sdata);
 871	}
 872
 873	rcu_read_unlock();
 874}
 875
 876
 877void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
 878			    struct ieee80211_mgmt *mgmt, size_t len)
 879{
 880	struct ieee802_11_elems elems;
 881	size_t baselen;
 882	u32 path_metric;
 883	struct sta_info *sta;
 884
 885	/* need action_code */
 886	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
 887		return;
 888
 889	rcu_read_lock();
 890	sta = sta_info_get(sdata, mgmt->sa);
 891	if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
 892		rcu_read_unlock();
 893		return;
 894	}
 895	rcu_read_unlock();
 896
 897	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
 898	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
 899			       len - baselen, false, &elems);
 900
 901	if (elems.preq) {
 902		if (elems.preq_len != 37)
 903			/* Right now we support just 1 destination and no AE */
 904			return;
 905		path_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
 906						  MPATH_PREQ);
 907		if (path_metric)
 908			hwmp_preq_frame_process(sdata, mgmt, elems.preq,
 909						path_metric);
 910	}
 911	if (elems.prep) {
 912		if (elems.prep_len != 31)
 913			/* Right now we support no AE */
 914			return;
 915		path_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
 916						  MPATH_PREP);
 917		if (path_metric)
 918			hwmp_prep_frame_process(sdata, mgmt, elems.prep,
 919						path_metric);
 920	}
 921	if (elems.perr) {
 922		if (elems.perr_len != 15)
 923			/* Right now we support only one destination per PERR */
 924			return;
 925		hwmp_perr_frame_process(sdata, mgmt, elems.perr);
 926	}
 927	if (elems.rann)
 928		hwmp_rann_frame_process(sdata, mgmt, elems.rann);
 929}
 930
 931/**
 932 * mesh_queue_preq - queue a PREQ to a given destination
 933 *
 934 * @mpath: mesh path to discover
 935 * @flags: special attributes of the PREQ to be sent
 936 *
 937 * Locking: the function must be called from within a rcu read lock block.
 938 *
 939 */
 940static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
 941{
 942	struct ieee80211_sub_if_data *sdata = mpath->sdata;
 943	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 944	struct mesh_preq_queue *preq_node;
 945
 946	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
 947	if (!preq_node) {
 948		mhwmp_dbg(sdata, "could not allocate PREQ node\n");
 949		return;
 950	}
 951
 952	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
 953	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
 954		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 955		kfree(preq_node);
 956		if (printk_ratelimit())
 957			mhwmp_dbg(sdata, "PREQ node queue full\n");
 958		return;
 959	}
 960
 961	spin_lock(&mpath->state_lock);
 962	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
 963		spin_unlock(&mpath->state_lock);
 964		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 965		kfree(preq_node);
 966		return;
 967	}
 968
 969	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
 970	preq_node->flags = flags;
 971
 972	mpath->flags |= MESH_PATH_REQ_QUEUED;
 973	spin_unlock(&mpath->state_lock);
 974
 975	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
 976	++ifmsh->preq_queue_len;
 977	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 978
 979	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
 980		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 981
 982	else if (time_before(jiffies, ifmsh->last_preq)) {
  983		/* avoid a long wait if we did not send PREQs for a long time
 984		 * and jiffies wrapped around
 985		 */
 986		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
 987		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 988	} else
 989		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
 990						min_preq_int_jiff(sdata));
 991}
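
/* Discovery flow, for orientation: mesh_nexthop_resolve() calls
 * mesh_queue_preq() when it has no forwarding information; the queued
 * entry is picked up by the mesh interface work (immediately, or after
 * dot11MeshHWMPpreqMinInterval via the timer armed above), which runs
 * mesh_path_start_discovery() below to transmit the PREQ and arm
 * mpath->timer.  On expiry, mesh_path_timer() retries with a doubled
 * discovery_timeout up to dot11MeshHWMPmaxPREQretries, after which the
 * queued frames are sent towards a gate or flushed.
 */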
 992
 993/**
 994 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 995 *
 996 * @sdata: local mesh subif
 997 */
 998void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
 999{
1000	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1001	struct mesh_preq_queue *preq_node;
1002	struct mesh_path *mpath;
1003	u8 ttl, target_flags = 0;
1004	const u8 *da;
1005	u32 lifetime;
1006
1007	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
1008	if (!ifmsh->preq_queue_len ||
1009		time_before(jiffies, ifmsh->last_preq +
1010				min_preq_int_jiff(sdata))) {
1011		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1012		return;
1013	}
1014
1015	preq_node = list_first_entry(&ifmsh->preq_queue.list,
1016			struct mesh_preq_queue, list);
1017	list_del(&preq_node->list);
1018	--ifmsh->preq_queue_len;
1019	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1020
1021	rcu_read_lock();
1022	mpath = mesh_path_lookup(sdata, preq_node->dst);
1023	if (!mpath)
1024		goto enddiscovery;
1025
1026	spin_lock_bh(&mpath->state_lock);
1027	if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
1028		spin_unlock_bh(&mpath->state_lock);
1029		goto enddiscovery;
1030	}
1031	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
1032	if (preq_node->flags & PREQ_Q_F_START) {
1033		if (mpath->flags & MESH_PATH_RESOLVING) {
1034			spin_unlock_bh(&mpath->state_lock);
1035			goto enddiscovery;
1036		} else {
1037			mpath->flags &= ~MESH_PATH_RESOLVED;
1038			mpath->flags |= MESH_PATH_RESOLVING;
1039			mpath->discovery_retries = 0;
1040			mpath->discovery_timeout = disc_timeout_jiff(sdata);
1041		}
1042	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
1043			mpath->flags & MESH_PATH_RESOLVED) {
1044		mpath->flags &= ~MESH_PATH_RESOLVING;
1045		spin_unlock_bh(&mpath->state_lock);
1046		goto enddiscovery;
1047	}
1048
1049	ifmsh->last_preq = jiffies;
1050
1051	if (time_after(jiffies, ifmsh->last_sn_update +
1052				net_traversal_jiffies(sdata)) ||
1053	    time_before(jiffies, ifmsh->last_sn_update)) {
1054		++ifmsh->sn;
1055		sdata->u.mesh.last_sn_update = jiffies;
1056	}
1057	lifetime = default_lifetime(sdata);
1058	ttl = sdata->u.mesh.mshcfg.element_ttl;
1059	if (ttl == 0) {
1060		sdata->u.mesh.mshstats.dropped_frames_ttl++;
1061		spin_unlock_bh(&mpath->state_lock);
1062		goto enddiscovery;
1063	}
1064
1065	if (preq_node->flags & PREQ_Q_F_REFRESH)
1066		target_flags |= IEEE80211_PREQ_TO_FLAG;
1067	else
1068		target_flags &= ~IEEE80211_PREQ_TO_FLAG;
1069
1070	spin_unlock_bh(&mpath->state_lock);
1071	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
1072	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
1073			       target_flags, mpath->dst, mpath->sn, da, 0,
1074			       ttl, lifetime, 0, ifmsh->preq_id++, sdata);
1075	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
1076
1077enddiscovery:
1078	rcu_read_unlock();
1079	kfree(preq_node);
1080}
1081
1082/**
1083 * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
1084 *
1085 * @skb: 802.11 frame to be sent
1086 * @sdata: network subif the frame will be sent through
1087 *
1088 * Lookup next hop for given skb and start path discovery if no
1089 * forwarding information is found.
1090 *
1091 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
 1092 * skb is freed here if no mpath could be allocated.
1093 */
1094int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
1095			 struct sk_buff *skb)
1096{
1097	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1098	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1099	struct mesh_path *mpath;
1100	struct sk_buff *skb_to_free = NULL;
1101	u8 *target_addr = hdr->addr3;
1102	int err = 0;
1103
1104	/* Nulls are only sent to peers for PS and should be pre-addressed */
1105	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1106		return 0;
1107
1108	rcu_read_lock();
1109	err = mesh_nexthop_lookup(sdata, skb);
1110	if (!err)
1111		goto endlookup;
1112
1113	/* no nexthop found, start resolving */
1114	mpath = mesh_path_lookup(sdata, target_addr);
1115	if (!mpath) {
1116		mpath = mesh_path_add(sdata, target_addr);
1117		if (IS_ERR(mpath)) {
1118			mesh_path_discard_frame(sdata, skb);
1119			err = PTR_ERR(mpath);
1120			goto endlookup;
1121		}
1122	}
1123
1124	if (!(mpath->flags & MESH_PATH_RESOLVING))
1125		mesh_queue_preq(mpath, PREQ_Q_F_START);
1126
1127	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
1128		skb_to_free = skb_dequeue(&mpath->frame_queue);
1129
1130	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1131	ieee80211_set_qos_hdr(sdata, skb);
1132	skb_queue_tail(&mpath->frame_queue, skb);
1133	err = -ENOENT;
1134	if (skb_to_free)
1135		mesh_path_discard_frame(sdata, skb_to_free);
1136
1137endlookup:
1138	rcu_read_unlock();
1139	return err;
1140}
1141
1142/**
1143 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
1144 * this function is considered "using" the associated mpath, so preempt a path
1145 * refresh if this mpath expires soon.
1146 *
1147 * @skb: 802.11 frame to be sent
1148 * @sdata: network subif the frame will be sent through
1149 *
1150 * Returns: 0 if the next hop was found. Nonzero otherwise.
1151 */
1152int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata,
1153			struct sk_buff *skb)
1154{
1155	struct mesh_path *mpath;
1156	struct sta_info *next_hop;
1157	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1158	u8 *target_addr = hdr->addr3;
1159	int err = -ENOENT;
1160
1161	rcu_read_lock();
1162	mpath = mesh_path_lookup(sdata, target_addr);
1163
1164	if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
1165		goto endlookup;
1166
1167	if (time_after(jiffies,
1168		       mpath->exp_time -
1169		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
1170	    ether_addr_equal(sdata->vif.addr, hdr->addr4) &&
1171	    !(mpath->flags & MESH_PATH_RESOLVING) &&
1172	    !(mpath->flags & MESH_PATH_FIXED))
1173		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
1174
1175	next_hop = rcu_dereference(mpath->next_hop);
1176	if (next_hop) {
1177		memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
1178		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
1179		ieee80211_mps_set_frame_flags(sdata, next_hop, hdr);
1180		err = 0;
1181	}
1182
1183endlookup:
1184	rcu_read_unlock();
1185	return err;
1186}
1187
1188void mesh_path_timer(unsigned long data)
1189{
1190	struct mesh_path *mpath = (void *) data;
1191	struct ieee80211_sub_if_data *sdata = mpath->sdata;
1192	int ret;
1193
1194	if (sdata->local->quiescing)
1195		return;
1196
1197	spin_lock_bh(&mpath->state_lock);
1198	if (mpath->flags & MESH_PATH_RESOLVED ||
1199			(!(mpath->flags & MESH_PATH_RESOLVING))) {
1200		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
1201		spin_unlock_bh(&mpath->state_lock);
1202	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
1203		++mpath->discovery_retries;
1204		mpath->discovery_timeout *= 2;
1205		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
1206		spin_unlock_bh(&mpath->state_lock);
1207		mesh_queue_preq(mpath, 0);
1208	} else {
1209		mpath->flags &= ~(MESH_PATH_RESOLVING |
1210				  MESH_PATH_RESOLVED |
1211				  MESH_PATH_REQ_QUEUED);
1212		mpath->exp_time = jiffies;
1213		spin_unlock_bh(&mpath->state_lock);
1214		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
1215			ret = mesh_path_send_to_gates(mpath);
1216			if (ret)
1217				mhwmp_dbg(sdata, "no gate was reachable\n");
1218		} else
1219			mesh_path_flush_pending(mpath);
1220	}
1221}
1222
1223void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1224{
1225	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1226	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
1227	u8 flags, target_flags = 0;
1228
1229	flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
1230			? RANN_FLAG_IS_GATE : 0;
1231
1232	switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
1233	case IEEE80211_PROACTIVE_RANN:
1234		mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
1235				       ++ifmsh->sn, 0, NULL, 0, broadcast_addr,
1236				       0, ifmsh->mshcfg.element_ttl,
1237				       interval, 0, 0, sdata);
1238		break;
1239	case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
1240		flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
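		/* fall through: PROACTIVE_PREQ_WITH_PREP sends the same
		 * proactive PREQ as below, just with the PREP flag set
		 */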
1241	case IEEE80211_PROACTIVE_PREQ_NO_PREP:
1242		interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
1243		target_flags |= IEEE80211_PREQ_TO_FLAG |
1244				IEEE80211_PREQ_USN_FLAG;
1245		mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
1246				       ++ifmsh->sn, target_flags,
1247				       (u8 *) broadcast_addr, 0, broadcast_addr,
1248				       0, ifmsh->mshcfg.element_ttl, interval,
1249				       0, ifmsh->preq_id++, sdata);
1250		break;
1251	default:
1252		mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
1253		return;
1254	}
1255}
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2008, 2009 open80211s Ltd.
   4 * Copyright (C) 2019 Intel Corporation
   5 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
   6 */
   7
   8#include <linux/slab.h>
   9#include <linux/etherdevice.h>
  10#include <asm/unaligned.h>
  11#include "wme.h"
  12#include "mesh.h"
  13
  14#define TEST_FRAME_LEN	8192
  15#define MAX_METRIC	0xffffffff
  16#define ARITH_SHIFT	8
  17#define LINK_FAIL_THRESH 95
  18
  19#define MAX_PREQ_QUEUE_LEN	64
  20
  21static void mesh_queue_preq(struct mesh_path *, u8);
  22
  23static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
  24{
  25	if (ae)
  26		offset += 6;
  27	return get_unaligned_le32(preq_elem + offset);
  28}
  29
  30static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
  31{
  32	if (ae)
  33		offset += 6;
  34	return get_unaligned_le16(preq_elem + offset);
  35}
  36
  37/* HWMP IE processing macros */
  38#define AE_F			(1<<6)
  39#define AE_F_SET(x)		(*x & AE_F)
  40#define PREQ_IE_FLAGS(x)	(*(x))
  41#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
  42#define PREQ_IE_TTL(x)		(*(x + 2))
  43#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
  44#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
  45#define PREQ_IE_ORIG_SN(x)	u32_field_get(x, 13, 0)
  46#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
  47#define PREQ_IE_METRIC(x) 	u32_field_get(x, 21, AE_F_SET(x))
  48#define PREQ_IE_TARGET_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
  49#define PREQ_IE_TARGET_ADDR(x) 	(AE_F_SET(x) ? x + 33 : x + 27)
  50#define PREQ_IE_TARGET_SN(x) 	u32_field_get(x, 33, AE_F_SET(x))
  51
  52
  53#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
  54#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
  55#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
  56#define PREP_IE_ORIG_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
  57#define PREP_IE_ORIG_SN(x)	u32_field_get(x, 27, AE_F_SET(x))
  58#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
  59#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
  60#define PREP_IE_TARGET_ADDR(x)	(x + 3)
  61#define PREP_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
  62
  63#define PERR_IE_TTL(x)		(*(x))
  64#define PERR_IE_TARGET_FLAGS(x)	(*(x + 2))
  65#define PERR_IE_TARGET_ADDR(x)	(x + 3)
  66#define PERR_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
  67#define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)
  68
  69#define MSEC_TO_TU(x) (x*1000/1024)
  70#define SN_GT(x, y) ((s32)(y - x) < 0)
  71#define SN_LT(x, y) ((s32)(x - y) < 0)
  72#define MAX_SANE_SN_DELTA 32
  73
  74static inline u32 SN_DELTA(u32 x, u32 y)
  75{
  76	return x >= y ? x - y : y - x;
  77}
  78
  79#define net_traversal_jiffies(s) \
  80	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
  81#define default_lifetime(s) \
  82	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
  83#define min_preq_int_jiff(s) \
  84	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
  85#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
  86#define disc_timeout_jiff(s) \
  87	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
  88#define root_path_confirmation_jiffies(s) \
  89	msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)
  90
  91enum mpath_frame_type {
  92	MPATH_PREQ = 0,
  93	MPATH_PREP,
  94	MPATH_PERR,
  95	MPATH_RANN
  96};
  97
  98static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
  99
 100static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
 101				  const u8 *orig_addr, u32 orig_sn,
 102				  u8 target_flags, const u8 *target,
 103				  u32 target_sn, const u8 *da,
 104				  u8 hop_count, u8 ttl,
 105				  u32 lifetime, u32 metric, u32 preq_id,
 106				  struct ieee80211_sub_if_data *sdata)
 107{
 108	struct ieee80211_local *local = sdata->local;
 109	struct sk_buff *skb;
 110	struct ieee80211_mgmt *mgmt;
 111	u8 *pos, ie_len;
 112	int hdr_len = offsetofend(struct ieee80211_mgmt,
 113				  u.action.u.mesh_action);
 114
 115	skb = dev_alloc_skb(local->tx_headroom +
 116			    hdr_len +
 117			    2 + 37); /* max HWMP IE */
 118	if (!skb)
 119		return -1;
 120	skb_reserve(skb, local->tx_headroom);
 121	mgmt = skb_put_zero(skb, hdr_len);
 122	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 123					  IEEE80211_STYPE_ACTION);
 124
 125	memcpy(mgmt->da, da, ETH_ALEN);
 126	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 127	/* BSSID == SA */
 128	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 129	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
 130	mgmt->u.action.u.mesh_action.action_code =
 131					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
 132
 133	switch (action) {
 134	case MPATH_PREQ:
 135		mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
 136		ie_len = 37;
 137		pos = skb_put(skb, 2 + ie_len);
 138		*pos++ = WLAN_EID_PREQ;
 139		break;
 140	case MPATH_PREP:
 141		mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
 142		ie_len = 31;
 143		pos = skb_put(skb, 2 + ie_len);
 144		*pos++ = WLAN_EID_PREP;
 145		break;
 146	case MPATH_RANN:
 147		mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
 148		ie_len = sizeof(struct ieee80211_rann_ie);
 149		pos = skb_put(skb, 2 + ie_len);
 150		*pos++ = WLAN_EID_RANN;
 151		break;
 152	default:
 153		kfree_skb(skb);
 154		return -ENOTSUPP;
 155	}
 156	*pos++ = ie_len;
 157	*pos++ = flags;
 158	*pos++ = hop_count;
 159	*pos++ = ttl;
 160	if (action == MPATH_PREP) {
 161		memcpy(pos, target, ETH_ALEN);
 162		pos += ETH_ALEN;
 163		put_unaligned_le32(target_sn, pos);
 164		pos += 4;
 165	} else {
 166		if (action == MPATH_PREQ) {
 167			put_unaligned_le32(preq_id, pos);
 168			pos += 4;
 169		}
 170		memcpy(pos, orig_addr, ETH_ALEN);
 171		pos += ETH_ALEN;
 172		put_unaligned_le32(orig_sn, pos);
 173		pos += 4;
 174	}
 175	put_unaligned_le32(lifetime, pos); /* interval for RANN */
 176	pos += 4;
 177	put_unaligned_le32(metric, pos);
 178	pos += 4;
 179	if (action == MPATH_PREQ) {
 180		*pos++ = 1; /* destination count */
 181		*pos++ = target_flags;
 182		memcpy(pos, target, ETH_ALEN);
 183		pos += ETH_ALEN;
 184		put_unaligned_le32(target_sn, pos);
 185		pos += 4;
 186	} else if (action == MPATH_PREP) {
 187		memcpy(pos, orig_addr, ETH_ALEN);
 188		pos += ETH_ALEN;
 189		put_unaligned_le32(orig_sn, pos);
 190		pos += 4;
 191	}
 192
 193	ieee80211_tx_skb(sdata, skb);
 194	return 0;
 195}
 196
 197
 198/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 199 *  headroom in case the frame is encrypted. */
 200static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
 201		struct sk_buff *skb)
 202{
 203	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 204	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 205
 206	skb_reset_mac_header(skb);
 207	skb_reset_network_header(skb);
 208	skb_reset_transport_header(skb);
 209
 210	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
 211	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
 212	skb->priority = 7;
 213
 214	info->control.vif = &sdata->vif;
 215	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
 216	ieee80211_set_qos_hdr(sdata, skb);
 217	ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
 218}
 219
 220/**
 221 * mesh_path_error_tx - Sends a PERR mesh management frame
 222 *
 223 * @ttl: allowed remaining hops
 224 * @target: broken destination
 225 * @target_sn: SN of the broken destination
 226 * @target_rcode: reason code for this PERR
 227 * @ra: node this frame is addressed to
 228 * @sdata: local mesh subif
 229 *
 230 * Note: This function may be called with driver locks taken that the driver
 231 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 232 * frame directly but add it to the pending queue instead.
 233 */
 234int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 235		       u8 ttl, const u8 *target, u32 target_sn,
 236		       u16 target_rcode, const u8 *ra)
 237{
 238	struct ieee80211_local *local = sdata->local;
 239	struct sk_buff *skb;
 240	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 241	struct ieee80211_mgmt *mgmt;
 242	u8 *pos, ie_len;
 243	int hdr_len = offsetofend(struct ieee80211_mgmt,
 244				  u.action.u.mesh_action);
 245
 246	if (time_before(jiffies, ifmsh->next_perr))
 247		return -EAGAIN;
 248
 249	skb = dev_alloc_skb(local->tx_headroom +
 250			    sdata->encrypt_headroom +
 251			    IEEE80211_ENCRYPT_TAILROOM +
 252			    hdr_len +
 253			    2 + 15 /* PERR IE */);
 254	if (!skb)
 255		return -1;
 256	skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom);
 257	mgmt = skb_put_zero(skb, hdr_len);
 258	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 259					  IEEE80211_STYPE_ACTION);
 260
 261	memcpy(mgmt->da, ra, ETH_ALEN);
 262	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 263	/* BSSID == SA */
 264	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 265	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
 266	mgmt->u.action.u.mesh_action.action_code =
 267					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
 268	ie_len = 15;
 269	pos = skb_put(skb, 2 + ie_len);
 270	*pos++ = WLAN_EID_PERR;
 271	*pos++ = ie_len;
 272	/* ttl */
 273	*pos++ = ttl;
 274	/* number of destinations */
 275	*pos++ = 1;
 276	/* Flags field has AE bit only as defined in
 277	 * sec 8.4.2.117 IEEE802.11-2012
 278	 */
 279	*pos = 0;
 280	pos++;
 281	memcpy(pos, target, ETH_ALEN);
 282	pos += ETH_ALEN;
 283	put_unaligned_le32(target_sn, pos);
 284	pos += 4;
 285	put_unaligned_le16(target_rcode, pos);
 286
 287	/* see note in function header */
 288	prepare_frame_for_deferred_tx(sdata, skb);
 289	ifmsh->next_perr = TU_TO_EXP_TIME(
 290				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
 291	ieee80211_add_pending_skb(local, skb);
 292	return 0;
 293}
 294
 295void ieee80211s_update_metric(struct ieee80211_local *local,
 296			      struct sta_info *sta,
 297			      struct ieee80211_tx_status *st)
 298{
 299	struct ieee80211_tx_info *txinfo = st->info;
 300	int failed;
 301	struct rate_info rinfo;
 302
 303	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 304
 305	/* moving average, scaled to 100.
 306	 * feed failure as 100 and success as 0
 307	 */
 308	ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, failed * 100);
 309	if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) >
 310			LINK_FAIL_THRESH)
 311		mesh_plink_broken(sta);
 312
 313	sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
 314	ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg,
 315				  cfg80211_calculate_bitrate(&rinfo));
 316}
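
/* Compared with the older code above, the failure average now lives in
 * an EWMA: every ACKed frame feeds 0 and every lost frame feeds 100,
 * and the peer link is declared broken once the average exceeds
 * LINK_FAIL_THRESH (95).  The bitrates actually used are likewise
 * averaged into tx_rate_avg, giving airtime_link_metric_get() a rate to
 * fall back on when sta_get_expected_throughput() reports nothing.
 */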
 317
 318u32 airtime_link_metric_get(struct ieee80211_local *local,
 319			    struct sta_info *sta)
 320{
 321	/* This should be adjusted for each device */
 322	int device_constant = 1 << ARITH_SHIFT;
 323	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
 324	int s_unit = 1 << ARITH_SHIFT;
 325	int rate, err;
 326	u32 tx_time, estimated_retx;
 327	u64 result;
 328	unsigned long fail_avg =
 329		ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);
 330
 331	/* Try to get rate based on HW/SW RC algorithm.
  332	 * Rate is returned in units of Kbps; correct this
  333	 * to comply with airtime calculation units.
  334	 * Round up in case we get rate < 100Kbps.
 335	 */
 336	rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100);
 337
 338	if (rate) {
 339		err = 0;
 340	} else {
 341		if (fail_avg > LINK_FAIL_THRESH)
 342			return MAX_METRIC;
 343
 344		rate = ewma_mesh_tx_rate_avg_read(&sta->mesh->tx_rate_avg);
 345		if (WARN_ON(!rate))
 346			return MAX_METRIC;
 347
 348		err = (fail_avg << ARITH_SHIFT) / 100;
 349	}
 350
 351	/* bitrate is in units of 100 Kbps, while we need rate in units of
 352	 * 1Mbps. This will be corrected on tx_time computation.
 353	 */
 354	tx_time = (device_constant + 10 * test_frame_len / rate);
 355	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
 356	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
 357	return (u32)result;
 358}
 359
 360/**
 361 * hwmp_route_info_get - Update routing info to originator and transmitter
 362 *
 363 * @sdata: local mesh subif
 364 * @mgmt: mesh management frame
 365 * @hwmp_ie: hwmp information element (PREP or PREQ)
 366 * @action: type of hwmp ie
 367 *
 368 * This function updates the path routing information to the originator and the
 369 * transmitter of a HWMP PREQ or PREP frame.
 370 *
 371 * Returns: metric to frame originator or 0 if the frame should not be further
 372 * processed
 373 *
 374 * Notes: this function is the only place (besides user-provided info) where
 375 * path routing information is updated.
 376 */
 377static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
 378			       struct ieee80211_mgmt *mgmt,
 379			       const u8 *hwmp_ie, enum mpath_frame_type action)
 380{
 381	struct ieee80211_local *local = sdata->local;
 382	struct mesh_path *mpath;
 383	struct sta_info *sta;
 384	bool fresh_info;
 385	const u8 *orig_addr, *ta;
 386	u32 orig_sn, orig_metric;
 387	unsigned long orig_lifetime, exp_time;
 388	u32 last_hop_metric, new_metric;
 389	bool process = true;
 390	u8 hopcount;
 391
 392	rcu_read_lock();
 393	sta = sta_info_get(sdata, mgmt->sa);
 394	if (!sta) {
 395		rcu_read_unlock();
 396		return 0;
 397	}
 398
 399	last_hop_metric = airtime_link_metric_get(local, sta);
 400	/* Update and check originator routing info */
 401	fresh_info = true;
 402
 403	switch (action) {
 404	case MPATH_PREQ:
 405		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
 406		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
 407		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
 408		orig_metric = PREQ_IE_METRIC(hwmp_ie);
 409		hopcount = PREQ_IE_HOPCOUNT(hwmp_ie) + 1;
 410		break;
 411	case MPATH_PREP:
 412		/* Originator here refers to the MP that was the target in the
  413		 * Path Request. We diverge from the nomenclature in the draft
 414		 * so that we can easily use a single function to gather path
 415		 * information from both PREQ and PREP frames.
 416		 */
 417		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
 418		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
 419		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
 420		orig_metric = PREP_IE_METRIC(hwmp_ie);
 421		hopcount = PREP_IE_HOPCOUNT(hwmp_ie) + 1;
 422		break;
 423	default:
 424		rcu_read_unlock();
 425		return 0;
 426	}
 427	new_metric = orig_metric + last_hop_metric;
 428	if (new_metric < orig_metric)
 429		new_metric = MAX_METRIC;
 430	exp_time = TU_TO_EXP_TIME(orig_lifetime);
 431
 432	if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
 433		/* This MP is the originator, we are not interested in this
 434		 * frame, except for updating transmitter's path info.
 435		 */
 436		process = false;
 437		fresh_info = false;
 438	} else {
 439		mpath = mesh_path_lookup(sdata, orig_addr);
 440		if (mpath) {
 441			spin_lock_bh(&mpath->state_lock);
 442			if (mpath->flags & MESH_PATH_FIXED)
 443				fresh_info = false;
 444			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
 445			    (mpath->flags & MESH_PATH_SN_VALID)) {
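				/* mult_frac(new_metric, 10, 9) inflates the
				 * candidate metric by ~11% when the frame came
				 * in via a different next hop than the one in
				 * use, so the path is only replaced when the
				 * alternative is at least roughly 10% better;
				 * this damps flapping between paths with
				 * similar metrics.
				 */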
 446				if (SN_GT(mpath->sn, orig_sn) ||
 447				    (mpath->sn == orig_sn &&
 448				     (rcu_access_pointer(mpath->next_hop) !=
 449						      sta ?
 450					      mult_frac(new_metric, 10, 9) :
 451					      new_metric) >= mpath->metric)) {
 452					process = false;
 453					fresh_info = false;
 454				}
 455			} else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
 456				bool have_sn, newer_sn, bounced;
 457
 458				have_sn = mpath->flags & MESH_PATH_SN_VALID;
 459				newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
 460				bounced = have_sn &&
 461					  (SN_DELTA(orig_sn, mpath->sn) >
 462							MAX_SANE_SN_DELTA);
 463
 464				if (!have_sn || newer_sn) {
 465					/* if SN is newer than what we had
 466					 * then we can take it */;
 467				} else if (bounced) {
 468					/* if SN is way different than what
 469					 * we had then assume the other side
 470					 * rebooted or restarted */;
 471				} else {
 472					process = false;
 473					fresh_info = false;
 474				}
 475			}
 476		} else {
 477			mpath = mesh_path_add(sdata, orig_addr);
 478			if (IS_ERR(mpath)) {
 479				rcu_read_unlock();
 480				return 0;
 481			}
 482			spin_lock_bh(&mpath->state_lock);
 483		}
 484
 485		if (fresh_info) {
 486			if (rcu_access_pointer(mpath->next_hop) != sta)
 487				mpath->path_change_count++;
 488			mesh_path_assign_nexthop(mpath, sta);
 489			mpath->flags |= MESH_PATH_SN_VALID;
 490			mpath->metric = new_metric;
 491			mpath->sn = orig_sn;
 492			mpath->exp_time = time_after(mpath->exp_time, exp_time)
 493					  ?  mpath->exp_time : exp_time;
 494			mpath->hop_count = hopcount;
 495			mesh_path_activate(mpath);
 496			spin_unlock_bh(&mpath->state_lock);
 497			ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
 498			/* init it at a low value - 0 start is tricky */
 499			ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
 500			mesh_path_tx_pending(mpath);
 501			/* the draft says preq_id should be saved too, but there
 502			 * does not seem to be any use for it, so skip it for now
 503			 */
 504		} else
 505			spin_unlock_bh(&mpath->state_lock);
 506	}
 507
 508	/* Update and check transmitter routing info */
 509	ta = mgmt->sa;
 510	if (ether_addr_equal(orig_addr, ta))
 511		fresh_info = false;
 512	else {
 513		fresh_info = true;
 514
 515		mpath = mesh_path_lookup(sdata, ta);
 516		if (mpath) {
 517			spin_lock_bh(&mpath->state_lock);
 518			if ((mpath->flags & MESH_PATH_FIXED) ||
 519			    ((mpath->flags & MESH_PATH_ACTIVE) &&
 520			     ((rcu_access_pointer(mpath->next_hop) != sta ?
 521				       mult_frac(last_hop_metric, 10, 9) :
 522				       last_hop_metric) > mpath->metric)))
 523				fresh_info = false;
 524		} else {
 525			mpath = mesh_path_add(sdata, ta);
 526			if (IS_ERR(mpath)) {
 527				rcu_read_unlock();
 528				return 0;
 529			}
 530			spin_lock_bh(&mpath->state_lock);
 531		}
 532
 533		if (fresh_info) {
 534			if (rcu_access_pointer(mpath->next_hop) != sta)
 535				mpath->path_change_count++;
 536			mesh_path_assign_nexthop(mpath, sta);
 537			mpath->metric = last_hop_metric;
 538			mpath->exp_time = time_after(mpath->exp_time, exp_time)
 539					  ?  mpath->exp_time : exp_time;
 540			mpath->hop_count = 1;
 541			mesh_path_activate(mpath);
 542			spin_unlock_bh(&mpath->state_lock);
 543			ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
 544			/* init it at a low value - 0 start is tricky */
 545			ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
 546			mesh_path_tx_pending(mpath);
 547		} else
 548			spin_unlock_bh(&mpath->state_lock);
 549	}
 550
 551	rcu_read_unlock();
 552
 553	return process ? new_metric : 0;
 554}
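
/*
 * Editorial sketch (not part of the original file): a standalone
 * illustration of the freshness rules applied by hwmp_route_info_get()
 * above.  Sequence numbers are compared with the same signed-wraparound
 * rule as SN_GT(); the helper names and SKETCH_MAX_METRIC are
 * hypothetical and exist only for this example.
 */
#define SKETCH_MAX_METRIC	0xffffffffU

/* "newer" in the wraparound sense: 0x00000002 is newer than 0xfffffffe */
static bool sketch_sn_is_newer(u32 a, u32 b)
{
	return (s32)(b - a) < 0;
}

/* metric accumulation saturates instead of wrapping */
static u32 sketch_metric_add(u32 path_metric, u32 hop_metric)
{
	u32 sum = path_metric + hop_metric;

	return sum < path_metric ? SKETCH_MAX_METRIC : sum;
}

/*
 * Keep the current path unless the update is strictly better: a newer
 * sequence number always wins, and at an equal sequence number a
 * candidate that would change the next hop has to beat the stored
 * metric by roughly ten percent (mirroring mult_frac(new_metric, 10, 9)).
 */
static bool sketch_update_is_better(u32 cur_sn, u32 cur_metric,
				    u32 new_sn, u32 new_metric,
				    bool same_next_hop)
{
	u64 cmp = same_next_hop ? new_metric : (u64)new_metric * 10 / 9;

	if (sketch_sn_is_newer(new_sn, cur_sn))
		return true;
	return new_sn == cur_sn && cmp < cur_metric;
}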
 555
 556static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 557				    struct ieee80211_mgmt *mgmt,
 558				    const u8 *preq_elem, u32 orig_metric)
 559{
 560	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 561	struct mesh_path *mpath = NULL;
 562	const u8 *target_addr, *orig_addr;
 563	const u8 *da;
 564	u8 target_flags, ttl, flags;
 565	u32 orig_sn, target_sn, lifetime, target_metric = 0;
 566	bool reply = false;
 567	bool forward = true;
 568	bool root_is_gate;
 569
 570	/* Update target SN, if present */
 571	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
 572	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
 573	target_sn = PREQ_IE_TARGET_SN(preq_elem);
 574	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
 575	target_flags = PREQ_IE_TARGET_F(preq_elem);
 576	/* Proactive PREQ gate announcements */
 577	flags = PREQ_IE_FLAGS(preq_elem);
 578	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 579
 580	mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);
 581
 582	if (ether_addr_equal(target_addr, sdata->vif.addr)) {
 583		mhwmp_dbg(sdata, "PREQ is for us\n");
 584		forward = false;
 585		reply = true;
 586		target_metric = 0;
 587
 588		if (SN_GT(target_sn, ifmsh->sn))
 589			ifmsh->sn = target_sn;
 590
 591		if (time_after(jiffies, ifmsh->last_sn_update +
 592					net_traversal_jiffies(sdata)) ||
 593		    time_before(jiffies, ifmsh->last_sn_update)) {
 594			++ifmsh->sn;
 595			ifmsh->last_sn_update = jiffies;
 596		}
 597		target_sn = ifmsh->sn;
 598	} else if (is_broadcast_ether_addr(target_addr) &&
 599		   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
 600		rcu_read_lock();
 601		mpath = mesh_path_lookup(sdata, orig_addr);
 602		if (mpath) {
 603			if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
 604				reply = true;
 605				target_addr = sdata->vif.addr;
 606				target_sn = ++ifmsh->sn;
 607				target_metric = 0;
 608				ifmsh->last_sn_update = jiffies;
 609			}
 610			if (root_is_gate)
 611				mesh_path_add_gate(mpath);
 612		}
 613		rcu_read_unlock();
 614	} else {
 615		rcu_read_lock();
 616		mpath = mesh_path_lookup(sdata, target_addr);
 617		if (mpath) {
 618			if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
 619					SN_LT(mpath->sn, target_sn)) {
 620				mpath->sn = target_sn;
 621				mpath->flags |= MESH_PATH_SN_VALID;
 622			} else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
 623					(mpath->flags & MESH_PATH_ACTIVE)) {
 624				reply = true;
 625				target_metric = mpath->metric;
 626				target_sn = mpath->sn;
 627				/* Case E2 of sec 13.10.9.3 IEEE 802.11-2012 */
 628				target_flags |= IEEE80211_PREQ_TO_FLAG;
 629			}
 630		}
 631		rcu_read_unlock();
 632	}
 633
 634	if (reply) {
 635		lifetime = PREQ_IE_LIFETIME(preq_elem);
 636		ttl = ifmsh->mshcfg.element_ttl;
 637		if (ttl != 0) {
 638			mhwmp_dbg(sdata, "replying to the PREQ\n");
 639			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
 640					       orig_sn, 0, target_addr,
 641					       target_sn, mgmt->sa, 0, ttl,
 642					       lifetime, target_metric, 0,
 643					       sdata);
 644		} else {
 645			ifmsh->mshstats.dropped_frames_ttl++;
 646		}
 647	}
 648
 649	if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
 650		u32 preq_id;
 651		u8 hopcount;
 652
 653		ttl = PREQ_IE_TTL(preq_elem);
 654		lifetime = PREQ_IE_LIFETIME(preq_elem);
 655		if (ttl <= 1) {
 656			ifmsh->mshstats.dropped_frames_ttl++;
 657			return;
 658		}
 659		mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
 660		--ttl;
 661		preq_id = PREQ_IE_PREQ_ID(preq_elem);
 662		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
 663		da = (mpath && mpath->is_root) ?
 664			mpath->rann_snd_addr : broadcast_addr;
 665
 666		if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
 667			target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
 668			target_sn = PREQ_IE_TARGET_SN(preq_elem);
 669		}
 670
 671		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
 672				       orig_sn, target_flags, target_addr,
 673				       target_sn, da, hopcount, ttl, lifetime,
 674				       orig_metric, preq_id, sdata);
 675		if (!is_multicast_ether_addr(da))
 676			ifmsh->mshstats.fwded_unicast++;
 677		else
 678			ifmsh->mshstats.fwded_mcast++;
 679		ifmsh->mshstats.fwded_frames++;
 680	}
 681}
 682
 683
 684static inline struct sta_info *
 685next_hop_deref_protected(struct mesh_path *mpath)
 686{
 687	return rcu_dereference_protected(mpath->next_hop,
 688					 lockdep_is_held(&mpath->state_lock));
 689}
 690
 691
 692static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
 693				    struct ieee80211_mgmt *mgmt,
 694				    const u8 *prep_elem, u32 metric)
 695{
 696	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 697	struct mesh_path *mpath;
 698	const u8 *target_addr, *orig_addr;
 699	u8 ttl, hopcount, flags;
 700	u8 next_hop[ETH_ALEN];
 701	u32 target_sn, orig_sn, lifetime;
 702
 703	mhwmp_dbg(sdata, "received PREP from %pM\n",
 704		  PREP_IE_TARGET_ADDR(prep_elem));
 705
 706	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
 707	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 708		/* destination, no forwarding required */
 709		return;
 710
 711	if (!ifmsh->mshcfg.dot11MeshForwarding)
 712		return;
 713
 714	ttl = PREP_IE_TTL(prep_elem);
 715	if (ttl <= 1) {
 716		sdata->u.mesh.mshstats.dropped_frames_ttl++;
 717		return;
 718	}
 719
 720	rcu_read_lock();
 721	mpath = mesh_path_lookup(sdata, orig_addr);
 722	if (mpath)
 723		spin_lock_bh(&mpath->state_lock);
 724	else
 725		goto fail;
 726	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
 727		spin_unlock_bh(&mpath->state_lock);
 728		goto fail;
 729	}
 730	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
 731	spin_unlock_bh(&mpath->state_lock);
 732	--ttl;
 733	flags = PREP_IE_FLAGS(prep_elem);
 734	lifetime = PREP_IE_LIFETIME(prep_elem);
 735	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
 736	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
 737	target_sn = PREP_IE_TARGET_SN(prep_elem);
 738	orig_sn = PREP_IE_ORIG_SN(prep_elem);
 739
 740	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0,
 741			       target_addr, target_sn, next_hop, hopcount,
 742			       ttl, lifetime, metric, 0, sdata);
 743	rcu_read_unlock();
 744
 745	sdata->u.mesh.mshstats.fwded_unicast++;
 746	sdata->u.mesh.mshstats.fwded_frames++;
 747	return;
 748
 749fail:
 750	rcu_read_unlock();
 751	sdata->u.mesh.mshstats.dropped_frames_no_route++;
 752}
 753
 754static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
 755				    struct ieee80211_mgmt *mgmt,
 756				    const u8 *perr_elem)
 757{
 758	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 759	struct mesh_path *mpath;
 760	u8 ttl;
 761	const u8 *ta, *target_addr;
 762	u32 target_sn;
 763	u16 target_rcode;
 764
 765	ta = mgmt->sa;
 766	ttl = PERR_IE_TTL(perr_elem);
 767	if (ttl <= 1) {
 768		ifmsh->mshstats.dropped_frames_ttl++;
 769		return;
 770	}
 771	ttl--;
 772	target_addr = PERR_IE_TARGET_ADDR(perr_elem);
 773	target_sn = PERR_IE_TARGET_SN(perr_elem);
 774	target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
 775
 776	rcu_read_lock();
 777	mpath = mesh_path_lookup(sdata, target_addr);
 778	if (mpath) {
 779		struct sta_info *sta;
 780
 781		spin_lock_bh(&mpath->state_lock);
 782		sta = next_hop_deref_protected(mpath);
 783		if (mpath->flags & MESH_PATH_ACTIVE &&
 784		    ether_addr_equal(ta, sta->sta.addr) &&
 785		    !(mpath->flags & MESH_PATH_FIXED) &&
 786		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
 787		    SN_GT(target_sn, mpath->sn)  || target_sn == 0)) {
 788			mpath->flags &= ~MESH_PATH_ACTIVE;
 789			if (target_sn != 0)
 790				mpath->sn = target_sn;
 791			else
 792				mpath->sn += 1;
 793			spin_unlock_bh(&mpath->state_lock);
 794			if (!ifmsh->mshcfg.dot11MeshForwarding)
 795				goto endperr;
 796			mesh_path_error_tx(sdata, ttl, target_addr,
 797					   target_sn, target_rcode,
 798					   broadcast_addr);
 799		} else
 800			spin_unlock_bh(&mpath->state_lock);
 801	}
 802endperr:
 803	rcu_read_unlock();
 804}
 805
 806static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 807				    struct ieee80211_mgmt *mgmt,
 808				    const struct ieee80211_rann_ie *rann)
 809{
 810	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 811	struct ieee80211_local *local = sdata->local;
 812	struct sta_info *sta;
 813	struct mesh_path *mpath;
 814	u8 ttl, flags, hopcount;
 815	const u8 *orig_addr;
 816	u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
 817	bool root_is_gate;
 818
 819	ttl = rann->rann_ttl;
 820	flags = rann->rann_flags;
 821	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 822	orig_addr = rann->rann_addr;
 823	orig_sn = le32_to_cpu(rann->rann_seq);
 824	interval = le32_to_cpu(rann->rann_interval);
 825	hopcount = rann->rann_hopcount;
 826	hopcount++;
 827	orig_metric = le32_to_cpu(rann->rann_metric);
 828
 829	/* Ignore our own RANNs */
 830	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 831		return;
 832
 833	mhwmp_dbg(sdata,
 834		  "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
 835		  orig_addr, mgmt->sa, root_is_gate);
 836
 837	rcu_read_lock();
 838	sta = sta_info_get(sdata, mgmt->sa);
 839	if (!sta) {
 840		rcu_read_unlock();
 841		return;
 842	}
 843
 844	last_hop_metric = airtime_link_metric_get(local, sta);
 845	new_metric = orig_metric + last_hop_metric;
 846	if (new_metric < orig_metric)
 847		new_metric = MAX_METRIC;
 848
 849	mpath = mesh_path_lookup(sdata, orig_addr);
 850	if (!mpath) {
 851		mpath = mesh_path_add(sdata, orig_addr);
 852		if (IS_ERR(mpath)) {
 853			rcu_read_unlock();
 854			sdata->u.mesh.mshstats.dropped_frames_no_route++;
 855			return;
 856		}
 857	}
 858
 859	if (!(SN_LT(mpath->sn, orig_sn)) &&
 860	    !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
 861		rcu_read_unlock();
 862		return;
 863	}
 864
 865	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
 866	     (time_after(jiffies, mpath->last_preq_to_root +
 867				  root_path_confirmation_jiffies(sdata)) ||
 868	     time_before(jiffies, mpath->last_preq_to_root))) &&
 869	     !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
 870		mhwmp_dbg(sdata,
 871			  "time to refresh root mpath %pM\n",
 872			  orig_addr);
 873		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
 874		mpath->last_preq_to_root = jiffies;
 875	}
 876
 877	mpath->sn = orig_sn;
 878	mpath->rann_metric = new_metric;
 879	mpath->is_root = true;
 880	/* Record the RANN sender's address so we can send individually
 881	 * addressed PREQs destined for the root mesh STA */
 882	memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
 883
 884	if (root_is_gate)
 885		mesh_path_add_gate(mpath);
 886
 887	if (ttl <= 1) {
 888		ifmsh->mshstats.dropped_frames_ttl++;
 889		rcu_read_unlock();
 890		return;
 891	}
 892	ttl--;
 893
 894	if (ifmsh->mshcfg.dot11MeshForwarding) {
 895		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
 896				       orig_sn, 0, NULL, 0, broadcast_addr,
 897				       hopcount, ttl, interval,
 898				       new_metric, 0, sdata);
 899	}
 900
 901	rcu_read_unlock();
 902}
 903
 904
 905void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
 906			    struct ieee80211_mgmt *mgmt, size_t len)
 907{
 908	struct ieee802_11_elems elems;
 909	size_t baselen;
 910	u32 path_metric;
 911	struct sta_info *sta;
 912
 913	/* need action_code */
 914	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
 915		return;
 916
 917	rcu_read_lock();
 918	sta = sta_info_get(sdata, mgmt->sa);
 919	if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
 920		rcu_read_unlock();
 921		return;
 922	}
 923	rcu_read_unlock();
 924
 925	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
 926	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
 927			       len - baselen, false, &elems, mgmt->bssid, NULL);
 928
 929	if (elems.preq) {
 930		if (elems.preq_len != 37)
 931			/* Right now we support just 1 destination and no AE */
 932			return;
 933		path_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
 934						  MPATH_PREQ);
 935		if (path_metric)
 936			hwmp_preq_frame_process(sdata, mgmt, elems.preq,
 937						path_metric);
 938	}
 939	if (elems.prep) {
 940		if (elems.prep_len != 31)
 941			/* Right now we support no AE */
 942			return;
 943		path_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
 944						  MPATH_PREP);
 945		if (path_metric)
 946			hwmp_prep_frame_process(sdata, mgmt, elems.prep,
 947						path_metric);
 948	}
 949	if (elems.perr) {
 950		if (elems.perr_len != 15)
 951			/* Right now we support only one destination per PERR */
 952			return;
 953		hwmp_perr_frame_process(sdata, mgmt, elems.perr);
 954	}
 955	if (elems.rann)
 956		hwmp_rann_frame_process(sdata, mgmt, elems.rann);
 957}
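
/*
 * Editorial sketch (not part of the original file): where the fixed
 * element lengths checked in mesh_rx_path_sel_frame() come from.  The
 * per-field sizes follow the HWMP element layouts of IEEE 802.11-2012,
 * assuming a single target and no Address Extension; the enum names
 * are hypothetical.
 */
enum {
	/* flags + hopcount + TTL + PREQ ID + orig addr + orig SN +
	 * lifetime + metric + target count + target flags +
	 * target addr + target SN
	 */
	SKETCH_PREQ_IE_LEN = 1 + 1 + 1 + 4 + 6 + 4 + 4 + 4 + 1 + 1 + 6 + 4, /* 37 */
	/* flags + hopcount + TTL + target addr + target SN +
	 * lifetime + metric + orig addr + orig SN
	 */
	SKETCH_PREP_IE_LEN = 1 + 1 + 1 + 6 + 4 + 4 + 4 + 6 + 4,	/* 31 */
	/* TTL + target count + target flags + target addr +
	 * target SN + reason code
	 */
	SKETCH_PERR_IE_LEN = 1 + 1 + 1 + 6 + 4 + 2,			/* 15 */
};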
 958
 959/**
 960 * mesh_queue_preq - queue a PREQ to a given destination
 961 *
 962 * @mpath: mesh path to discover
 963 * @flags: special attributes of the PREQ to be sent
 964 *
 965 * Locking: the function must be called from within an RCU read lock block.
 966 *
 967 */
 968static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
 969{
 970	struct ieee80211_sub_if_data *sdata = mpath->sdata;
 971	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 972	struct mesh_preq_queue *preq_node;
 973
 974	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
 975	if (!preq_node) {
 976		mhwmp_dbg(sdata, "could not allocate PREQ node\n");
 977		return;
 978	}
 979
 980	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
 981	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
 982		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 983		kfree(preq_node);
 984		if (printk_ratelimit())
 985			mhwmp_dbg(sdata, "PREQ node queue full\n");
 986		return;
 987	}
 988
 989	spin_lock(&mpath->state_lock);
 990	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
 991		spin_unlock(&mpath->state_lock);
 992		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 993		kfree(preq_node);
 994		return;
 995	}
 996
 997	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
 998	preq_node->flags = flags;
 999
1000	mpath->flags |= MESH_PATH_REQ_QUEUED;
1001	spin_unlock(&mpath->state_lock);
1002
1003	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
1004	++ifmsh->preq_queue_len;
1005	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1006
1007	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
1008		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1009
1010	else if (time_before(jiffies, ifmsh->last_preq)) {
1011		/* avoid a long wait if we have not sent PREQs for a long
1012		 * time and jiffies wrapped around
1013		 */
1014		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
1015		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1016	} else
1017		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
1018						min_preq_int_jiff(sdata));
1019}
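
/*
 * Editorial sketch (not part of the original file): the rate-limiting
 * decision made at the end of mesh_queue_preq(), restated as plain
 * signed-difference arithmetic (which is how time_after()/time_before()
 * stay correct across a jiffies wrap).  The enum and helper are
 * hypothetical.
 */
enum sketch_preq_tx_action {
	SKETCH_PREQ_TX_NOW,		/* kick the work item immediately */
	SKETCH_PREQ_TX_RESET_AND_NOW,	/* jiffies wrapped while idle */
	SKETCH_PREQ_TX_DEFER,		/* arm mesh_path_timer instead */
};

static enum sketch_preq_tx_action
sketch_preq_rate_limit(unsigned long now, unsigned long last_preq,
		       unsigned long min_interval)
{
	if ((long)(now - (last_preq + min_interval)) > 0)
		return SKETCH_PREQ_TX_NOW;
	if ((long)(now - last_preq) < 0)
		return SKETCH_PREQ_TX_RESET_AND_NOW;
	return SKETCH_PREQ_TX_DEFER;
}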
1020
1021/**
1022 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
1023 *
1024 * @sdata: local mesh subif
1025 */
1026void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
1027{
1028	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1029	struct mesh_preq_queue *preq_node;
1030	struct mesh_path *mpath;
1031	u8 ttl, target_flags = 0;
1032	const u8 *da;
1033	u32 lifetime;
1034
1035	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
1036	if (!ifmsh->preq_queue_len ||
1037		time_before(jiffies, ifmsh->last_preq +
1038				min_preq_int_jiff(sdata))) {
1039		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1040		return;
1041	}
1042
1043	preq_node = list_first_entry(&ifmsh->preq_queue.list,
1044			struct mesh_preq_queue, list);
1045	list_del(&preq_node->list);
1046	--ifmsh->preq_queue_len;
1047	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1048
1049	rcu_read_lock();
1050	mpath = mesh_path_lookup(sdata, preq_node->dst);
1051	if (!mpath)
1052		goto enddiscovery;
1053
1054	spin_lock_bh(&mpath->state_lock);
1055	if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
1056		spin_unlock_bh(&mpath->state_lock);
1057		goto enddiscovery;
1058	}
1059	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
1060	if (preq_node->flags & PREQ_Q_F_START) {
1061		if (mpath->flags & MESH_PATH_RESOLVING) {
1062			spin_unlock_bh(&mpath->state_lock);
1063			goto enddiscovery;
1064		} else {
1065			mpath->flags &= ~MESH_PATH_RESOLVED;
1066			mpath->flags |= MESH_PATH_RESOLVING;
1067			mpath->discovery_retries = 0;
1068			mpath->discovery_timeout = disc_timeout_jiff(sdata);
1069		}
1070	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
1071			mpath->flags & MESH_PATH_RESOLVED) {
1072		mpath->flags &= ~MESH_PATH_RESOLVING;
1073		spin_unlock_bh(&mpath->state_lock);
1074		goto enddiscovery;
1075	}
1076
1077	ifmsh->last_preq = jiffies;
1078
1079	if (time_after(jiffies, ifmsh->last_sn_update +
1080				net_traversal_jiffies(sdata)) ||
1081	    time_before(jiffies, ifmsh->last_sn_update)) {
1082		++ifmsh->sn;
1083		sdata->u.mesh.last_sn_update = jiffies;
1084	}
1085	lifetime = default_lifetime(sdata);
1086	ttl = sdata->u.mesh.mshcfg.element_ttl;
1087	if (ttl == 0) {
1088		sdata->u.mesh.mshstats.dropped_frames_ttl++;
1089		spin_unlock_bh(&mpath->state_lock);
1090		goto enddiscovery;
1091	}
1092
1093	if (preq_node->flags & PREQ_Q_F_REFRESH)
1094		target_flags |= IEEE80211_PREQ_TO_FLAG;
1095	else
1096		target_flags &= ~IEEE80211_PREQ_TO_FLAG;
1097
1098	spin_unlock_bh(&mpath->state_lock);
1099	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
1100	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
1101			       target_flags, mpath->dst, mpath->sn, da, 0,
1102			       ttl, lifetime, 0, ifmsh->preq_id++, sdata);
1103	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
1104
1105enddiscovery:
1106	rcu_read_unlock();
1107	kfree(preq_node);
1108}
1109
1110/**
1111 * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
1112 *
1113 * @skb: 802.11 frame to be sent
1114 * @sdata: network subif the frame will be sent through
1115 *
1116 * Lookup next hop for given skb and start path discovery if no
1117 * forwarding information is found.
1118 *
1119 * Returns: 0 if the next hop was found, -ENOENT if the frame was queued, or
1120 * a negative error if no mpath could be allocated (the skb is freed here).
1121 */
1122int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
1123			 struct sk_buff *skb)
1124{
1125	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1126	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1127	struct mesh_path *mpath;
1128	struct sk_buff *skb_to_free = NULL;
1129	u8 *target_addr = hdr->addr3;
1130
1131	/* Nulls are only sent to peers for PS and should be pre-addressed */
1132	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1133		return 0;
1134
1135	/* Allow injected packets to bypass mesh routing */
1136	if (info->control.flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP)
1137		return 0;
1138
1139	if (!mesh_nexthop_lookup(sdata, skb))
1140		return 0;
1141
1142	/* no nexthop found, start resolving */
1143	mpath = mesh_path_lookup(sdata, target_addr);
1144	if (!mpath) {
1145		mpath = mesh_path_add(sdata, target_addr);
1146		if (IS_ERR(mpath)) {
1147			mesh_path_discard_frame(sdata, skb);
1148			return PTR_ERR(mpath);
1149		}
1150	}
1151
1152	if (!(mpath->flags & MESH_PATH_RESOLVING))
1153		mesh_queue_preq(mpath, PREQ_Q_F_START);
1154
1155	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
1156		skb_to_free = skb_dequeue(&mpath->frame_queue);
1157
1158	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1159	ieee80211_set_qos_hdr(sdata, skb);
1160	skb_queue_tail(&mpath->frame_queue, skb);
1161	if (skb_to_free)
1162		mesh_path_discard_frame(sdata, skb_to_free);
1163
1164	return -ENOENT;
1165}
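
/*
 * Editorial sketch (not part of the original file): how a hypothetical
 * caller on the transmit path would interpret the return value of
 * mesh_nexthop_resolve().  The function name is made up for the example.
 */
static int sketch_mesh_tx_resolve(struct ieee80211_sub_if_data *sdata,
				  struct sk_buff *skb)
{
	int err = mesh_nexthop_resolve(sdata, skb);

	if (err == -ENOENT)
		return 0;	/* queued on the mpath; path discovery is running */
	if (err < 0)
		return err;	/* no mpath could be allocated; skb already freed */

	/* addr1/addr2 are filled in: hand the frame to the normal TX path */
	return 0;
}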
1166
1167/**
1168 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
1169 * this function is considered "using" the associated mpath, so preempt a path
1170 * refresh if this mpath expires soon.
1171 *
1172 * @skb: 802.11 frame to be sent
1173 * @sdata: network subif the frame will be sent through
1174 *
1175 * Returns: 0 if the next hop was found. Nonzero otherwise.
1176 */
1177int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata,
1178			struct sk_buff *skb)
1179{
1180	struct mesh_path *mpath;
1181	struct sta_info *next_hop;
1182	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1183	u8 *target_addr = hdr->addr3;
1184
1185	mpath = mesh_path_lookup(sdata, target_addr);
1186	if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
1187		return -ENOENT;
1188
1189	if (time_after(jiffies,
1190		       mpath->exp_time -
1191		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
1192	    ether_addr_equal(sdata->vif.addr, hdr->addr4) &&
1193	    !(mpath->flags & MESH_PATH_RESOLVING) &&
1194	    !(mpath->flags & MESH_PATH_FIXED))
1195		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
1196
1197	next_hop = rcu_dereference(mpath->next_hop);
1198	if (next_hop) {
1199		memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
1200		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
1201		ieee80211_mps_set_frame_flags(sdata, next_hop, hdr);
1202		return 0;
1203	}
1204
1205	return -ENOENT;
1206}
1207
1208void mesh_path_timer(struct timer_list *t)
1209{
1210	struct mesh_path *mpath = from_timer(mpath, t, timer);
1211	struct ieee80211_sub_if_data *sdata = mpath->sdata;
1212	int ret;
1213
1214	if (sdata->local->quiescing)
1215		return;
1216
1217	spin_lock_bh(&mpath->state_lock);
1218	if (mpath->flags & MESH_PATH_RESOLVED ||
1219			(!(mpath->flags & MESH_PATH_RESOLVING))) {
1220		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
1221		spin_unlock_bh(&mpath->state_lock);
1222	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
1223		++mpath->discovery_retries;
1224		mpath->discovery_timeout *= 2;
1225		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
1226		spin_unlock_bh(&mpath->state_lock);
1227		mesh_queue_preq(mpath, 0);
1228	} else {
1229		mpath->flags &= ~(MESH_PATH_RESOLVING |
1230				  MESH_PATH_RESOLVED |
1231				  MESH_PATH_REQ_QUEUED);
1232		mpath->exp_time = jiffies;
1233		spin_unlock_bh(&mpath->state_lock);
1234		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
1235			ret = mesh_path_send_to_gates(mpath);
1236			if (ret)
1237				mhwmp_dbg(sdata, "no gate was reachable\n");
1238		} else
1239			mesh_path_flush_pending(mpath);
1240	}
1241}
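
/*
 * Editorial sketch (not part of the original file): the retry backoff
 * driven by mesh_path_timer() above.  Each expiry doubles
 * discovery_timeout before the PREQ is re-queued, so with an initial
 * timeout T and N allowed retries the path gets roughly
 * T * (2^(N+1) - 1) jiffies of discovery time before it is flushed or
 * handed to a gate.  The helper is hypothetical.
 */
static unsigned long sketch_discovery_total_wait(unsigned long initial_timeout,
						 u8 max_retries)
{
	unsigned long total = 0, timeout = initial_timeout;
	u8 i;

	for (i = 0; i <= max_retries; i++) {
		total += timeout;
		timeout *= 2;
	}

	return total;
}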
1242
1243void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1244{
1245	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1246	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
1247	u8 flags, target_flags = 0;
1248
1249	flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
1250			? RANN_FLAG_IS_GATE : 0;
1251
1252	switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
1253	case IEEE80211_PROACTIVE_RANN:
1254		mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
1255				       ++ifmsh->sn, 0, NULL, 0, broadcast_addr,
1256				       0, ifmsh->mshcfg.element_ttl,
1257				       interval, 0, 0, sdata);
1258		break;
1259	case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
1260		flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
1261		/* fall through */
1262	case IEEE80211_PROACTIVE_PREQ_NO_PREP:
1263		interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
1264		target_flags |= IEEE80211_PREQ_TO_FLAG |
1265				IEEE80211_PREQ_USN_FLAG;
1266		mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
1267				       ++ifmsh->sn, target_flags,
1268				       (u8 *) broadcast_addr, 0, broadcast_addr,
1269				       0, ifmsh->mshcfg.element_ttl, interval,
1270				       0, ifmsh->preq_id++, sdata);
1271		break;
1272	default:
1273		mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
1274		return;
1275	}
1276}