v4.10.11
 
   1/*
   2 * Copyright (c) 2008, 2009 open80211s Ltd.
   3 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License version 2 as
   7 * published by the Free Software Foundation.
   8 */
   9
  10#include <linux/slab.h>
  11#include <linux/etherdevice.h>
  12#include <asm/unaligned.h>
  13#include "wme.h"
  14#include "mesh.h"
  15
  16#define TEST_FRAME_LEN	8192
  17#define MAX_METRIC	0xffffffff
  18#define ARITH_SHIFT	8
  19
  20#define MAX_PREQ_QUEUE_LEN	64
  21
  22static void mesh_queue_preq(struct mesh_path *, u8);
  23
  24static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
  25{
  26	if (ae)
  27		offset += 6;
  28	return get_unaligned_le32(preq_elem + offset);
  29}
  30
  31static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
  32{
  33	if (ae)
  34		offset += 6;
  35	return get_unaligned_le16(preq_elem + offset);
  36}
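/* The "ae" argument of the field accessors above mirrors the Address
 * Extension (AE) bit in the HWMP element flags: when set, an extra 6-byte
 * external address sits in front of the remaining fields, so every later
 * offset shifts by ETH_ALEN -- hence the AE_F_SET() users below.
 */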
  37
  38/* HWMP IE processing macros */
  39#define AE_F			(1<<6)
  40#define AE_F_SET(x)		(*x & AE_F)
  41#define PREQ_IE_FLAGS(x)	(*(x))
  42#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
  43#define PREQ_IE_TTL(x)		(*(x + 2))
  44#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
  45#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
  46#define PREQ_IE_ORIG_SN(x)	u32_field_get(x, 13, 0)
  47#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
  48#define PREQ_IE_METRIC(x) 	u32_field_get(x, 21, AE_F_SET(x))
  49#define PREQ_IE_TARGET_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
  50#define PREQ_IE_TARGET_ADDR(x) 	(AE_F_SET(x) ? x + 33 : x + 27)
  51#define PREQ_IE_TARGET_SN(x) 	u32_field_get(x, 33, AE_F_SET(x))
  52
  53
  54#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
  55#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
  56#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
  57#define PREP_IE_ORIG_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
  58#define PREP_IE_ORIG_SN(x)	u32_field_get(x, 27, AE_F_SET(x))
  59#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
  60#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
  61#define PREP_IE_TARGET_ADDR(x)	(x + 3)
  62#define PREP_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
  63
  64#define PERR_IE_TTL(x)		(*(x))
  65#define PERR_IE_TARGET_FLAGS(x)	(*(x + 2))
  66#define PERR_IE_TARGET_ADDR(x)	(x + 3)
  67#define PERR_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
  68#define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)
  69
  70#define MSEC_TO_TU(x) (x*1000/1024)
  71#define SN_GT(x, y) ((s32)(y - x) < 0)
  72#define SN_LT(x, y) ((s32)(x - y) < 0)
  73#define MAX_SANE_SN_DELTA 32
  74
  75static inline u32 SN_DELTA(u32 x, u32 y)
  76{
  77	return x >= y ? x - y : y - x;
  78}
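/* HWMP sequence numbers are compared modulo 2^32, so SN_GT()/SN_LT() still
 * work across wraparound: e.g. SN_GT(1, 0xfffffffe) is true because
 * (s32)(0xfffffffe - 1) is negative.  SN_DELTA() together with
 * MAX_SANE_SN_DELTA is used further down to guess that a peer rebooted when
 * its SN suddenly jumps by more than 32.
 */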
  79
  80#define net_traversal_jiffies(s) \
  81	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
  82#define default_lifetime(s) \
  83	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
  84#define min_preq_int_jiff(s) \
  85	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
  86#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
  87#define disc_timeout_jiff(s) \
  88	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
  89#define root_path_confirmation_jiffies(s) \
  90	msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)
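/* One 802.11 time unit (TU) is 1024 usec, so MSEC_TO_TU() above turns a
 * millisecond value into the TU-based lifetime carried in HWMP elements,
 * while the msecs_to_jiffies() wrappers convert mesh configuration
 * intervals into jiffies for the local timers.
 */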
  91
  92enum mpath_frame_type {
  93	MPATH_PREQ = 0,
  94	MPATH_PREP,
  95	MPATH_PERR,
  96	MPATH_RANN
  97};
  98
  99static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 100
 101static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
 102				  const u8 *orig_addr, u32 orig_sn,
 103				  u8 target_flags, const u8 *target,
 104				  u32 target_sn, const u8 *da,
 105				  u8 hop_count, u8 ttl,
 106				  u32 lifetime, u32 metric, u32 preq_id,
 107				  struct ieee80211_sub_if_data *sdata)
 108{
 109	struct ieee80211_local *local = sdata->local;
 110	struct sk_buff *skb;
 111	struct ieee80211_mgmt *mgmt;
 112	u8 *pos, ie_len;
 113	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
 114		      sizeof(mgmt->u.action.u.mesh_action);
 115
 116	skb = dev_alloc_skb(local->tx_headroom +
 117			    hdr_len +
 118			    2 + 37); /* max HWMP IE */
 119	if (!skb)
 120		return -1;
 121	skb_reserve(skb, local->tx_headroom);
 122	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 123	memset(mgmt, 0, hdr_len);
 124	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 125					  IEEE80211_STYPE_ACTION);
 126
 127	memcpy(mgmt->da, da, ETH_ALEN);
 128	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 129	/* BSSID == SA */
 130	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 131	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
 132	mgmt->u.action.u.mesh_action.action_code =
 133					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
 134
 135	switch (action) {
 136	case MPATH_PREQ:
 137		mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
 138		ie_len = 37;
 139		pos = skb_put(skb, 2 + ie_len);
 140		*pos++ = WLAN_EID_PREQ;
 141		break;
 142	case MPATH_PREP:
 143		mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
 144		ie_len = 31;
 145		pos = skb_put(skb, 2 + ie_len);
 146		*pos++ = WLAN_EID_PREP;
 147		break;
 148	case MPATH_RANN:
 149		mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
 150		ie_len = sizeof(struct ieee80211_rann_ie);
 151		pos = skb_put(skb, 2 + ie_len);
 152		*pos++ = WLAN_EID_RANN;
 153		break;
 154	default:
 155		kfree_skb(skb);
 156		return -ENOTSUPP;
 157	}
 158	*pos++ = ie_len;
 159	*pos++ = flags;
 160	*pos++ = hop_count;
 161	*pos++ = ttl;
 162	if (action == MPATH_PREP) {
 163		memcpy(pos, target, ETH_ALEN);
 164		pos += ETH_ALEN;
 165		put_unaligned_le32(target_sn, pos);
 166		pos += 4;
 167	} else {
 168		if (action == MPATH_PREQ) {
 169			put_unaligned_le32(preq_id, pos);
 170			pos += 4;
 171		}
 172		memcpy(pos, orig_addr, ETH_ALEN);
 173		pos += ETH_ALEN;
 174		put_unaligned_le32(orig_sn, pos);
 175		pos += 4;
 176	}
 177	put_unaligned_le32(lifetime, pos); /* interval for RANN */
 178	pos += 4;
 179	put_unaligned_le32(metric, pos);
 180	pos += 4;
 181	if (action == MPATH_PREQ) {
 182		*pos++ = 1; /* destination count */
 183		*pos++ = target_flags;
 184		memcpy(pos, target, ETH_ALEN);
 185		pos += ETH_ALEN;
 186		put_unaligned_le32(target_sn, pos);
 187		pos += 4;
 188	} else if (action == MPATH_PREP) {
 189		memcpy(pos, orig_addr, ETH_ALEN);
 190		pos += ETH_ALEN;
 191		put_unaligned_le32(orig_sn, pos);
 192		pos += 4;
 193	}
 194
 195	ieee80211_tx_skb(sdata, skb);
 196	return 0;
 197}
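/* For reference, the element bodies built above are (cf. IEEE 802.11-2012):
 * PREQ, 37 octets: flags, hop count, TTL, path discovery ID, originator
 * address + SN, lifetime, metric, target count (always 1 here) and one
 * target flags/address/SN tuple.
 * PREP, 31 octets: flags, hop count, TTL, target address + SN, lifetime,
 * metric, originator address + SN.
 * RANN reuses struct ieee80211_rann_ie directly.
 */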
 198
 199
 200/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 201 *  headroom in case the frame is encrypted. */
 202static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
 203		struct sk_buff *skb)
 204{
 205	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 206	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 207
 208	skb_reset_mac_header(skb);
 209	skb_reset_network_header(skb);
 210	skb_reset_transport_header(skb);
 211
 212	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
 213	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
 214	skb->priority = 7;
 215
 216	info->control.vif = &sdata->vif;
 217	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
 218	ieee80211_set_qos_hdr(sdata, skb);
 219	ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
 220}
 221
 222/**
 223 * mesh_path_error_tx - Sends a PERR mesh management frame
 224 *
 225 * @ttl: allowed remaining hops
 226 * @target: broken destination
 227 * @target_sn: SN of the broken destination
 228 * @target_rcode: reason code for this PERR
 229 * @ra: node this frame is addressed to
 230 * @sdata: local mesh subif
 231 *
 232 * Note: This function may be called with driver locks taken that the driver
 233 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 234 * frame directly but add it to the pending queue instead.
 235 */
 236int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 237		       u8 ttl, const u8 *target, u32 target_sn,
 238		       u16 target_rcode, const u8 *ra)
 239{
 240	struct ieee80211_local *local = sdata->local;
 241	struct sk_buff *skb;
 242	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 243	struct ieee80211_mgmt *mgmt;
 244	u8 *pos, ie_len;
 245	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
 246		      sizeof(mgmt->u.action.u.mesh_action);
 247
 248	if (time_before(jiffies, ifmsh->next_perr))
 249		return -EAGAIN;
 250
 251	skb = dev_alloc_skb(local->tx_headroom +
 252			    sdata->encrypt_headroom +
 253			    IEEE80211_ENCRYPT_TAILROOM +
 254			    hdr_len +
 255			    2 + 15 /* PERR IE */);
 256	if (!skb)
 257		return -1;
 258	skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom);
 259	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 260	memset(mgmt, 0, hdr_len);
 261	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 262					  IEEE80211_STYPE_ACTION);
 263
 264	memcpy(mgmt->da, ra, ETH_ALEN);
 265	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 266	/* BSSID == SA */
 267	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 268	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
 269	mgmt->u.action.u.mesh_action.action_code =
 270					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
 271	ie_len = 15;
 272	pos = skb_put(skb, 2 + ie_len);
 273	*pos++ = WLAN_EID_PERR;
 274	*pos++ = ie_len;
 275	/* ttl */
 276	*pos++ = ttl;
 277	/* number of destinations */
 278	*pos++ = 1;
 279	/* Flags field has AE bit only as defined in
 280	 * sec 8.4.2.117 IEEE802.11-2012
 281	 */
 282	*pos = 0;
 283	pos++;
 284	memcpy(pos, target, ETH_ALEN);
 285	pos += ETH_ALEN;
 286	put_unaligned_le32(target_sn, pos);
 287	pos += 4;
 288	put_unaligned_le16(target_rcode, pos);
 289
 290	/* see note in function header */
 291	prepare_frame_for_deferred_tx(sdata, skb);
 292	ifmsh->next_perr = TU_TO_EXP_TIME(
 293				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
 294	ieee80211_add_pending_skb(local, skb);
 295	return 0;
 296}
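/* The 15-octet PERR body above is: TTL (1), number of destinations (1,
 * always one here), per-destination flags (1), target address (6), target
 * HWMP SN (4) and reason code (2).
 */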
 297
 298void ieee80211s_update_metric(struct ieee80211_local *local,
 299		struct sta_info *sta, struct sk_buff *skb)
 300{
 301	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
 302	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 303	int failed;
 304
 305	if (!ieee80211_is_data(hdr->frame_control))
 306		return;
 307
 308	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 309
 310	/* moving average, scaled to 100 */
 311	sta->mesh->fail_avg =
 312		((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed);
 313	if (sta->mesh->fail_avg > 95)
 314		mesh_plink_broken(sta);
 315}
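/* fail_avg above is a rough exponentially weighted moving average kept in
 * the 0-100 range: each data frame contributes
 *	fail_avg = 0.8 * fail_avg + 20 * failed
 * so a run of un-ACKed frames drives it towards 100, and once it exceeds 95
 * the peer link is declared broken.
 */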
 316
 317static u32 airtime_link_metric_get(struct ieee80211_local *local,
 318				   struct sta_info *sta)
 319{
 320	struct rate_info rinfo;
 321	/* This should be adjusted for each device */
 322	int device_constant = 1 << ARITH_SHIFT;
 323	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
 324	int s_unit = 1 << ARITH_SHIFT;
 325	int rate, err;
 326	u32 tx_time, estimated_retx;
 327	u64 result;
 328
 329	/* Try to get rate based on HW/SW RC algorithm.
 330	 * Rate is returned in units of Kbps, correct this
 331	 * to comply with airtime calculation units
 332	 * Round up in case we get rate < 100Kbps
 333	 */
 334	rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100);
 335
 336	if (rate) {
 337		err = 0;
 338	} else {
 339		if (sta->mesh->fail_avg >= 100)
 340			return MAX_METRIC;
 341
 342		sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
 343		rate = cfg80211_calculate_bitrate(&rinfo);
 344		if (WARN_ON(!rate))
 345			return MAX_METRIC;
 346
 347		err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
 348	}
 349
 350	/* bitrate is in units of 100 Kbps, while we need rate in units of
 351	 * 1Mbps. This will be corrected on tx_time computation.
 352	 */
 353	tx_time = (device_constant + 10 * test_frame_len / rate);
 354	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
 355	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
 356	return (u32)result;
 357}
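/* The computation above is the 802.11 airtime link metric, roughly
 *	ca = (O + Bt/r) / (1 - ef)
 * in fixed point with ARITH_SHIFT fractional bits: O is the device
 * constant, Bt the 8192-bit test frame, r the rate in 100 kbps units and
 * ef the failure average.  As an illustrative example, a clean link
 * reporting 54 Mbps (rate == 540, err == 0) gives tx_time ~= 39092,
 * estimated_retx == 256 and a metric of about 152.
 */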
 358
 359/**
 360 * hwmp_route_info_get - Update routing info to originator and transmitter
 361 *
 362 * @sdata: local mesh subif
 363 * @mgmt: mesh management frame
 364 * @hwmp_ie: hwmp information element (PREP or PREQ)
 365 * @action: type of hwmp ie
 366 *
 367 * This function updates the path routing information to the originator and the
 368 * transmitter of a HWMP PREQ or PREP frame.
 369 *
 370 * Returns: metric to frame originator or 0 if the frame should not be further
 371 * processed
 372 *
 373 * Notes: this function is the only place (besides user-provided info) where
 374 * path routing information is updated.
 375 */
 376static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
 377			       struct ieee80211_mgmt *mgmt,
 378			       const u8 *hwmp_ie, enum mpath_frame_type action)
 379{
 380	struct ieee80211_local *local = sdata->local;
 381	struct mesh_path *mpath;
 382	struct sta_info *sta;
 383	bool fresh_info;
 384	const u8 *orig_addr, *ta;
 385	u32 orig_sn, orig_metric;
 386	unsigned long orig_lifetime, exp_time;
 387	u32 last_hop_metric, new_metric;
 388	bool process = true;
 389
 390	rcu_read_lock();
 391	sta = sta_info_get(sdata, mgmt->sa);
 392	if (!sta) {
 393		rcu_read_unlock();
 394		return 0;
 395	}
 396
 397	last_hop_metric = airtime_link_metric_get(local, sta);
 398	/* Update and check originator routing info */
 399	fresh_info = true;
 400
 401	switch (action) {
 402	case MPATH_PREQ:
 403		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
 404		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
 405		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
 406		orig_metric = PREQ_IE_METRIC(hwmp_ie);
 407		break;
 408	case MPATH_PREP:
 409		/* Originator here refers to the MP that was the target in the
 410		 * Path Request. We divert from the nomenclature in the draft
 411		 * so that we can easily use a single function to gather path
 412		 * information from both PREQ and PREP frames.
 413		 */
 414		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
 415		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
 416		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
 417		orig_metric = PREP_IE_METRIC(hwmp_ie);
 418		break;
 419	default:
 420		rcu_read_unlock();
 421		return 0;
 422	}
 423	new_metric = orig_metric + last_hop_metric;
 424	if (new_metric < orig_metric)
 425		new_metric = MAX_METRIC;
 426	exp_time = TU_TO_EXP_TIME(orig_lifetime);
 427
 428	if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
 429		/* This MP is the originator, we are not interested in this
 430		 * frame, except for updating transmitter's path info.
 431		 */
 432		process = false;
 433		fresh_info = false;
 434	} else {
 435		mpath = mesh_path_lookup(sdata, orig_addr);
 436		if (mpath) {
 437			spin_lock_bh(&mpath->state_lock);
 438			if (mpath->flags & MESH_PATH_FIXED)
 439				fresh_info = false;
 440			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
 441			    (mpath->flags & MESH_PATH_SN_VALID)) {
 442				if (SN_GT(mpath->sn, orig_sn) ||
 443				    (mpath->sn == orig_sn &&
 444				     new_metric >= mpath->metric)) {
 445					process = false;
 446					fresh_info = false;
 447				}
 448			} else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
 449				bool have_sn, newer_sn, bounced;
 450
 451				have_sn = mpath->flags & MESH_PATH_SN_VALID;
 452				newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
 453				bounced = have_sn &&
 454					  (SN_DELTA(orig_sn, mpath->sn) >
 455							MAX_SANE_SN_DELTA);
 456
 457				if (!have_sn || newer_sn) {
 458					/* if SN is newer than what we had
 459					 * then we can take it */;
 460				} else if (bounced) {
 461					/* if SN is way different than what
 462					 * we had then assume the other side
 463					 * rebooted or restarted */;
 464				} else {
 465					process = false;
 466					fresh_info = false;
 467				}
 468			}
 469		} else {
 470			mpath = mesh_path_add(sdata, orig_addr);
 471			if (IS_ERR(mpath)) {
 472				rcu_read_unlock();
 473				return 0;
 474			}
 475			spin_lock_bh(&mpath->state_lock);
 476		}
 477
 478		if (fresh_info) {
 479			mesh_path_assign_nexthop(mpath, sta);
 480			mpath->flags |= MESH_PATH_SN_VALID;
 481			mpath->metric = new_metric;
 482			mpath->sn = orig_sn;
 483			mpath->exp_time = time_after(mpath->exp_time, exp_time)
 484					  ?  mpath->exp_time : exp_time;
 485			mesh_path_activate(mpath);
 486			spin_unlock_bh(&mpath->state_lock);
 487			mesh_path_tx_pending(mpath);
  488		/* draft says preq_id should be saved too, but there does
  489		 * not seem to be any use for it, skipping for now
 490			 */
 491		} else
 492			spin_unlock_bh(&mpath->state_lock);
 493	}
 494
 495	/* Update and check transmitter routing info */
 496	ta = mgmt->sa;
 497	if (ether_addr_equal(orig_addr, ta))
 498		fresh_info = false;
 499	else {
 500		fresh_info = true;
 501
 502		mpath = mesh_path_lookup(sdata, ta);
 503		if (mpath) {
 504			spin_lock_bh(&mpath->state_lock);
 505			if ((mpath->flags & MESH_PATH_FIXED) ||
 506				((mpath->flags & MESH_PATH_ACTIVE) &&
 507					(last_hop_metric > mpath->metric)))
 508				fresh_info = false;
 509		} else {
 510			mpath = mesh_path_add(sdata, ta);
 511			if (IS_ERR(mpath)) {
 512				rcu_read_unlock();
 513				return 0;
 514			}
 515			spin_lock_bh(&mpath->state_lock);
 516		}
 517
 518		if (fresh_info) {
 519			mesh_path_assign_nexthop(mpath, sta);
 520			mpath->metric = last_hop_metric;
 521			mpath->exp_time = time_after(mpath->exp_time, exp_time)
 522					  ?  mpath->exp_time : exp_time;
 523			mesh_path_activate(mpath);
 524			spin_unlock_bh(&mpath->state_lock);
 525			mesh_path_tx_pending(mpath);
 526		} else
 527			spin_unlock_bh(&mpath->state_lock);
 528	}
 529
 530	rcu_read_unlock();
 531
 532	return process ? new_metric : 0;
 533}
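/* Roughly summarised: the originator entry above is only overwritten with
 * "fresh" information -- the path is not fixed and the element carries a
 * newer SN, the same SN with a strictly better metric, or an SN jump that
 * suggests the peer rebooted.  The entry for the transmitting neighbour is
 * refreshed unless it is fixed or already active with a better metric.
 */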
 534
 535static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 536				    struct ieee80211_mgmt *mgmt,
 537				    const u8 *preq_elem, u32 orig_metric)
 538{
 539	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 540	struct mesh_path *mpath = NULL;
 541	const u8 *target_addr, *orig_addr;
 542	const u8 *da;
 543	u8 target_flags, ttl, flags;
 544	u32 orig_sn, target_sn, lifetime, target_metric = 0;
 545	bool reply = false;
 546	bool forward = true;
 547	bool root_is_gate;
 548
 549	/* Update target SN, if present */
 550	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
 551	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
 552	target_sn = PREQ_IE_TARGET_SN(preq_elem);
 553	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
 554	target_flags = PREQ_IE_TARGET_F(preq_elem);
 555	/* Proactive PREQ gate announcements */
 556	flags = PREQ_IE_FLAGS(preq_elem);
 557	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 558
 559	mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);
 560
 561	if (ether_addr_equal(target_addr, sdata->vif.addr)) {
 562		mhwmp_dbg(sdata, "PREQ is for us\n");
 563		forward = false;
 564		reply = true;
 565		target_metric = 0;
 566		if (time_after(jiffies, ifmsh->last_sn_update +
 567					net_traversal_jiffies(sdata)) ||
 568		    time_before(jiffies, ifmsh->last_sn_update)) {
 569			++ifmsh->sn;
 570			ifmsh->last_sn_update = jiffies;
 571		}
 572		target_sn = ifmsh->sn;
 573	} else if (is_broadcast_ether_addr(target_addr) &&
 574		   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
 575		rcu_read_lock();
 576		mpath = mesh_path_lookup(sdata, orig_addr);
 577		if (mpath) {
 578			if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
 579				reply = true;
 580				target_addr = sdata->vif.addr;
 581				target_sn = ++ifmsh->sn;
 582				target_metric = 0;
 583				ifmsh->last_sn_update = jiffies;
 584			}
 585			if (root_is_gate)
 586				mesh_path_add_gate(mpath);
 587		}
 588		rcu_read_unlock();
 589	} else {
 590		rcu_read_lock();
 591		mpath = mesh_path_lookup(sdata, target_addr);
 592		if (mpath) {
 593			if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
 594					SN_LT(mpath->sn, target_sn)) {
 595				mpath->sn = target_sn;
 596				mpath->flags |= MESH_PATH_SN_VALID;
 597			} else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
 598					(mpath->flags & MESH_PATH_ACTIVE)) {
 599				reply = true;
 600				target_metric = mpath->metric;
 601				target_sn = mpath->sn;
 602				/* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/
 603				target_flags |= IEEE80211_PREQ_TO_FLAG;
 604			}
 605		}
 606		rcu_read_unlock();
 607	}
 608
 609	if (reply) {
 610		lifetime = PREQ_IE_LIFETIME(preq_elem);
 611		ttl = ifmsh->mshcfg.element_ttl;
 612		if (ttl != 0) {
 613			mhwmp_dbg(sdata, "replying to the PREQ\n");
 614			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
 615					       orig_sn, 0, target_addr,
 616					       target_sn, mgmt->sa, 0, ttl,
 617					       lifetime, target_metric, 0,
 618					       sdata);
 619		} else {
 620			ifmsh->mshstats.dropped_frames_ttl++;
 621		}
 622	}
 623
 624	if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
 625		u32 preq_id;
 626		u8 hopcount;
 627
 628		ttl = PREQ_IE_TTL(preq_elem);
 629		lifetime = PREQ_IE_LIFETIME(preq_elem);
 630		if (ttl <= 1) {
 631			ifmsh->mshstats.dropped_frames_ttl++;
 632			return;
 633		}
 634		mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
 635		--ttl;
 636		preq_id = PREQ_IE_PREQ_ID(preq_elem);
 637		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
 638		da = (mpath && mpath->is_root) ?
 639			mpath->rann_snd_addr : broadcast_addr;
 640
 641		if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
 642			target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
 643			target_sn = PREQ_IE_TARGET_SN(preq_elem);
 644		}
 645
 646		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
 647				       orig_sn, target_flags, target_addr,
 648				       target_sn, da, hopcount, ttl, lifetime,
 649				       orig_metric, preq_id, sdata);
 650		if (!is_multicast_ether_addr(da))
 651			ifmsh->mshstats.fwded_unicast++;
 652		else
 653			ifmsh->mshstats.fwded_mcast++;
 654		ifmsh->mshstats.fwded_frames++;
 655	}
 656}
 657
 658
 659static inline struct sta_info *
 660next_hop_deref_protected(struct mesh_path *mpath)
 661{
 662	return rcu_dereference_protected(mpath->next_hop,
 663					 lockdep_is_held(&mpath->state_lock));
 664}
 665
 666
 667static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
 668				    struct ieee80211_mgmt *mgmt,
 669				    const u8 *prep_elem, u32 metric)
 670{
 671	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 672	struct mesh_path *mpath;
 673	const u8 *target_addr, *orig_addr;
 674	u8 ttl, hopcount, flags;
 675	u8 next_hop[ETH_ALEN];
 676	u32 target_sn, orig_sn, lifetime;
 677
 678	mhwmp_dbg(sdata, "received PREP from %pM\n",
 679		  PREP_IE_TARGET_ADDR(prep_elem));
 680
 681	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
 682	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 683		/* destination, no forwarding required */
 684		return;
 685
 686	if (!ifmsh->mshcfg.dot11MeshForwarding)
 687		return;
 688
 689	ttl = PREP_IE_TTL(prep_elem);
 690	if (ttl <= 1) {
 691		sdata->u.mesh.mshstats.dropped_frames_ttl++;
 692		return;
 693	}
 694
 695	rcu_read_lock();
 696	mpath = mesh_path_lookup(sdata, orig_addr);
 697	if (mpath)
 698		spin_lock_bh(&mpath->state_lock);
 699	else
 700		goto fail;
 701	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
 702		spin_unlock_bh(&mpath->state_lock);
 703		goto fail;
 704	}
 705	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
 706	spin_unlock_bh(&mpath->state_lock);
 707	--ttl;
 708	flags = PREP_IE_FLAGS(prep_elem);
 709	lifetime = PREP_IE_LIFETIME(prep_elem);
 710	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
 711	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
 712	target_sn = PREP_IE_TARGET_SN(prep_elem);
 713	orig_sn = PREP_IE_ORIG_SN(prep_elem);
 714
 715	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0,
 716			       target_addr, target_sn, next_hop, hopcount,
 717			       ttl, lifetime, metric, 0, sdata);
 718	rcu_read_unlock();
 719
 720	sdata->u.mesh.mshstats.fwded_unicast++;
 721	sdata->u.mesh.mshstats.fwded_frames++;
 722	return;
 723
 724fail:
 725	rcu_read_unlock();
 726	sdata->u.mesh.mshstats.dropped_frames_no_route++;
 727}
 728
 729static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
 730				    struct ieee80211_mgmt *mgmt,
 731				    const u8 *perr_elem)
 732{
 733	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 734	struct mesh_path *mpath;
 735	u8 ttl;
 736	const u8 *ta, *target_addr;
 737	u32 target_sn;
 738	u16 target_rcode;
 739
 740	ta = mgmt->sa;
 741	ttl = PERR_IE_TTL(perr_elem);
 742	if (ttl <= 1) {
 743		ifmsh->mshstats.dropped_frames_ttl++;
 744		return;
 745	}
 746	ttl--;
 747	target_addr = PERR_IE_TARGET_ADDR(perr_elem);
 748	target_sn = PERR_IE_TARGET_SN(perr_elem);
 749	target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
 750
 751	rcu_read_lock();
 752	mpath = mesh_path_lookup(sdata, target_addr);
 753	if (mpath) {
 754		struct sta_info *sta;
 755
 756		spin_lock_bh(&mpath->state_lock);
 757		sta = next_hop_deref_protected(mpath);
 758		if (mpath->flags & MESH_PATH_ACTIVE &&
 759		    ether_addr_equal(ta, sta->sta.addr) &&
 760		    !(mpath->flags & MESH_PATH_FIXED) &&
 761		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
 762		    SN_GT(target_sn, mpath->sn)  || target_sn == 0)) {
 763			mpath->flags &= ~MESH_PATH_ACTIVE;
 764			if (target_sn != 0)
 765				mpath->sn = target_sn;
 766			else
 767				mpath->sn += 1;
 768			spin_unlock_bh(&mpath->state_lock);
 769			if (!ifmsh->mshcfg.dot11MeshForwarding)
 770				goto endperr;
 771			mesh_path_error_tx(sdata, ttl, target_addr,
 772					   target_sn, target_rcode,
 773					   broadcast_addr);
 774		} else
 775			spin_unlock_bh(&mpath->state_lock);
 776	}
 777endperr:
 778	rcu_read_unlock();
 779}
 780
 781static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 782				    struct ieee80211_mgmt *mgmt,
 783				    const struct ieee80211_rann_ie *rann)
 784{
 785	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 786	struct ieee80211_local *local = sdata->local;
 787	struct sta_info *sta;
 788	struct mesh_path *mpath;
 789	u8 ttl, flags, hopcount;
 790	const u8 *orig_addr;
 791	u32 orig_sn, metric, metric_txsta, interval;
 792	bool root_is_gate;
 793
 794	ttl = rann->rann_ttl;
 795	flags = rann->rann_flags;
 796	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 797	orig_addr = rann->rann_addr;
 798	orig_sn = le32_to_cpu(rann->rann_seq);
 799	interval = le32_to_cpu(rann->rann_interval);
 800	hopcount = rann->rann_hopcount;
 801	hopcount++;
 802	metric = le32_to_cpu(rann->rann_metric);
 803
 804	/*  Ignore our own RANNs */
 805	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 806		return;
 807
 808	mhwmp_dbg(sdata,
 809		  "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
 810		  orig_addr, mgmt->sa, root_is_gate);
 811
 812	rcu_read_lock();
 813	sta = sta_info_get(sdata, mgmt->sa);
 814	if (!sta) {
 815		rcu_read_unlock();
 816		return;
 817	}
 818
 819	metric_txsta = airtime_link_metric_get(local, sta);
 820
 821	mpath = mesh_path_lookup(sdata, orig_addr);
 822	if (!mpath) {
 823		mpath = mesh_path_add(sdata, orig_addr);
 824		if (IS_ERR(mpath)) {
 825			rcu_read_unlock();
 826			sdata->u.mesh.mshstats.dropped_frames_no_route++;
 827			return;
 828		}
 829	}
 830
 831	if (!(SN_LT(mpath->sn, orig_sn)) &&
 832	    !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
 833		rcu_read_unlock();
 834		return;
 835	}
 836
 837	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
 838	     (time_after(jiffies, mpath->last_preq_to_root +
 839				  root_path_confirmation_jiffies(sdata)) ||
 840	     time_before(jiffies, mpath->last_preq_to_root))) &&
 841	     !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
 842		mhwmp_dbg(sdata,
 843			  "time to refresh root mpath %pM\n",
 844			  orig_addr);
 845		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
 846		mpath->last_preq_to_root = jiffies;
 847	}
 848
 849	mpath->sn = orig_sn;
 850	mpath->rann_metric = metric + metric_txsta;
 851	mpath->is_root = true;
  852	/* Record the RANN sender's address to send individually
  853	 * addressed PREQs destined for the root mesh STA */
 854	memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
 855
 856	if (root_is_gate)
 857		mesh_path_add_gate(mpath);
 858
 859	if (ttl <= 1) {
 860		ifmsh->mshstats.dropped_frames_ttl++;
 861		rcu_read_unlock();
 862		return;
 863	}
 864	ttl--;
 865
 866	if (ifmsh->mshcfg.dot11MeshForwarding) {
 867		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
 868				       orig_sn, 0, NULL, 0, broadcast_addr,
 869				       hopcount, ttl, interval,
 870				       metric + metric_txsta, 0, sdata);
 871	}
 872
 873	rcu_read_unlock();
 874}
 875
 876
 877void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
 878			    struct ieee80211_mgmt *mgmt, size_t len)
 879{
 880	struct ieee802_11_elems elems;
 881	size_t baselen;
 882	u32 path_metric;
 883	struct sta_info *sta;
 884
 885	/* need action_code */
 886	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
 887		return;
 888
 889	rcu_read_lock();
 890	sta = sta_info_get(sdata, mgmt->sa);
 891	if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
 892		rcu_read_unlock();
 893		return;
 894	}
 895	rcu_read_unlock();
 896
 897	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
 898	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
 899			       len - baselen, false, &elems);
 900
 901	if (elems.preq) {
 902		if (elems.preq_len != 37)
 903			/* Right now we support just 1 destination and no AE */
 904			return;
 905		path_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
 906						  MPATH_PREQ);
 907		if (path_metric)
 908			hwmp_preq_frame_process(sdata, mgmt, elems.preq,
 909						path_metric);
 910	}
 911	if (elems.prep) {
 912		if (elems.prep_len != 31)
 913			/* Right now we support no AE */
 914			return;
 915		path_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
 916						  MPATH_PREP);
 917		if (path_metric)
 918			hwmp_prep_frame_process(sdata, mgmt, elems.prep,
 919						path_metric);
 920	}
 921	if (elems.perr) {
 922		if (elems.perr_len != 15)
 923			/* Right now we support only one destination per PERR */
 924			return;
 925		hwmp_perr_frame_process(sdata, mgmt, elems.perr);
 926	}
 927	if (elems.rann)
 928		hwmp_rann_frame_process(sdata, mgmt, elems.rann);
 929}
 930
 931/**
 932 * mesh_queue_preq - queue a PREQ to a given destination
 933 *
 934 * @mpath: mesh path to discover
 935 * @flags: special attributes of the PREQ to be sent
 936 *
  937 * Locking: the function must be called from within an RCU read lock block.
 938 *
 939 */
 940static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
 941{
 942	struct ieee80211_sub_if_data *sdata = mpath->sdata;
 943	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 944	struct mesh_preq_queue *preq_node;
 945
 946	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
 947	if (!preq_node) {
 948		mhwmp_dbg(sdata, "could not allocate PREQ node\n");
 949		return;
 950	}
 951
 952	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
 953	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
 954		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 955		kfree(preq_node);
 956		if (printk_ratelimit())
 957			mhwmp_dbg(sdata, "PREQ node queue full\n");
 958		return;
 959	}
 960
 961	spin_lock(&mpath->state_lock);
 962	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
 963		spin_unlock(&mpath->state_lock);
 964		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 965		kfree(preq_node);
 966		return;
 967	}
 968
 969	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
 970	preq_node->flags = flags;
 971
 972	mpath->flags |= MESH_PATH_REQ_QUEUED;
 973	spin_unlock(&mpath->state_lock);
 974
 975	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
 976	++ifmsh->preq_queue_len;
 977	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 978
 979	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
 980		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 981
 982	else if (time_before(jiffies, ifmsh->last_preq)) {
  983		/* avoid a long wait if we did not send preqs for a long time
 984		 * and jiffies wrapped around
 985		 */
 986		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
 987		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 988	} else
 989		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
 990						min_preq_int_jiff(sdata));
 991}
 992
 993/**
 994 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 995 *
 996 * @sdata: local mesh subif
 997 */
 998void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
 999{
1000	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1001	struct mesh_preq_queue *preq_node;
1002	struct mesh_path *mpath;
1003	u8 ttl, target_flags = 0;
1004	const u8 *da;
1005	u32 lifetime;
1006
1007	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
1008	if (!ifmsh->preq_queue_len ||
1009		time_before(jiffies, ifmsh->last_preq +
1010				min_preq_int_jiff(sdata))) {
1011		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1012		return;
1013	}
1014
1015	preq_node = list_first_entry(&ifmsh->preq_queue.list,
1016			struct mesh_preq_queue, list);
1017	list_del(&preq_node->list);
1018	--ifmsh->preq_queue_len;
1019	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1020
1021	rcu_read_lock();
1022	mpath = mesh_path_lookup(sdata, preq_node->dst);
1023	if (!mpath)
1024		goto enddiscovery;
1025
1026	spin_lock_bh(&mpath->state_lock);
1027	if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
1028		spin_unlock_bh(&mpath->state_lock);
1029		goto enddiscovery;
1030	}
1031	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
1032	if (preq_node->flags & PREQ_Q_F_START) {
1033		if (mpath->flags & MESH_PATH_RESOLVING) {
1034			spin_unlock_bh(&mpath->state_lock);
1035			goto enddiscovery;
1036		} else {
1037			mpath->flags &= ~MESH_PATH_RESOLVED;
1038			mpath->flags |= MESH_PATH_RESOLVING;
1039			mpath->discovery_retries = 0;
1040			mpath->discovery_timeout = disc_timeout_jiff(sdata);
1041		}
1042	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
1043			mpath->flags & MESH_PATH_RESOLVED) {
1044		mpath->flags &= ~MESH_PATH_RESOLVING;
1045		spin_unlock_bh(&mpath->state_lock);
1046		goto enddiscovery;
1047	}
1048
1049	ifmsh->last_preq = jiffies;
1050
1051	if (time_after(jiffies, ifmsh->last_sn_update +
1052				net_traversal_jiffies(sdata)) ||
1053	    time_before(jiffies, ifmsh->last_sn_update)) {
1054		++ifmsh->sn;
1055		sdata->u.mesh.last_sn_update = jiffies;
1056	}
1057	lifetime = default_lifetime(sdata);
1058	ttl = sdata->u.mesh.mshcfg.element_ttl;
1059	if (ttl == 0) {
1060		sdata->u.mesh.mshstats.dropped_frames_ttl++;
1061		spin_unlock_bh(&mpath->state_lock);
1062		goto enddiscovery;
1063	}
1064
1065	if (preq_node->flags & PREQ_Q_F_REFRESH)
1066		target_flags |= IEEE80211_PREQ_TO_FLAG;
1067	else
1068		target_flags &= ~IEEE80211_PREQ_TO_FLAG;
1069
1070	spin_unlock_bh(&mpath->state_lock);
1071	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
1072	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
1073			       target_flags, mpath->dst, mpath->sn, da, 0,
1074			       ttl, lifetime, 0, ifmsh->preq_id++, sdata);
1075	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
1076
1077enddiscovery:
1078	rcu_read_unlock();
1079	kfree(preq_node);
1080}
1081
1082/**
1083 * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
1084 *
1085 * @skb: 802.11 frame to be sent
1086 * @sdata: network subif the frame will be sent through
1087 *
1088 * Lookup next hop for given skb and start path discovery if no
1089 * forwarding information is found.
1090 *
1091 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
 1092 * skb is freed here if no mpath could be allocated.
1093 */
1094int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
1095			 struct sk_buff *skb)
1096{
1097	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1098	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1099	struct mesh_path *mpath;
1100	struct sk_buff *skb_to_free = NULL;
1101	u8 *target_addr = hdr->addr3;
1102	int err = 0;
1103
1104	/* Nulls are only sent to peers for PS and should be pre-addressed */
1105	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1106		return 0;
1107
1108	rcu_read_lock();
1109	err = mesh_nexthop_lookup(sdata, skb);
1110	if (!err)
1111		goto endlookup;
1112
1113	/* no nexthop found, start resolving */
1114	mpath = mesh_path_lookup(sdata, target_addr);
1115	if (!mpath) {
1116		mpath = mesh_path_add(sdata, target_addr);
1117		if (IS_ERR(mpath)) {
1118			mesh_path_discard_frame(sdata, skb);
1119			err = PTR_ERR(mpath);
1120			goto endlookup;
1121		}
1122	}
1123
1124	if (!(mpath->flags & MESH_PATH_RESOLVING))
1125		mesh_queue_preq(mpath, PREQ_Q_F_START);
1126
1127	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
1128		skb_to_free = skb_dequeue(&mpath->frame_queue);
1129
1130	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1131	ieee80211_set_qos_hdr(sdata, skb);
1132	skb_queue_tail(&mpath->frame_queue, skb);
1133	err = -ENOENT;
1134	if (skb_to_free)
1135		mesh_path_discard_frame(sdata, skb_to_free);
1136
1137endlookup:
1138	rcu_read_unlock();
1139	return err;
1140}
1141
1142/**
1143 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
1144 * this function is considered "using" the associated mpath, so preempt a path
1145 * refresh if this mpath expires soon.
1146 *
1147 * @skb: 802.11 frame to be sent
1148 * @sdata: network subif the frame will be sent through
1149 *
1150 * Returns: 0 if the next hop was found. Nonzero otherwise.
1151 */
1152int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata,
1153			struct sk_buff *skb)
1154{
1155	struct mesh_path *mpath;
1156	struct sta_info *next_hop;
1157	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1158	u8 *target_addr = hdr->addr3;
1159	int err = -ENOENT;
1160
1161	rcu_read_lock();
1162	mpath = mesh_path_lookup(sdata, target_addr);
1163
1164	if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
1165		goto endlookup;
1166
1167	if (time_after(jiffies,
1168		       mpath->exp_time -
1169		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
1170	    ether_addr_equal(sdata->vif.addr, hdr->addr4) &&
1171	    !(mpath->flags & MESH_PATH_RESOLVING) &&
1172	    !(mpath->flags & MESH_PATH_FIXED))
1173		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
1174
1175	next_hop = rcu_dereference(mpath->next_hop);
1176	if (next_hop) {
1177		memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
1178		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
1179		ieee80211_mps_set_frame_flags(sdata, next_hop, hdr);
1180		err = 0;
1181	}
1182
1183endlookup:
1184	rcu_read_unlock();
1185	return err;
1186}
1187
1188void mesh_path_timer(unsigned long data)
1189{
1190	struct mesh_path *mpath = (void *) data;
1191	struct ieee80211_sub_if_data *sdata = mpath->sdata;
1192	int ret;
1193
1194	if (sdata->local->quiescing)
1195		return;
1196
1197	spin_lock_bh(&mpath->state_lock);
1198	if (mpath->flags & MESH_PATH_RESOLVED ||
1199			(!(mpath->flags & MESH_PATH_RESOLVING))) {
1200		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
1201		spin_unlock_bh(&mpath->state_lock);
1202	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
1203		++mpath->discovery_retries;
1204		mpath->discovery_timeout *= 2;
1205		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
1206		spin_unlock_bh(&mpath->state_lock);
1207		mesh_queue_preq(mpath, 0);
1208	} else {
1209		mpath->flags &= ~(MESH_PATH_RESOLVING |
1210				  MESH_PATH_RESOLVED |
1211				  MESH_PATH_REQ_QUEUED);
1212		mpath->exp_time = jiffies;
1213		spin_unlock_bh(&mpath->state_lock);
1214		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
1215			ret = mesh_path_send_to_gates(mpath);
1216			if (ret)
1217				mhwmp_dbg(sdata, "no gate was reachable\n");
1218		} else
1219			mesh_path_flush_pending(mpath);
1220	}
1221}
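/* Path discovery therefore retries with binary exponential backoff: every
 * timer expiry doubles discovery_timeout and requeues a PREQ, up to
 * dot11MeshHWMPmaxPREQretries attempts, after which the path is expired and
 * any queued frames are redirected to a mesh gate if one is known.
 */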
1222
1223void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1224{
1225	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1226	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
1227	u8 flags, target_flags = 0;
1228
1229	flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
1230			? RANN_FLAG_IS_GATE : 0;
1231
1232	switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
1233	case IEEE80211_PROACTIVE_RANN:
1234		mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
1235				       ++ifmsh->sn, 0, NULL, 0, broadcast_addr,
1236				       0, ifmsh->mshcfg.element_ttl,
1237				       interval, 0, 0, sdata);
1238		break;
1239	case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
1240		flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
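		/* fall through to the common proactive PREQ transmission */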
1241	case IEEE80211_PROACTIVE_PREQ_NO_PREP:
1242		interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
1243		target_flags |= IEEE80211_PREQ_TO_FLAG |
1244				IEEE80211_PREQ_USN_FLAG;
1245		mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
1246				       ++ifmsh->sn, target_flags,
1247				       (u8 *) broadcast_addr, 0, broadcast_addr,
1248				       0, ifmsh->mshcfg.element_ttl, interval,
1249				       0, ifmsh->preq_id++, sdata);
1250		break;
1251	default:
1252		mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
1253		return;
1254	}
1255}
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2008, 2009 open80211s Ltd.
   4 * Copyright (C) 2019, 2021-2023 Intel Corporation
   5 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
   6 */
   7
   8#include <linux/slab.h>
   9#include <linux/etherdevice.h>
  10#include <asm/unaligned.h>
  11#include "wme.h"
  12#include "mesh.h"
  13
  14#define TEST_FRAME_LEN	8192
  15#define MAX_METRIC	0xffffffff
  16#define ARITH_SHIFT	8
  17#define LINK_FAIL_THRESH 95
  18
  19#define MAX_PREQ_QUEUE_LEN	64
  20
  21static void mesh_queue_preq(struct mesh_path *, u8);
  22
  23static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
  24{
  25	if (ae)
  26		offset += 6;
  27	return get_unaligned_le32(preq_elem + offset);
  28}
  29
  30static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
  31{
  32	if (ae)
  33		offset += 6;
  34	return get_unaligned_le16(preq_elem + offset);
  35}
  36
  37/* HWMP IE processing macros */
  38#define AE_F			(1<<6)
  39#define AE_F_SET(x)		(*x & AE_F)
  40#define PREQ_IE_FLAGS(x)	(*(x))
  41#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
  42#define PREQ_IE_TTL(x)		(*(x + 2))
  43#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
  44#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
  45#define PREQ_IE_ORIG_SN(x)	u32_field_get(x, 13, 0)
  46#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
  47#define PREQ_IE_METRIC(x) 	u32_field_get(x, 21, AE_F_SET(x))
  48#define PREQ_IE_TARGET_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
  49#define PREQ_IE_TARGET_ADDR(x) 	(AE_F_SET(x) ? x + 33 : x + 27)
  50#define PREQ_IE_TARGET_SN(x) 	u32_field_get(x, 33, AE_F_SET(x))
  51
  52
  53#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
  54#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
  55#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
  56#define PREP_IE_ORIG_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
  57#define PREP_IE_ORIG_SN(x)	u32_field_get(x, 27, AE_F_SET(x))
  58#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
  59#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
  60#define PREP_IE_TARGET_ADDR(x)	(x + 3)
  61#define PREP_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
  62
  63#define PERR_IE_TTL(x)		(*(x))
  64#define PERR_IE_TARGET_FLAGS(x)	(*(x + 2))
  65#define PERR_IE_TARGET_ADDR(x)	(x + 3)
  66#define PERR_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
  67#define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)
  68
  69#define MSEC_TO_TU(x) (x*1000/1024)
  70#define SN_GT(x, y) ((s32)(y - x) < 0)
  71#define SN_LT(x, y) ((s32)(x - y) < 0)
  72#define MAX_SANE_SN_DELTA 32
  73
  74static inline u32 SN_DELTA(u32 x, u32 y)
  75{
  76	return x >= y ? x - y : y - x;
  77}
  78
  79#define net_traversal_jiffies(s) \
  80	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
  81#define default_lifetime(s) \
  82	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
  83#define min_preq_int_jiff(s) \
  84	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
  85#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
  86#define disc_timeout_jiff(s) \
  87	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
  88#define root_path_confirmation_jiffies(s) \
  89	msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)
  90
  91enum mpath_frame_type {
  92	MPATH_PREQ = 0,
  93	MPATH_PREP,
  94	MPATH_PERR,
  95	MPATH_RANN
  96};
  97
  98static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
  99
 100static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
 101				  const u8 *orig_addr, u32 orig_sn,
 102				  u8 target_flags, const u8 *target,
 103				  u32 target_sn, const u8 *da,
 104				  u8 hop_count, u8 ttl,
 105				  u32 lifetime, u32 metric, u32 preq_id,
 106				  struct ieee80211_sub_if_data *sdata)
 107{
 108	struct ieee80211_local *local = sdata->local;
 109	struct sk_buff *skb;
 110	struct ieee80211_mgmt *mgmt;
 111	u8 *pos, ie_len;
 112	int hdr_len = offsetofend(struct ieee80211_mgmt,
 113				  u.action.u.mesh_action);
 114
 115	skb = dev_alloc_skb(local->tx_headroom +
 116			    hdr_len +
 117			    2 + 37); /* max HWMP IE */
 118	if (!skb)
 119		return -1;
 120	skb_reserve(skb, local->tx_headroom);
 121	mgmt = skb_put_zero(skb, hdr_len);
 122	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 123					  IEEE80211_STYPE_ACTION);
 124
 125	memcpy(mgmt->da, da, ETH_ALEN);
 126	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 127	/* BSSID == SA */
 128	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 129	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
 130	mgmt->u.action.u.mesh_action.action_code =
 131					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
 132
 133	switch (action) {
 134	case MPATH_PREQ:
 135		mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
 136		ie_len = 37;
 137		pos = skb_put(skb, 2 + ie_len);
 138		*pos++ = WLAN_EID_PREQ;
 139		break;
 140	case MPATH_PREP:
 141		mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
 142		ie_len = 31;
 143		pos = skb_put(skb, 2 + ie_len);
 144		*pos++ = WLAN_EID_PREP;
 145		break;
 146	case MPATH_RANN:
 147		mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
 148		ie_len = sizeof(struct ieee80211_rann_ie);
 149		pos = skb_put(skb, 2 + ie_len);
 150		*pos++ = WLAN_EID_RANN;
 151		break;
 152	default:
 153		kfree_skb(skb);
 154		return -EOPNOTSUPP;
 155	}
 156	*pos++ = ie_len;
 157	*pos++ = flags;
 158	*pos++ = hop_count;
 159	*pos++ = ttl;
 160	if (action == MPATH_PREP) {
 161		memcpy(pos, target, ETH_ALEN);
 162		pos += ETH_ALEN;
 163		put_unaligned_le32(target_sn, pos);
 164		pos += 4;
 165	} else {
 166		if (action == MPATH_PREQ) {
 167			put_unaligned_le32(preq_id, pos);
 168			pos += 4;
 169		}
 170		memcpy(pos, orig_addr, ETH_ALEN);
 171		pos += ETH_ALEN;
 172		put_unaligned_le32(orig_sn, pos);
 173		pos += 4;
 174	}
 175	put_unaligned_le32(lifetime, pos); /* interval for RANN */
 176	pos += 4;
 177	put_unaligned_le32(metric, pos);
 178	pos += 4;
 179	if (action == MPATH_PREQ) {
 180		*pos++ = 1; /* destination count */
 181		*pos++ = target_flags;
 182		memcpy(pos, target, ETH_ALEN);
 183		pos += ETH_ALEN;
 184		put_unaligned_le32(target_sn, pos);
 185		pos += 4;
 186	} else if (action == MPATH_PREP) {
 187		memcpy(pos, orig_addr, ETH_ALEN);
 188		pos += ETH_ALEN;
 189		put_unaligned_le32(orig_sn, pos);
 190		pos += 4;
 191	}
 192
 193	ieee80211_tx_skb(sdata, skb);
 194	return 0;
 195}
 196
 197
 198/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 199 *  headroom in case the frame is encrypted. */
 200static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
 201		struct sk_buff *skb)
 202{
 203	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 204	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 205
 206	skb_reset_mac_header(skb);
 207	skb_reset_network_header(skb);
 208	skb_reset_transport_header(skb);
 209
 210	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
 211	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
 212	skb->priority = 7;
 213
 214	info->control.vif = &sdata->vif;
 215	info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
 216	ieee80211_set_qos_hdr(sdata, skb);
 217	ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
 218}
 219
 220/**
 221 * mesh_path_error_tx - Sends a PERR mesh management frame
 222 *
 223 * @ttl: allowed remaining hops
 224 * @target: broken destination
 225 * @target_sn: SN of the broken destination
 226 * @target_rcode: reason code for this PERR
 227 * @ra: node this frame is addressed to
 228 * @sdata: local mesh subif
 229 *
 230 * Note: This function may be called with driver locks taken that the driver
 231 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 232 * frame directly but add it to the pending queue instead.
 233 *
 234 * Returns: 0 on success
 235 */
 236int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 237		       u8 ttl, const u8 *target, u32 target_sn,
 238		       u16 target_rcode, const u8 *ra)
 239{
 240	struct ieee80211_local *local = sdata->local;
 241	struct sk_buff *skb;
 242	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 243	struct ieee80211_mgmt *mgmt;
 244	u8 *pos, ie_len;
 245	int hdr_len = offsetofend(struct ieee80211_mgmt,
 246				  u.action.u.mesh_action);
 247
 248	if (time_before(jiffies, ifmsh->next_perr))
 249		return -EAGAIN;
 250
 251	skb = dev_alloc_skb(local->tx_headroom +
 252			    IEEE80211_ENCRYPT_HEADROOM +
 253			    IEEE80211_ENCRYPT_TAILROOM +
 254			    hdr_len +
 255			    2 + 15 /* PERR IE */);
 256	if (!skb)
 257		return -1;
 258	skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
 259	mgmt = skb_put_zero(skb, hdr_len);
 260	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 261					  IEEE80211_STYPE_ACTION);
 262
 263	memcpy(mgmt->da, ra, ETH_ALEN);
 264	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 265	/* BSSID == SA */
 266	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 267	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
 268	mgmt->u.action.u.mesh_action.action_code =
 269					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
 270	ie_len = 15;
 271	pos = skb_put(skb, 2 + ie_len);
 272	*pos++ = WLAN_EID_PERR;
 273	*pos++ = ie_len;
 274	/* ttl */
 275	*pos++ = ttl;
 276	/* number of destinations */
 277	*pos++ = 1;
 278	/* Flags field has AE bit only as defined in
 279	 * sec 8.4.2.117 IEEE802.11-2012
 280	 */
 281	*pos = 0;
 282	pos++;
 283	memcpy(pos, target, ETH_ALEN);
 284	pos += ETH_ALEN;
 285	put_unaligned_le32(target_sn, pos);
 286	pos += 4;
 287	put_unaligned_le16(target_rcode, pos);
 288
 289	/* see note in function header */
 290	prepare_frame_for_deferred_tx(sdata, skb);
 291	ifmsh->next_perr = TU_TO_EXP_TIME(
 292				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
 293	ieee80211_add_pending_skb(local, skb);
 294	return 0;
 295}
 296
 297void ieee80211s_update_metric(struct ieee80211_local *local,
 298			      struct sta_info *sta,
 299			      struct ieee80211_tx_status *st)
 300{
 301	struct ieee80211_tx_info *txinfo = st->info;
 302	int failed;
 303	struct rate_info rinfo;
 304
 305	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 306
 307	/* moving average, scaled to 100.
 308	 * feed failure as 100 and success as 0
 309	 */
 310	ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, failed * 100);
 311	if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) >
 312			LINK_FAIL_THRESH)
 313		mesh_plink_broken(sta);
 314
 315	/* use rate info set by the driver directly if present */
 316	if (st->n_rates)
 317		rinfo = sta->deflink.tx_stats.last_rate_info;
 318	else
 319		sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, &rinfo);
 320
 321	ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg,
 322				  cfg80211_calculate_bitrate(&rinfo));
 323}
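/* Compared with the open-coded average in the v4.10.11 listing above, this
 * version keeps the failure statistic in a kernel EWMA
 * (ewma_mesh_fail_avg), feeding 100 on a missed ACK and 0 on success,
 * declares the link broken above LINK_FAIL_THRESH, and additionally tracks
 * an EWMA of the last tx bitrate for the airtime metric.
 */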
 324
 325u32 airtime_link_metric_get(struct ieee80211_local *local,
 326			    struct sta_info *sta)
 327{
 328	/* This should be adjusted for each device */
 329	int device_constant = 1 << ARITH_SHIFT;
 330	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
 331	int s_unit = 1 << ARITH_SHIFT;
 332	int rate, err;
 333	u32 tx_time, estimated_retx;
 334	u64 result;
 335	unsigned long fail_avg =
 336		ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);
 337
 338	if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
 339		return MAX_METRIC;
 340
 341	/* Try to get rate based on HW/SW RC algorithm.
 342	 * Rate is returned in units of Kbps, correct this
 343	 * to comply with airtime calculation units
 344	 * Round up in case we get rate < 100Kbps
 345	 */
 346	rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100);
 347
 348	if (rate) {
 349		err = 0;
 350	} else {
 351		if (fail_avg > LINK_FAIL_THRESH)
 352			return MAX_METRIC;
 353
 354		rate = ewma_mesh_tx_rate_avg_read(&sta->mesh->tx_rate_avg);
 355		if (WARN_ON(!rate))
 356			return MAX_METRIC;
 357
 358		err = (fail_avg << ARITH_SHIFT) / 100;
 359	}
 360
 361	/* bitrate is in units of 100 Kbps, while we need rate in units of
 362	 * 1Mbps. This will be corrected on tx_time computation.
 363	 */
 364	tx_time = (device_constant + 10 * test_frame_len / rate);
 365	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
 366	result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
 367	return (u32)result;
 368}
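/* Besides the EWMA inputs, this newer metric bails out with MAX_METRIC when
 * the peer link is not established and widens the tx_time * estimated_retx
 * product to u64 before shifting, avoiding the 32-bit overflow that the
 * older version could hit on slow, lossy links.
 */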
 369
 370/**
 371 * hwmp_route_info_get - Update routing info to originator and transmitter
 372 *
 373 * @sdata: local mesh subif
 374 * @mgmt: mesh management frame
 375 * @hwmp_ie: hwmp information element (PREP or PREQ)
 376 * @action: type of hwmp ie
 377 *
 378 * This function updates the path routing information to the originator and the
 379 * transmitter of a HWMP PREQ or PREP frame.
 380 *
 381 * Returns: metric to frame originator or 0 if the frame should not be further
 382 * processed
 383 *
 384 * Notes: this function is the only place (besides user-provided info) where
 385 * path routing information is updated.
 386 */
 387static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
 388			       struct ieee80211_mgmt *mgmt,
 389			       const u8 *hwmp_ie, enum mpath_frame_type action)
 390{
 391	struct ieee80211_local *local = sdata->local;
 392	struct mesh_path *mpath;
 393	struct sta_info *sta;
 394	bool fresh_info;
 395	const u8 *orig_addr, *ta;
 396	u32 orig_sn, orig_metric;
 397	unsigned long orig_lifetime, exp_time;
 398	u32 last_hop_metric, new_metric;
 399	bool flush_mpath = false;
 400	bool process = true;
 401	u8 hopcount;
 402
 403	rcu_read_lock();
 404	sta = sta_info_get(sdata, mgmt->sa);
 405	if (!sta) {
 406		rcu_read_unlock();
 407		return 0;
 408	}
 409
 410	last_hop_metric = airtime_link_metric_get(local, sta);
 411	/* Update and check originator routing info */
 412	fresh_info = true;
 413
 414	switch (action) {
 415	case MPATH_PREQ:
 416		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
 417		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
 418		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
 419		orig_metric = PREQ_IE_METRIC(hwmp_ie);
 420		hopcount = PREQ_IE_HOPCOUNT(hwmp_ie) + 1;
 421		break;
 422	case MPATH_PREP:
 423		/* Originator here refers to the MP that was the target in the
 424		 * Path Request. We divert from the nomenclature in the draft
 425		 * so that we can easily use a single function to gather path
 426		 * information from both PREQ and PREP frames.
 427		 */
 428		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
 429		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
 430		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
 431		orig_metric = PREP_IE_METRIC(hwmp_ie);
 432		hopcount = PREP_IE_HOPCOUNT(hwmp_ie) + 1;
 433		break;
 434	default:
 435		rcu_read_unlock();
 436		return 0;
 437	}
 438	new_metric = orig_metric + last_hop_metric;
 439	if (new_metric < orig_metric)
 440		new_metric = MAX_METRIC;
 441	exp_time = TU_TO_EXP_TIME(orig_lifetime);
 442
 443	if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
 444		/* This MP is the originator, we are not interested in this
 445		 * frame, except for updating transmitter's path info.
 446		 */
 447		process = false;
 448		fresh_info = false;
 449	} else {
 450		mpath = mesh_path_lookup(sdata, orig_addr);
 451		if (mpath) {
 452			spin_lock_bh(&mpath->state_lock);
 453			if (mpath->flags & MESH_PATH_FIXED)
 454				fresh_info = false;
 455			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
 456			    (mpath->flags & MESH_PATH_SN_VALID)) {
 457				if (SN_GT(mpath->sn, orig_sn) ||
 458				    (mpath->sn == orig_sn &&
 459				     (rcu_access_pointer(mpath->next_hop) !=
 460						      sta ?
 461					      mult_frac(new_metric, 10, 9) :
 462					      new_metric) >= mpath->metric)) {
 463					process = false;
 464					fresh_info = false;
 465				}
 466			} else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
 467				bool have_sn, newer_sn, bounced;
 468
 469				have_sn = mpath->flags & MESH_PATH_SN_VALID;
 470				newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
 471				bounced = have_sn &&
 472					  (SN_DELTA(orig_sn, mpath->sn) >
 473							MAX_SANE_SN_DELTA);
 474
 475				if (!have_sn || newer_sn) {
 476					/* if SN is newer than what we had
 477					 * then we can take it */;
 478				} else if (bounced) {
 479					/* if the SN differs wildly from what
 480					 * we had, assume the other side
 481					 * rebooted or restarted */;
 482				} else {
 483					process = false;
 484					fresh_info = false;
 485				}
 486			}
 487		} else {
 488			mpath = mesh_path_add(sdata, orig_addr);
 489			if (IS_ERR(mpath)) {
 490				rcu_read_unlock();
 491				return 0;
 492			}
 493			spin_lock_bh(&mpath->state_lock);
 494		}
 495
 496		if (fresh_info) {
 497			if (rcu_access_pointer(mpath->next_hop) != sta) {
 498				mpath->path_change_count++;
 499				flush_mpath = true;
 500			}
 501			mesh_path_assign_nexthop(mpath, sta);
 502			mpath->flags |= MESH_PATH_SN_VALID;
 503			mpath->metric = new_metric;
 504			mpath->sn = orig_sn;
 505			mpath->exp_time = time_after(mpath->exp_time, exp_time)
 506					  ?  mpath->exp_time : exp_time;
 507			mpath->hop_count = hopcount;
 508			mesh_path_activate(mpath);
 509			spin_unlock_bh(&mpath->state_lock);
 510			if (flush_mpath)
 511				mesh_fast_tx_flush_mpath(mpath);
 512			ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
 513			/* init it at a low value - 0 start is tricky */
 514			ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
 515			mesh_path_tx_pending(mpath);
 516			/* the draft says preq_id should be saved too, but there
 517			 * does not seem to be any use for it, so skip it for now
 518			 */
 519		} else
 520			spin_unlock_bh(&mpath->state_lock);
 521	}
 522
 523	/* Update and check transmitter routing info */
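    	/* The transmitter is one hop away, so its path uses last_hop_metric
    	 * and hop_count 1; as above, the metric of a candidate that would
    	 * change the next hop is inflated by 10/9 before comparison.
    	 */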
 524	ta = mgmt->sa;
 525	if (ether_addr_equal(orig_addr, ta))
 526		fresh_info = false;
 527	else {
 528		fresh_info = true;
 529
 530		mpath = mesh_path_lookup(sdata, ta);
 531		if (mpath) {
 532			spin_lock_bh(&mpath->state_lock);
 533			if ((mpath->flags & MESH_PATH_FIXED) ||
 534			    ((mpath->flags & MESH_PATH_ACTIVE) &&
 535			     ((rcu_access_pointer(mpath->next_hop) != sta ?
 536				       mult_frac(last_hop_metric, 10, 9) :
 537				       last_hop_metric) > mpath->metric)))
 538				fresh_info = false;
 539		} else {
 540			mpath = mesh_path_add(sdata, ta);
 541			if (IS_ERR(mpath)) {
 542				rcu_read_unlock();
 543				return 0;
 544			}
 545			spin_lock_bh(&mpath->state_lock);
 546		}
 547
 548		if (fresh_info) {
 549			if (rcu_access_pointer(mpath->next_hop) != sta) {
 550				mpath->path_change_count++;
 551				flush_mpath = true;
 552			}
 553			mesh_path_assign_nexthop(mpath, sta);
 554			mpath->metric = last_hop_metric;
 555			mpath->exp_time = time_after(mpath->exp_time, exp_time)
 556					  ?  mpath->exp_time : exp_time;
 557			mpath->hop_count = 1;
 558			mesh_path_activate(mpath);
 559			spin_unlock_bh(&mpath->state_lock);
 560			if (flush_mpath)
 561				mesh_fast_tx_flush_mpath(mpath);
 562			ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
 563			/* init it at a low value - 0 start is tricky */
 564			ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
 565			mesh_path_tx_pending(mpath);
 566		} else
 567			spin_unlock_bh(&mpath->state_lock);
 568	}
 569
 570	rcu_read_unlock();
 571
 572	return process ? new_metric : 0;
 573}
 574
 575static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
 576				    struct ieee80211_mgmt *mgmt,
 577				    const u8 *preq_elem, u32 orig_metric)
 578{
 579	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 580	struct mesh_path *mpath = NULL;
 581	const u8 *target_addr, *orig_addr;
 582	const u8 *da;
 583	u8 target_flags, ttl, flags;
 584	u32 orig_sn, target_sn, lifetime, target_metric = 0;
 585	bool reply = false;
 586	bool forward = true;
 587	bool root_is_gate;
 588
 589	/* Update target SN, if present */
 590	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
 591	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
 592	target_sn = PREQ_IE_TARGET_SN(preq_elem);
 593	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
 594	target_flags = PREQ_IE_TARGET_F(preq_elem);
 595	/* Proactive PREQ gate announcements */
 596	flags = PREQ_IE_FLAGS(preq_elem);
 597	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 598
 599	mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);
 600
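    	/* Three cases follow: the PREQ targets this MP (reply with a PREP),
    	 * it is a proactive PREQ from a root MP (broadcast target with the
    	 * TO flag), or it targets another MP, where an intermediate MP may
    	 * reply on the target's behalf if the TO flag is not set and it has
    	 * an active path with a valid, recent enough SN.
    	 */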
 601	if (ether_addr_equal(target_addr, sdata->vif.addr)) {
 602		mhwmp_dbg(sdata, "PREQ is for us\n");
 603		forward = false;
 604		reply = true;
 605		target_metric = 0;
 606
 607		if (SN_GT(target_sn, ifmsh->sn))
 608			ifmsh->sn = target_sn;
 609
 610		if (time_after(jiffies, ifmsh->last_sn_update +
 611					net_traversal_jiffies(sdata)) ||
 612		    time_before(jiffies, ifmsh->last_sn_update)) {
 613			++ifmsh->sn;
 614			ifmsh->last_sn_update = jiffies;
 615		}
 616		target_sn = ifmsh->sn;
 617	} else if (is_broadcast_ether_addr(target_addr) &&
 618		   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
 619		rcu_read_lock();
 620		mpath = mesh_path_lookup(sdata, orig_addr);
 621		if (mpath) {
 622			if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
 623				reply = true;
 624				target_addr = sdata->vif.addr;
 625				target_sn = ++ifmsh->sn;
 626				target_metric = 0;
 627				ifmsh->last_sn_update = jiffies;
 628			}
 629			if (root_is_gate)
 630				mesh_path_add_gate(mpath);
 631		}
 632		rcu_read_unlock();
 633	} else {
 634		rcu_read_lock();
 635		mpath = mesh_path_lookup(sdata, target_addr);
 636		if (mpath) {
 637			if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
 638					SN_LT(mpath->sn, target_sn)) {
 639				mpath->sn = target_sn;
 640				mpath->flags |= MESH_PATH_SN_VALID;
 641			} else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
 642					(mpath->flags & MESH_PATH_ACTIVE)) {
 643				reply = true;
 644				target_metric = mpath->metric;
 645				target_sn = mpath->sn;
 646				/* Case E2 of sec 13.10.9.3 IEEE 802.11-2012 */
 647				target_flags |= IEEE80211_PREQ_TO_FLAG;
 648			}
 649		}
 650		rcu_read_unlock();
 651	}
 652
 653	if (reply) {
 654		lifetime = PREQ_IE_LIFETIME(preq_elem);
 655		ttl = ifmsh->mshcfg.element_ttl;
 656		if (ttl != 0) {
 657			mhwmp_dbg(sdata, "replying to the PREQ\n");
 658			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
 659					       orig_sn, 0, target_addr,
 660					       target_sn, mgmt->sa, 0, ttl,
 661					       lifetime, target_metric, 0,
 662					       sdata);
 663		} else {
 664			ifmsh->mshstats.dropped_frames_ttl++;
 665		}
 666	}
 667
 668	if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
 669		u32 preq_id;
 670		u8 hopcount;
 671
 672		ttl = PREQ_IE_TTL(preq_elem);
 673		lifetime = PREQ_IE_LIFETIME(preq_elem);
 674		if (ttl <= 1) {
 675			ifmsh->mshstats.dropped_frames_ttl++;
 676			return;
 677		}
 678		mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
 679		--ttl;
 680		preq_id = PREQ_IE_PREQ_ID(preq_elem);
 681		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
 682		da = (mpath && mpath->is_root) ?
 683			mpath->rann_snd_addr : broadcast_addr;
 684
 685		if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
 686			target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
 687			target_sn = PREQ_IE_TARGET_SN(preq_elem);
 688		}
 689
 690		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
 691				       orig_sn, target_flags, target_addr,
 692				       target_sn, da, hopcount, ttl, lifetime,
 693				       orig_metric, preq_id, sdata);
 694		if (!is_multicast_ether_addr(da))
 695			ifmsh->mshstats.fwded_unicast++;
 696		else
 697			ifmsh->mshstats.fwded_mcast++;
 698		ifmsh->mshstats.fwded_frames++;
 699	}
 700}
 701
 702
 703static inline struct sta_info *
 704next_hop_deref_protected(struct mesh_path *mpath)
 705{
 706	return rcu_dereference_protected(mpath->next_hop,
 707					 lockdep_is_held(&mpath->state_lock));
 708}
 709
 710
 711static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
 712				    struct ieee80211_mgmt *mgmt,
 713				    const u8 *prep_elem, u32 metric)
 714{
 715	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 716	struct mesh_path *mpath;
 717	const u8 *target_addr, *orig_addr;
 718	u8 ttl, hopcount, flags;
 719	u8 next_hop[ETH_ALEN];
 720	u32 target_sn, orig_sn, lifetime;
 721
 722	mhwmp_dbg(sdata, "received PREP from %pM\n",
 723		  PREP_IE_TARGET_ADDR(prep_elem));
 724
 725	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
 726	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 727		/* destination, no forwarding required */
 728		return;
 729
 730	if (!ifmsh->mshcfg.dot11MeshForwarding)
 731		return;
 732
 733	ttl = PREP_IE_TTL(prep_elem);
 734	if (ttl <= 1) {
 735		sdata->u.mesh.mshstats.dropped_frames_ttl++;
 736		return;
 737	}
 738
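    	/* Forward the PREP as an individually addressed frame to the next
    	 * hop towards the PREQ originator, along the reverse path that was
    	 * learned when the corresponding PREQ passed through this MP.
    	 */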
 739	rcu_read_lock();
 740	mpath = mesh_path_lookup(sdata, orig_addr);
 741	if (mpath)
 742		spin_lock_bh(&mpath->state_lock);
 743	else
 744		goto fail;
 745	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
 746		spin_unlock_bh(&mpath->state_lock);
 747		goto fail;
 748	}
 749	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
 750	spin_unlock_bh(&mpath->state_lock);
 751	--ttl;
 752	flags = PREP_IE_FLAGS(prep_elem);
 753	lifetime = PREP_IE_LIFETIME(prep_elem);
 754	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
 755	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
 756	target_sn = PREP_IE_TARGET_SN(prep_elem);
 757	orig_sn = PREP_IE_ORIG_SN(prep_elem);
 758
 759	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0,
 760			       target_addr, target_sn, next_hop, hopcount,
 761			       ttl, lifetime, metric, 0, sdata);
 762	rcu_read_unlock();
 763
 764	sdata->u.mesh.mshstats.fwded_unicast++;
 765	sdata->u.mesh.mshstats.fwded_frames++;
 766	return;
 767
 768fail:
 769	rcu_read_unlock();
 770	sdata->u.mesh.mshstats.dropped_frames_no_route++;
 771}
 772
 773static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
 774				    struct ieee80211_mgmt *mgmt,
 775				    const u8 *perr_elem)
 776{
 777	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 778	struct mesh_path *mpath;
 779	u8 ttl;
 780	const u8 *ta, *target_addr;
 781	u32 target_sn;
 782	u16 target_rcode;
 783
 784	ta = mgmt->sa;
 785	ttl = PERR_IE_TTL(perr_elem);
 786	if (ttl <= 1) {
 787		ifmsh->mshstats.dropped_frames_ttl++;
 788		return;
 789	}
 790	ttl--;
 791	target_addr = PERR_IE_TARGET_ADDR(perr_elem);
 792	target_sn = PERR_IE_TARGET_SN(perr_elem);
 793	target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
 794
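    	/* Only invalidate the path if the PERR came from our current next
    	 * hop for that target, the path is not fixed, and either our stored
    	 * SN is not yet valid, the advertised SN is newer, or it is zero;
    	 * the PERR is then propagated (broadcast) when forwarding is
    	 * enabled.
    	 */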
 795	rcu_read_lock();
 796	mpath = mesh_path_lookup(sdata, target_addr);
 797	if (mpath) {
 798		struct sta_info *sta;
 799
 800		spin_lock_bh(&mpath->state_lock);
 801		sta = next_hop_deref_protected(mpath);
 802		if (mpath->flags & MESH_PATH_ACTIVE &&
 803		    ether_addr_equal(ta, sta->sta.addr) &&
 804		    !(mpath->flags & MESH_PATH_FIXED) &&
 805		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
 806		    SN_GT(target_sn, mpath->sn)  || target_sn == 0)) {
 807			mpath->flags &= ~MESH_PATH_ACTIVE;
 808			if (target_sn != 0)
 809				mpath->sn = target_sn;
 810			else
 811				mpath->sn += 1;
 812			spin_unlock_bh(&mpath->state_lock);
 813			if (!ifmsh->mshcfg.dot11MeshForwarding)
 814				goto endperr;
 815			mesh_path_error_tx(sdata, ttl, target_addr,
 816					   target_sn, target_rcode,
 817					   broadcast_addr);
 818		} else
 819			spin_unlock_bh(&mpath->state_lock);
 820	}
 821endperr:
 822	rcu_read_unlock();
 823}
 824
 825static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 826				    struct ieee80211_mgmt *mgmt,
 827				    const struct ieee80211_rann_ie *rann)
 828{
 829	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 830	struct ieee80211_local *local = sdata->local;
 831	struct sta_info *sta;
 832	struct mesh_path *mpath;
 833	u8 ttl, flags, hopcount;
 834	const u8 *orig_addr;
 835	u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
 836	bool root_is_gate;
 837
 838	ttl = rann->rann_ttl;
 839	flags = rann->rann_flags;
 840	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 841	orig_addr = rann->rann_addr;
 842	orig_sn = le32_to_cpu(rann->rann_seq);
 843	interval = le32_to_cpu(rann->rann_interval);
 844	hopcount = rann->rann_hopcount;
 845	hopcount++;
 846	orig_metric = le32_to_cpu(rann->rann_metric);
 847
 848	/*  Ignore our own RANNs */
 849	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 850		return;
 851
 852	mhwmp_dbg(sdata,
 853		  "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
 854		  orig_addr, mgmt->sa, root_is_gate);
 855
 856	rcu_read_lock();
 857	sta = sta_info_get(sdata, mgmt->sa);
 858	if (!sta) {
 859		rcu_read_unlock();
 860		return;
 861	}
 862
 863	last_hop_metric = airtime_link_metric_get(local, sta);
 864	new_metric = orig_metric + last_hop_metric;
 865	if (new_metric < orig_metric)
 866		new_metric = MAX_METRIC;
 867
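    	/* Create or refresh the path to the announcing root.  Stale or
    	 * worse RANNs are discarded by the SN/metric check below; a fresh
    	 * one may also trigger an individually addressed PREQ towards the
    	 * root to (re)confirm the path, rate-limited by the confirmation
    	 * interval.
    	 */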
 868	mpath = mesh_path_lookup(sdata, orig_addr);
 869	if (!mpath) {
 870		mpath = mesh_path_add(sdata, orig_addr);
 871		if (IS_ERR(mpath)) {
 872			rcu_read_unlock();
 873			sdata->u.mesh.mshstats.dropped_frames_no_route++;
 874			return;
 875		}
 876	}
 877
 878	if (!(SN_LT(mpath->sn, orig_sn)) &&
 879	    !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
 880		rcu_read_unlock();
 881		return;
 882	}
 883
 884	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
 885	     (time_after(jiffies, mpath->last_preq_to_root +
 886				  root_path_confirmation_jiffies(sdata)) ||
 887	     time_before(jiffies, mpath->last_preq_to_root))) &&
 888	     !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
 889		mhwmp_dbg(sdata,
 890			  "time to refresh root mpath %pM\n",
 891			  orig_addr);
 892		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
 893		mpath->last_preq_to_root = jiffies;
 894	}
 895
 896	mpath->sn = orig_sn;
 897	mpath->rann_metric = new_metric;
 898	mpath->is_root = true;
 899	/* Record the RANN sender's address so we can send individually
 900	 * addressed PREQs destined for the root mesh STA */
 901	memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
 902
 903	if (root_is_gate)
 904		mesh_path_add_gate(mpath);
 905
 906	if (ttl <= 1) {
 907		ifmsh->mshstats.dropped_frames_ttl++;
 908		rcu_read_unlock();
 909		return;
 910	}
 911	ttl--;
 912
 913	if (ifmsh->mshcfg.dot11MeshForwarding) {
 914		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
 915				       orig_sn, 0, NULL, 0, broadcast_addr,
 916				       hopcount, ttl, interval,
 917				       new_metric, 0, sdata);
 918	}
 919
 920	rcu_read_unlock();
 921}
 922
 923
 924void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
 925			    struct ieee80211_mgmt *mgmt, size_t len)
 926{
 927	struct ieee802_11_elems *elems;
 928	size_t baselen;
 929	u32 path_metric;
 930	struct sta_info *sta;
 931
 932	/* need action_code */
 933	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
 934		return;
 935
 936	rcu_read_lock();
 937	sta = sta_info_get(sdata, mgmt->sa);
 938	if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
 939		rcu_read_unlock();
 940		return;
 941	}
 942	rcu_read_unlock();
 943
 944	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
 945	elems = ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
 946				       len - baselen, false, NULL);
 947	if (!elems)
 948		return;
 949
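    	/* The length checks below match the fixed sizes of a PREQ carrying
    	 * a single target (37 octets), a PREP (31 octets) and a PERR with a
    	 * single destination (15 octets), all without the unsupported
    	 * Address Extension fields.
    	 */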
 950	if (elems->preq) {
 951		if (elems->preq_len != 37)
 952			/* Right now we support just 1 destination and no AE */
 953			goto free;
 954		path_metric = hwmp_route_info_get(sdata, mgmt, elems->preq,
 955						  MPATH_PREQ);
 956		if (path_metric)
 957			hwmp_preq_frame_process(sdata, mgmt, elems->preq,
 958						path_metric);
 959	}
 960	if (elems->prep) {
 961		if (elems->prep_len != 31)
 962			/* Right now we support no AE */
 963			goto free;
 964		path_metric = hwmp_route_info_get(sdata, mgmt, elems->prep,
 965						  MPATH_PREP);
 966		if (path_metric)
 967			hwmp_prep_frame_process(sdata, mgmt, elems->prep,
 968						path_metric);
 969	}
 970	if (elems->perr) {
 971		if (elems->perr_len != 15)
 972			/* Right now we support only one destination per PERR */
 973			goto free;
 974		hwmp_perr_frame_process(sdata, mgmt, elems->perr);
 975	}
 976	if (elems->rann)
 977		hwmp_rann_frame_process(sdata, mgmt, elems->rann);
 978free:
 979	kfree(elems);
 980}
 981
 982/**
 983 * mesh_queue_preq - queue a PREQ to a given destination
 984 *
 985 * @mpath: mesh path to discover
 986 * @flags: special attributes of the PREQ to be sent
 987 *
 988 * Locking: the function must be called from within an RCU read lock block.
 989 *
 990 */
 991static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
 992{
 993	struct ieee80211_sub_if_data *sdata = mpath->sdata;
 994	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 995	struct mesh_preq_queue *preq_node;
 996
 997	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
 998	if (!preq_node) {
 999		mhwmp_dbg(sdata, "could not allocate PREQ node\n");
1000		return;
1001	}
1002
1003	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
1004	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
1005		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1006		kfree(preq_node);
1007		if (printk_ratelimit())
1008			mhwmp_dbg(sdata, "PREQ node queue full\n");
1009		return;
1010	}
1011
1012	spin_lock(&mpath->state_lock);
1013	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
1014		spin_unlock(&mpath->state_lock);
1015		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1016		kfree(preq_node);
1017		return;
1018	}
1019
1020	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
1021	preq_node->flags = flags;
1022
1023	mpath->flags |= MESH_PATH_REQ_QUEUED;
1024	spin_unlock(&mpath->state_lock);
1025
1026	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
1027	++ifmsh->preq_queue_len;
1028	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1029
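    	/* Kick the interface work right away if the minimum PREQ interval
    	 * has already elapsed (or jiffies wrapped); otherwise arm the
    	 * interface's mesh_path_timer so the queued PREQ is sent once the
    	 * interval expires.
    	 */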
1030	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
1031		wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
1032
1033	else if (time_before(jiffies, ifmsh->last_preq)) {
1034		/* avoid a long wait if we did not send PREQs for a long
1035		 * time and jiffies wrapped around
1036		 */
1037		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
1038		wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
1039	} else
1040		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
1041						min_preq_int_jiff(sdata));
1042}
1043
1044/**
1045 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
1046 *
1047 * @sdata: local mesh subif
1048 */
1049void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
1050{
1051	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1052	struct mesh_preq_queue *preq_node;
1053	struct mesh_path *mpath;
1054	u8 ttl, target_flags = 0;
1055	const u8 *da;
1056	u32 lifetime;
1057
1058	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
1059	if (!ifmsh->preq_queue_len ||
1060		time_before(jiffies, ifmsh->last_preq +
1061				min_preq_int_jiff(sdata))) {
1062		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1063		return;
1064	}
1065
1066	preq_node = list_first_entry(&ifmsh->preq_queue.list,
1067			struct mesh_preq_queue, list);
1068	list_del(&preq_node->list);
1069	--ifmsh->preq_queue_len;
1070	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
1071
1072	rcu_read_lock();
1073	mpath = mesh_path_lookup(sdata, preq_node->dst);
1074	if (!mpath)
1075		goto enddiscovery;
1076
1077	spin_lock_bh(&mpath->state_lock);
1078	if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
1079		spin_unlock_bh(&mpath->state_lock);
1080		goto enddiscovery;
1081	}
1082	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
1083	if (preq_node->flags & PREQ_Q_F_START) {
1084		if (mpath->flags & MESH_PATH_RESOLVING) {
1085			spin_unlock_bh(&mpath->state_lock);
1086			goto enddiscovery;
1087		} else {
1088			mpath->flags &= ~MESH_PATH_RESOLVED;
1089			mpath->flags |= MESH_PATH_RESOLVING;
1090			mpath->discovery_retries = 0;
1091			mpath->discovery_timeout = disc_timeout_jiff(sdata);
1092		}
1093	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
1094			mpath->flags & MESH_PATH_RESOLVED) {
1095		mpath->flags &= ~MESH_PATH_RESOLVING;
1096		spin_unlock_bh(&mpath->state_lock);
1097		goto enddiscovery;
1098	}
1099
1100	ifmsh->last_preq = jiffies;
1101
1102	if (time_after(jiffies, ifmsh->last_sn_update +
1103				net_traversal_jiffies(sdata)) ||
1104	    time_before(jiffies, ifmsh->last_sn_update)) {
1105		++ifmsh->sn;
1106		sdata->u.mesh.last_sn_update = jiffies;
1107	}
1108	lifetime = default_lifetime(sdata);
1109	ttl = sdata->u.mesh.mshcfg.element_ttl;
1110	if (ttl == 0) {
1111		sdata->u.mesh.mshstats.dropped_frames_ttl++;
1112		spin_unlock_bh(&mpath->state_lock);
1113		goto enddiscovery;
1114	}
1115
1116	if (preq_node->flags & PREQ_Q_F_REFRESH)
1117		target_flags |= IEEE80211_PREQ_TO_FLAG;
1118	else
1119		target_flags &= ~IEEE80211_PREQ_TO_FLAG;
1120
1121	spin_unlock_bh(&mpath->state_lock);
1122	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
1123	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
1124			       target_flags, mpath->dst, mpath->sn, da, 0,
1125			       ttl, lifetime, 0, ifmsh->preq_id++, sdata);
1126
1127	spin_lock_bh(&mpath->state_lock);
1128	if (!(mpath->flags & MESH_PATH_DELETED))
1129		mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
1130	spin_unlock_bh(&mpath->state_lock);
1131
1132enddiscovery:
1133	rcu_read_unlock();
1134	kfree(preq_node);
1135}
1136
1137/**
1138 * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
1139 *
1140 * @skb: 802.11 frame to be sent
1141 * @sdata: network subif the frame will be sent through
1142 *
1143 * Look up the next hop for the given skb and start path discovery if no
1144 * forwarding information is found.
1145 *
1146 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
1147 * skb is freed here if no mpath could be allocated.
1148 */
1149int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
1150			 struct sk_buff *skb)
1151{
1152	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1153	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1154	struct mesh_path *mpath;
1155	struct sk_buff *skb_to_free = NULL;
1156	u8 *target_addr = hdr->addr3;
1157
1158	/* Nulls are only sent to peers for PS and should be pre-addressed */
1159	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1160		return 0;
1161
1162	/* Allow injected packets to bypass mesh routing */
1163	if (info->control.flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP)
1164		return 0;
1165
1166	if (!mesh_nexthop_lookup(sdata, skb))
1167		return 0;
1168
1169	/* no nexthop found, start resolving */
1170	mpath = mesh_path_lookup(sdata, target_addr);
1171	if (!mpath) {
1172		mpath = mesh_path_add(sdata, target_addr);
1173		if (IS_ERR(mpath)) {
1174			mesh_path_discard_frame(sdata, skb);
1175			return PTR_ERR(mpath);
1176		}
1177	}
1178
1179	if (!(mpath->flags & MESH_PATH_RESOLVING) &&
1180	    mesh_path_sel_is_hwmp(sdata))
1181		mesh_queue_preq(mpath, PREQ_Q_F_START);
1182
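    	/* Buffer the frame on the mpath until discovery completes; if the
    	 * queue is already full, drop the oldest buffered frame.  Returning
    	 * -ENOENT below tells the caller the skb has been queued here.
    	 */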
1183	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
1184		skb_to_free = skb_dequeue(&mpath->frame_queue);
1185
1186	info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
1187	ieee80211_set_qos_hdr(sdata, skb);
1188	skb_queue_tail(&mpath->frame_queue, skb);
1189	if (skb_to_free)
1190		mesh_path_discard_frame(sdata, skb_to_free);
1191
1192	return -ENOENT;
1193}
1194
1195/**
1196 * mesh_nexthop_lookup_nolearn - try to set next hop without path discovery
1197 * @skb: 802.11 frame to be sent
1198 * @sdata: network subif the frame will be sent through
1199 *
1200 * Check whether the meshDA (addr3) of a unicast frame is a direct
1201 * neighbor, and if so set the RA (addr1) to it so the frame is sent to
1202 * that node directly, avoiding PREQ/PREP path discovery.
1203 *
1204 * Returns: 0 if the next hop was found and -ENOENT otherwise.
1205 */
1206static int mesh_nexthop_lookup_nolearn(struct ieee80211_sub_if_data *sdata,
1207				       struct sk_buff *skb)
1208{
1209	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1210	struct sta_info *sta;
1211
1212	if (is_multicast_ether_addr(hdr->addr1))
1213		return -ENOENT;
1214
1215	rcu_read_lock();
1216	sta = sta_info_get(sdata, hdr->addr3);
1217
1218	if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
1219		rcu_read_unlock();
1220		return -ENOENT;
1221	}
1222	rcu_read_unlock();
1223
1224	memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
1225	memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
1226	return 0;
1227}
1228
1229void mesh_path_refresh(struct ieee80211_sub_if_data *sdata,
1230		       struct mesh_path *mpath, const u8 *addr)
1231{
1232	if (mpath->flags & (MESH_PATH_REQ_QUEUED | MESH_PATH_FIXED |
1233			    MESH_PATH_RESOLVING))
1234		return;
1235
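    	/* Queue a refresh PREQ when the path will expire within
    	 * path_refresh_time ms, but only when no address is given or this
    	 * MP originated the frame, so the path is rebuilt before it
    	 * actually times out.
    	 */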
1236	if (time_after(jiffies,
1237		       mpath->exp_time -
1238		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
1239	    (!addr || ether_addr_equal(sdata->vif.addr, addr)))
1240		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
1241}
1242
1243/**
1244 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
1245 * this function is considered "using" the associated mpath, so preempt a path
1246 * refresh if this mpath expires soon.
1247 *
1248 * @skb: 802.11 frame to be sent
1249 * @sdata: network subif the frame will be sent through
1250 *
1251 * Returns: 0 if the next hop was found. Nonzero otherwise.
1252 */
1253int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata,
1254			struct sk_buff *skb)
1255{
1256	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1257	struct mesh_path *mpath;
1258	struct sta_info *next_hop;
1259	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1260	u8 *target_addr = hdr->addr3;
1261
1262	if (ifmsh->mshcfg.dot11MeshNolearn &&
1263	    !mesh_nexthop_lookup_nolearn(sdata, skb))
1264		return 0;
1265
1266	mpath = mesh_path_lookup(sdata, target_addr);
1267	if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
1268		return -ENOENT;
1269
1270	mesh_path_refresh(sdata, mpath, hdr->addr4);
1271
1272	next_hop = rcu_dereference(mpath->next_hop);
1273	if (next_hop) {
1274		memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
1275		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
1276		ieee80211_mps_set_frame_flags(sdata, next_hop, hdr);
1277		if (ieee80211_hw_check(&sdata->local->hw, SUPPORT_FAST_XMIT))
1278			mesh_fast_tx_cache(sdata, skb, mpath);
1279		return 0;
1280	}
1281
1282	return -ENOENT;
1283}
1284
1285void mesh_path_timer(struct timer_list *t)
1286{
1287	struct mesh_path *mpath = from_timer(mpath, t, timer);
1288	struct ieee80211_sub_if_data *sdata = mpath->sdata;
1289	int ret;
1290
1291	if (sdata->local->quiescing)
1292		return;
1293
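    	/* Discovery outcome: if the path resolved (or nothing was being
    	 * resolved) just clear the state flags; otherwise retry with binary
    	 * exponential backoff until max_preq_retries is reached, then give
    	 * up and either redirect pending frames to a mesh gate or flush
    	 * them.
    	 */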
1294	spin_lock_bh(&mpath->state_lock);
1295	if (mpath->flags & MESH_PATH_RESOLVED ||
1296			(!(mpath->flags & MESH_PATH_RESOLVING))) {
1297		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
1298		spin_unlock_bh(&mpath->state_lock);
1299	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
1300		++mpath->discovery_retries;
1301		mpath->discovery_timeout *= 2;
1302		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
1303		spin_unlock_bh(&mpath->state_lock);
1304		mesh_queue_preq(mpath, 0);
1305	} else {
1306		mpath->flags &= ~(MESH_PATH_RESOLVING |
1307				  MESH_PATH_RESOLVED |
1308				  MESH_PATH_REQ_QUEUED);
1309		mpath->exp_time = jiffies;
1310		spin_unlock_bh(&mpath->state_lock);
1311		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
1312			ret = mesh_path_send_to_gates(mpath);
1313			if (ret)
1314				mhwmp_dbg(sdata, "no gate was reachable\n");
1315		} else
1316			mesh_path_flush_pending(mpath);
1317	}
1318}
1319
1320void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1321{
1322	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1323	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
1324	u8 flags, target_flags = 0;
1325
1326	flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
1327			? RANN_FLAG_IS_GATE : 0;
1328
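    	/* Depending on dot11MeshHWMPRootMode, a root MP periodically sends
    	 * either a RANN or a proactive (broadcast target) PREQ; with
    	 * PROACTIVE_PREQ_WITH_PREP the proactive PREP flag is set so that
    	 * receivers answer with a PREP, building forward paths to the root.
    	 */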
1329	switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
1330	case IEEE80211_PROACTIVE_RANN:
1331		mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
1332				       ++ifmsh->sn, 0, NULL, 0, broadcast_addr,
1333				       0, ifmsh->mshcfg.element_ttl,
1334				       interval, 0, 0, sdata);
1335		break;
1336	case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
1337		flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
1338		fallthrough;
1339	case IEEE80211_PROACTIVE_PREQ_NO_PREP:
1340		interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
1341		target_flags |= IEEE80211_PREQ_TO_FLAG |
1342				IEEE80211_PREQ_USN_FLAG;
1343		mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
1344				       ++ifmsh->sn, target_flags,
1345				       (u8 *) broadcast_addr, 0, broadcast_addr,
1346				       0, ifmsh->mshcfg.element_ttl, interval,
1347				       0, ifmsh->preq_id++, sdata);
1348		break;
1349	default:
1350		mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
1351		return;
1352	}
1353}