v4.10.11
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(*(u32 *)(addr+2), seed);
}
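
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * mesh_table_hash() keys on the last four bytes of the 6-byte MAC
 * address (addr+2 points at bytes 2..5).  A hypothetical helper that
 * makes the byte selection explicit (the example_* name is invented):
 */
static inline u32 example_mesh_hash_input(const u8 addr[ETH_ALEN])
{
	/* for aa:bb:cc:dd:ee:ff this reads the 32-bit word cc:dd:ee:ff */
	return *(const u32 *)(addr + 2);
}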

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static struct mesh_table *mesh_table_alloc(void)
{
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	INIT_HLIST_HEAD(&newtbl->known_gates);
	atomic_set(&newtbl->entries,  0);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
	kfree(tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
}
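
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Both lookups return an RCU-protected entry, so a caller must stay
 * inside the RCU read-side section for as long as it uses the result.
 * A hypothetical caller copying out the next-hop address (the
 * example_* name is invented):
 */
static inline bool example_next_hop_addr(struct ieee80211_sub_if_data *sdata,
					 const u8 *dst, u8 *hop)
{
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool found = false;

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, dst);
	if (mpath && (mpath->flags & MESH_PATH_ACTIVE)) {
		sta = rcu_dereference(mpath->next_hop);
		if (sta) {
			memcpy(hop, sta->sta.addr, ETH_ALEN);
			found = true;
		}
	}
	rcu_read_unlock();	/* neither mpath nor sta may be used past here */
	return found;
}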

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0, ret;
	struct mesh_path *mpath = NULL;
	struct rhashtable_iter iter;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return NULL;

	ret = rhashtable_walk_start(&iter);
	if (ret && ret != -EAGAIN)
		goto err;

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;
		if (i++ == idx)
			break;
	}
err:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);

	if (IS_ERR(mpath) || !mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
}
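
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * The _by_idx lookups restart the rhashtable walk from scratch on every
 * call, so walking a whole table this way is O(n^2); it is only meant
 * for slow paths such as userspace dumps.  A hypothetical dump loop
 * (the example_* name is invented):
 */
static inline void example_dump_paths(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	int idx = 0;

	rcu_read_lock();
	while ((mpath = mesh_path_lookup_by_idx(sdata, idx++)) != NULL)
		mpath_dbg(sdata, "path %d: dst %pM\n", idx - 1, mpath->dst);
	rcu_read_unlock();
}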

/**
 * mesh_path_add_gate - add the given mpath to the list of known mesh gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	return new_mpath;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new or existing mesh path on success, or an ERR_PTR() on
 * failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = sdata->u.mesh.mesh_paths;
	do {
		ret = rhashtable_lookup_insert_fast(&tbl->rhead,
						    &new_mpath->rhash,
						    mesh_rht_params);

		if (ret == -EEXIST)
			mpath = rhashtable_lookup_fast(&tbl->rhead,
						       dst,
						       mesh_rht_params);

	} while (unlikely(ret == -EEXIST && !mpath));

	if (ret && ret != -EEXIST) {
		/* don't leak the unused entry on insertion failure */
		kfree(new_mpath);
		return ERR_PTR(ret);
	}

	/* At this point either new_mpath was added, or we found a
	 * matching entry already in the table; in the latter case
	 * free the unnecessary new entry.
	 */
	if (ret == -EEXIST) {
		kfree(new_mpath);
		new_mpath = mpath;
	}
	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}
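
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * mesh_path_add() behaves as create-or-get: if a concurrent insert of
 * the same destination wins, the already-existing entry is returned
 * rather than an error, so callers only need IS_ERR() handling.  A
 * hypothetical caller, which must hold rcu_read_lock() as with
 * mesh_path_lookup() (the example_* name is invented):
 */
static inline struct mesh_path *
example_get_or_create_path(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	struct mesh_path *mpath;

	mpath = mesh_path_lookup(sdata, dst);
	if (!mpath)
		mpath = mesh_path_add(sdata, dst);	/* may return ERR_PTR() */
	return mpath;
}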

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = sdata->u.mesh.mpp_paths;
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (ret)
		/* don't leak the unused entry on insertion failure */
		kfree(new_mpath);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct rhashtable_iter iter;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return;

	ret = rhashtable_walk_start(&iter);
	if (ret && ret != -EAGAIN)
		goto out;

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
out:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	del_timer_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct rhashtable_iter iter;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return;

	ret = rhashtable_walk_start(&iter);
	if (ret && ret != -EAGAIN)
		goto out;

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;

		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
out:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct rhashtable_iter iter;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return;

	ret = rhashtable_walk_start(&iter);
	if (ret && ret != -EAGAIN)
		goto out;

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;

		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
out:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct rhashtable_iter iter;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return;

	ret = rhashtable_walk_start(&iter);
	if (ret && ret != -EAGAIN)
		goto out;

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;
		__mesh_path_del(tbl, mpath);
	}
out:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(sdata->u.mesh.mesh_paths);
	table_flush_by_iface(sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		rcu_read_unlock();
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	rcu_read_unlock();
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
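
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * A typical error-path use: when path discovery fails, hand the queued
 * frames to the known gates, and only discard them if no gate is
 * reachable (the example_* name is invented):
 */
static inline void example_path_discovery_failed(struct mesh_path *mpath)
{
	if (mesh_path_send_to_gates(mpath))
		mesh_path_flush_pending(mpath);	/* -EHOSTUNREACH: no gate */
}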

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc();
	if (!tbl_path)
		return -ENOMEM;

	tbl_mpp = mesh_table_alloc();
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}

	rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
	rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);

	sdata->u.mesh.mesh_paths = tbl_path;
	sdata->u.mesh.mpp_paths = tbl_mpp;

	return 0;

free_path:
	mesh_table_free(tbl_path);
	return ret;
}
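
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Per-interface table lifecycle as seen from the mesh setup/teardown
 * paths; the real call sites live elsewhere in mac80211 and the
 * example_* name is invented:
 */
static inline int
example_mesh_tables_lifecycle(struct ieee80211_sub_if_data *sdata)
{
	int ret = mesh_pathtbl_init(sdata);	/* allocate both tables */

	if (ret)
		return ret;
	/* ... interface runs; mesh_path_expire(sdata) ages out entries ... */
	mesh_path_flush_by_iface(sdata);	/* delete every entry */
	mesh_pathtbl_unregister(sdata);		/* free the empty tables */
	return 0;
}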

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct rhashtable_iter iter;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
	if (ret)
		return;

	ret = rhashtable_walk_start(&iter);
	if (ret && ret != -EAGAIN)
		goto out;

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
out:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_free(sdata->u.mesh.mesh_paths);
	mesh_table_free(sdata->u.mesh.mpp_paths);
}

v3.1
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))
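
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * The lookup functions below evaluate MPATH_EXPIRED() once without the
 * lock and again under state_lock before clearing MESH_PATH_ACTIVE, a
 * double-checked pattern that avoids taking the lock on every hit (the
 * example_* name is invented):
 */
static inline void example_expire_check(struct mesh_path *mpath)
{
	if (MPATH_EXPIRED(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		if (MPATH_EXPIRED(mpath))	/* re-check under the lock */
			mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
}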

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
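
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Typical use of for_each_mesh_entry() under RCU.  Note that tbl is a
 * plain local variable, never an rcu_dereference() expression, because
 * the macro expands its first argument twice (the example_* name is
 * invented):
 */
static inline int example_count_mesh_paths(void)
{
	struct mesh_table *tbl;
	struct mpath_node *node;
	struct hlist_node *p;
	int i, n = 0;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i)
		n++;
	rcu_read_unlock();
	return n;
}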

static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries,  0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					cpu_to_le16(PERR_RCODE_DEST_UNREACH),
					bcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&tbl->entries);
			spin_unlock(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.  The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;

	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			sn = ++mpath->sn;
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
				   cpu_to_le32(sn),
				   cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		mesh_table_free(tbl_path, true);
		return -ENOMEM;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_raw(mesh_paths), true);
	mesh_table_free(rcu_dereference_raw(mpp_paths), true);
}