// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Copyright (C) 2023 Intel Corporation
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static const struct rhashtable_params fast_tx_rht_params = {
	.nelem_hint = 10,
	.automatic_shrinking = true,
	.key_len = sizeof_field(struct ieee80211_mesh_fast_tx, key),
	.key_offset = offsetof(struct ieee80211_mesh_fast_tx, key),
	.head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
	.hashfn = mesh_table_hash,
};

static void __mesh_fast_tx_entry_free(void *ptr, void *tblptr)
{
	struct ieee80211_mesh_fast_tx *entry = ptr;

	kfree_rcu(entry, fast_tx.rcu_head);
}

static void mesh_fast_tx_deinit(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_free_and_destroy(&cache->rht,
				    __mesh_fast_tx_entry_free, NULL);
}

static void mesh_fast_tx_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_init(&cache->rht, &fast_tx_rht_params);
	INIT_HLIST_HEAD(&cache->walk_head);
	spin_lock_init(&cache->walk_lock);
}

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static void mesh_table_init(struct mesh_table *tbl)
{
	INIT_HLIST_HEAD(&tbl->known_gates);
	INIT_HLIST_HEAD(&tbl->walk_head);
	atomic_set(&tbl->entries, 0);
	spin_lock_init(&tbl->gates_lock);
	spin_lock_init(&tbl->walk_lock);

	/* rhashtable_init() may fail only in case of wrong
	 * mesh_rht_params
	 */
	WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}
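
/*
 * Usage sketch, not part of this file: both lookup helpers must run inside
 * an RCU read section, and the returned mpath (and anything dereferenced
 * through it) must not be used after rcu_read_unlock(). The sdata and dst
 * values here are hypothetical.
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */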

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}

/**
 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
 * @mpath: gate path to add to table
 *
 * Returns: 0 on success, -EEXIST if @mpath is already marked as a gate
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = &mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
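
/*
 * Usage sketch (hedged, along the lines of the HWMP RANN handler): once a
 * path to a root node that announced itself as a gate has resolved, record
 * it in the gate list:
 *
 *	if (rann->rann_flags & RANN_FLAG_IS_GATE)
 *		mesh_path_add_gate(mpath);
 */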

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 *
 * Returns: The number of gates
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
				    struct ieee80211_mesh_fast_tx *entry)
{
	hlist_del_rcu(&entry->walk_list);
	rhashtable_remove_fast(&cache->rht, &entry->rhash, fast_tx_rht_params);
	kfree_rcu(entry, fast_tx.rcu_head);
}

struct ieee80211_mesh_fast_tx *
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
		 struct ieee80211_mesh_fast_tx_key *key)
{
	struct ieee80211_mesh_fast_tx *entry;
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
	if (!entry)
		return NULL;

	if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
	    mpath_expired(entry->mpath)) {
		spin_lock_bh(&cache->walk_lock);
		entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
		if (entry)
			mesh_fast_tx_entry_free(cache, entry);
		spin_unlock_bh(&cache->walk_lock);
		return NULL;
	}

	mesh_path_refresh(sdata, entry->mpath, NULL);
	if (entry->mppath)
		entry->mppath->exp_time = jiffies;
	entry->timestamp = jiffies;

	return entry;
}
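
/*
 * Usage sketch (hedged, roughly what the tx fast path does): build a key
 * from the destination and entry type, then look up a cached header under
 * RCU. The "da" address below is a hypothetical destination.
 *
 *	struct ieee80211_mesh_fast_tx_key key = {
 *		.type = MESH_FAST_TX_TYPE_LOCAL,
 *	};
 *	struct ieee80211_mesh_fast_tx *entry;
 *
 *	ether_addr_copy(key.addr, da);
 *	rcu_read_lock();
 *	entry = mesh_fast_tx_get(sdata, &key);
 *	if (entry)
 *		... // build the frame from entry->fast_tx
 *	rcu_read_unlock();
 */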

void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
			struct sk_buff *skb, struct mesh_path *mpath)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_mesh_fast_tx *entry, *prev;
	struct ieee80211_mesh_fast_tx build = {};
	struct ieee80211s_hdr *meshhdr;
	struct mesh_tx_cache *cache;
	struct ieee80211_key *key;
	struct mesh_path *mppath;
	struct sta_info *sta;
	u8 *qc;

	if (sdata->noack_map ||
	    !ieee80211_is_data_qos(hdr->frame_control))
		return;

	build.fast_tx.hdr_len = ieee80211_hdrlen(hdr->frame_control);
	meshhdr = (struct ieee80211s_hdr *)(skb->data + build.fast_tx.hdr_len);
	build.hdrlen = ieee80211_get_mesh_hdrlen(meshhdr);

	cache = &sdata->u.mesh.tx_cache;
	if (atomic_read(&cache->rht.nelems) >= MESH_FAST_TX_CACHE_MAX_SIZE)
		return;

	sta = rcu_dereference(mpath->next_hop);
	if (!sta)
		return;

	build.key.type = MESH_FAST_TX_TYPE_LOCAL;
	if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
		/* This is required to keep the mppath alive */
		mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
		if (!mppath)
			return;
		build.mppath = mppath;
		if (!ether_addr_equal(meshhdr->eaddr2, sdata->vif.addr))
			build.key.type = MESH_FAST_TX_TYPE_PROXIED;
	} else if (ieee80211_has_a4(hdr->frame_control)) {
		mppath = mpath;
	} else {
		return;
	}

	if (!ether_addr_equal(hdr->addr4, sdata->vif.addr))
		build.key.type = MESH_FAST_TX_TYPE_FORWARDED;

	/* rate limit, in case fast xmit can't be enabled */
	if (mppath->fast_tx_check == jiffies)
		return;

	mppath->fast_tx_check = jiffies;

	/*
	 * Same use of the sta lock as in ieee80211_check_fast_xmit, in order
	 * to protect against concurrent sta key updates.
	 */
	spin_lock_bh(&sta->lock);
	key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
	if (!key)
		key = rcu_access_pointer(sdata->default_unicast_key);
	build.fast_tx.key = key;

	if (key) {
		bool gen_iv, iv_spc;

		gen_iv = key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
		iv_spc = key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;

		if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
		    (key->flags & KEY_FLAG_TAINTED))
			goto unlock_sta;

		switch (key->conf.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_CCMP_HDR_LEN;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_GCMP_HDR_LEN;
			break;
		default:
			goto unlock_sta;
		}
	}

	memcpy(build.key.addr, mppath->dst, ETH_ALEN);
	build.timestamp = jiffies;
	build.fast_tx.band = info->band;
	build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
	build.fast_tx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
	build.mpath = mpath;
	memcpy(build.hdr, meshhdr, build.hdrlen);
	memcpy(build.hdr + build.hdrlen, rfc1042_header, sizeof(rfc1042_header));
	build.hdrlen += sizeof(rfc1042_header);
	memcpy(build.fast_tx.hdr, hdr, build.fast_tx.hdr_len);

	hdr = (struct ieee80211_hdr *)build.fast_tx.hdr;
	if (build.fast_tx.key)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

	qc = ieee80211_get_qos_ctl(hdr);
	qc[1] |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8;

	entry = kmemdup(&build, sizeof(build), GFP_ATOMIC);
	if (!entry)
		goto unlock_sta;

	spin_lock(&cache->walk_lock);
	prev = rhashtable_lookup_get_insert_fast(&cache->rht,
						 &entry->rhash,
						 fast_tx_rht_params);
	if (IS_ERR(prev)) {
		kfree(entry);
		goto unlock_cache;
	}

	/*
	 * replace any previous entry in the hash table, in case we're
	 * replacing it with a different type (e.g. mpath -> mpp)
	 */
	if (unlikely(prev)) {
		rhashtable_replace_fast(&cache->rht, &prev->rhash,
					&entry->rhash, fast_tx_rht_params);
		hlist_del_rcu(&prev->walk_list);
		kfree_rcu(prev, fast_tx.rcu_head);
	}

	hlist_add_head(&entry->walk_list, &cache->walk_head);

unlock_cache:
	spin_unlock(&cache->walk_lock);
unlock_sta:
	spin_unlock_bh(&sta->lock);
}

void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
	unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
		return;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (!time_is_after_jiffies(entry->timestamp + timeout))
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (entry->mpath == mpath)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
			    struct sta_info *sta)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (rcu_access_pointer(entry->mpath->next_hop) == sta)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
			     const u8 *addr)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx_key key = {};
	struct ieee80211_mesh_fast_tx *entry;
	int i;

	ether_addr_copy(key.addr, addr);
	spin_lock_bh(&cache->walk_lock);
	for (i = 0; i < NUM_MESH_FAST_TX_TYPE; i++) {
		key.type = i;
		entry = rhashtable_lookup_fast(&cache->rht, &key, fast_tx_rht_params);
		if (entry)
			mesh_fast_tx_entry_free(cache, entry);
	}
	spin_unlock_bh(&cache->walk_lock);
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new (or already existing) mesh path on success, or an
 * ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-EOPNOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-EOPNOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = &sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}
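
/*
 * Usage sketch (hedged, mirroring the HWMP next-hop resolution path):
 * create or reuse a path entry, then kick off path discovery for it if it
 * is not already resolving.
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath))
 *		return PTR_ERR(mpath);
 *	if (!(mpath->flags & MESH_PATH_RESOLVING))
 *		mesh_queue_preq(mpath, PREQ_Q_F_START);
 */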

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -EOPNOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -EOPNOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);

	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = &sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);
	else
		mesh_fast_tx_flush_addr(sdata, dst);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	timer_shutdown_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	if (tbl == &mpath->sdata->u.mesh.mpp_paths)
		mesh_fast_tx_flush_addr(mpath->sdata, mpath->dst);
	else
		mesh_fast_tx_flush_mpath(mpath);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * @sdata: interface data to match
 *
 * This function deletes both mesh paths and mesh portal paths.
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(&sdata->u.mesh.mesh_paths);
	table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, -EHOSTUNREACH if no active gate was found
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = &sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
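
/*
 * Usage sketch (hedged, along the lines of the HWMP path timer): when
 * discovery retries for mpath are exhausted, hand the queued frames to the
 * known gates and drop whatever could not be forwarded:
 *
 *	ret = mesh_path_send_to_gates(mpath);
 *	if (ret)
 *		mesh_path_flush_pending(mpath);
 */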

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	ieee80211_free_txskb(&sdata->local->hw, skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq, *tmp;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
		if (ether_addr_equal(mpath->dst, preq->dst)) {
			list_del(&preq->list);
			kfree(preq);
			--ifmsh->preq_queue_len;
		}
	}
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must NOT be called holding mpath->state_lock;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	mesh_fast_tx_flush_mpath(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}
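
/*
 * Usage sketch (hedged, roughly what the cfg80211 change_mpath handler
 * does): resolve the next-hop sta under RCU, then pin the path to it.
 *
 *	rcu_read_lock();
 *	sta = sta_info_get(sdata, next_hop);
 *	if (sta) {
 *		mpath = mesh_path_lookup(sdata, dst);
 *		if (mpath)
 *			mesh_path_fix_nexthop(mpath, sta);
 *	}
 *	rcu_read_unlock();
 */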

void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_init(&sdata->u.mesh.mesh_paths);
	mesh_table_init(&sdata->u.mesh.mpp_paths);
	mesh_fast_tx_init(sdata);
}

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_fast_tx_deinit(sdata);
	mesh_table_free(&sdata->u.mesh.mesh_paths);
	mesh_table_free(&sdata->u.mesh.mpp_paths);
}