/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

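/* A packet moves through these states while it sits on the parallel crypto
 * queues: it is enqueued UNCRYPTED, becomes CRYPTED once a worker has
 * encrypted or decrypted it, and is marked DEAD if the operation failed and
 * the serialized consumer should drop the skb.
 */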
enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

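/* Per-packet crypto metadata, stashed in the skb control buffer (skb->cb)
 * and accessed through the PACKET_CB() macro below.
 */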
struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)

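/* Returns true only when the protocol recorded in skb->protocol agrees with
 * the IP version actually present in the packet data.
 */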
static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);

	return real_protocol && skb->protocol == real_protocol;
}

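/* Scrubs an skb of state that is not meaningful once it crosses the tunnel
 * boundary, optionally preserving the flow hash when encapsulating so that
 * the outer packet keeps the inner flow's steering.
 */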
static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;
	skb_scrub_packet(skb, true);
	memset(&skb->headers, 0, sizeof(skb->headers));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}

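/* Maps a stable identifier (such as a peer's internal id) onto an online CPU
 * and caches the answer in *stored_cpu, recomputing only when the cached CPU
 * is unset or has gone offline.
 */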
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu == nr_cpumask_bits ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}

/* This function is racy, in the sense that next is unlocked, so it could return
 * the same CPU twice. A race-free version of this would be to instead store an
 * atomic sequence number, do an increment-and-return, and then iterate through
 * every possible CPU until we get to that index -- choose_cpu. However that's
 * a bit slower, and it doesn't seem like this potential race actually
 * introduces any performance loss, so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
	int cpu = *next;

	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	return cpu;
}

void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
	if (queue->peeked)
		return queue->peeked;
	queue->peeked = wg_prev_queue_dequeue(queue);
	return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
	queue->peeked = NULL;
}

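/* Places the skb on both queues that cooperate to preserve ordering: the
 * peer's serialized prev_queue, drained in submission order, and the device's
 * parallel crypt_queue, whose per-CPU workers do the actual encryption or
 * decryption.
 */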
static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * ingesting it.
	 */
	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
		return -ENOSPC;

	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(next_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}

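/* Marks an encrypted packet with its final state and kicks the peer's
 * serialized transmit worker on a consistently chosen online CPU.
 */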
static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
	wg_peer_put(peer);
}

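/* Marks a decrypted packet with its final state and schedules the peer's
 * NAPI instance, which delivers packets up the stack in order.
 */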
static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */