/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 bool multicore, unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
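
/* Lifecycle sketch (illustrative, pieced together from the helpers below): a
 * packet is published in state UNCRYPTED, a device-queue worker moves it to
 * CRYPTED (or DEAD on failure) via atomic_set_release(), and the per-peer
 * consumer pairs that with an acquire read before touching the skb:
 *
 *	if (atomic_read_acquire(&PACKET_CB(skb)->state) ==
 *	    PACKET_STATE_UNCRYPTED)
 *		break;	// head of the peer queue isn't ready yet
 */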

static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);
	return real_protocol && skb->protocol == real_protocol;
}
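
/* For example (illustrative): a frame tagged htons(ETH_P_IP) whose first
 * header nibble parses as IPv6 fails the check above, as does anything
 * ip_tunnel_parse_protocol() cannot classify (real_protocol == 0).
 */
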
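/* Scrub all state the outer stack left on the skb (conntrack, dst, secmark,
 * the span between headers_start and headers_end, etc.) so nothing leaks
 * between the tunnel and plaintext sides. When encapsulating, the flow hash
 * is deliberately preserved so the encrypted packet keeps the same
 * transmit-queue/RSS placement as the flow it carries.
 */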
static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;
	skb_scrub_packet(skb, true);
	memset(&skb->headers_start, 0,
	       offsetof(struct sk_buff, headers_end) -
		       offsetof(struct sk_buff, headers_start));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}

static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu == nr_cpumask_bits ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}
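
/* Worked example (illustrative): with CPUs {0, 2, 3, 5} online and id == 6,
 * cpu_index = 6 % 4 = 2, so the walk above lands on CPU 3, which is cached
 * in *stored_cpu and reused until that CPU goes offline.
 */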

/* This function is racy, in the sense that next is unlocked, so it could return
 * the same CPU twice. A race-free version would instead store an atomic
 * sequence number, do an increment-and-return, and then iterate through every
 * possible CPU until reaching that index, much as wg_cpumask_choose_online()
 * does (see the sketch after this function). However, that's a bit slower, and
 * this potential race doesn't appear to introduce any actual performance loss,
 * so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
	int cpu = *next;

	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	return cpu;
}
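
/* Minimal sketch of the race-free variant described above. The function name
 * and the atomic_t counter are hypothetical; this is not part of the driver:
 *
 *	static inline int wg_cpumask_next_online_strict(atomic_t *seq)
 *	{
 *		unsigned int i, index = (unsigned int)atomic_fetch_inc(seq) %
 *					cpumask_weight(cpu_online_mask);
 *		int cpu = cpumask_first(cpu_online_mask);
 *
 *		for (i = 0; i < index; ++i)
 *			cpu = cpumask_next(cpu, cpu_online_mask);
 *		return cpu;
 *	}
 */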

static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * consuming it.
	 */
	if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
		return -ENOSPC;
	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(next_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}
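
/* Illustrative caller pattern, loosely following the send path (the queue and
 * field names here are assumptions about the surrounding driver, not
 * guaranteed by this header):
 *
 *	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
 *						   &peer->tx_queue, skb,
 *						   wg->packet_crypt_wq,
 *						   &wg->encrypt_queue.last_cpu);
 *	if (unlikely(ret == -EPIPE))
 *		wg_queue_enqueue_per_peer(&peer->tx_queue, skb,
 *					  PACKET_STATE_DEAD);
 *
 * -ENOSPC means the peer queue itself was full and the packet was queued
 * nowhere; -EPIPE means it already sits in the peer queue and must be marked
 * DEAD so the consumer can discard it in order.
 */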

static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
					     struct sk_buff *skb,
					     enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed out from under us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
					       peer->internal_id),
		      peer->device->packet_crypt_wq, &queue->work);
	wg_peer_put(peer);
}
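
/* Note: using wg_cpumask_choose_online() keyed by the peer's serial_work_cpu
 * keeps a given peer's completion work on one stable CPU at a time, which
 * helps the final stage consume that peer's packets in order.
 */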

static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
						  enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed out from under us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}
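
/* The receive path uses this NAPI variant instead of a workqueue: once a
 * decrypt worker flips the state, napi_schedule() arranges for
 * wg_packet_rx_poll() to drain the peer's rx ring in softirq context.
 */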

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */