drivers/net/wireguard/queueing.c, as of v6.8:
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include <linux/skb_array.h>

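/*
 * Allocate one worker per possible CPU, each initialized to run
 * @function via its work_struct, with @ptr stored as shared context.
 * Returns NULL if the per-CPU allocation fails.
 */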
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
	int cpu;
	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}

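/*
 * Initialize a crypt_queue: a ptr_ring of @len slots plus the per-CPU
 * workers that will drain it. last_cpu starts at -1 so that round-robin
 * CPU selection can begin from the first online CPU. If the workers
 * cannot be allocated, the ring is torn down and -ENOMEM returned.
 */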
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len)
{
	int ret;

	memset(queue, 0, sizeof(*queue));
	queue->last_cpu = -1;
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
	if (!queue->worker) {
		ptr_ring_cleanup(&queue->ring, NULL);
		return -ENOMEM;
	}
	return 0;
}

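/*
 * Tear down a crypt_queue. With @purge, packets still in the ring are
 * freed via __skb_array_destroy_skb(); without it, the ring must already
 * be empty, which the WARN_ON checks.
 */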
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
	free_percpu(queue->worker);
	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}

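/*
 * What follows is a lock-free multi-producer, single-consumer queue in
 * the style of Dmitry Vyukov's intrusive MPSC queue. Links are threaded
 * through skb->prev (aliased as NEXT below), and a stub node embedded in
 * the queue itself (queue->empty, cast by STUB) means no allocation is
 * ever needed for queue bookkeeping.
 */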
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)

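/*
 * The BUILD_BUG_ON asserts that the stub's next/prev members sit at the
 * same offsets as sk_buff's, so that casting &queue->empty to a
 * struct sk_buff * (STUB) is safe for the fields the queue touches.
 */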
void wg_prev_queue_init(struct prev_queue *queue)
{
	NEXT(STUB(queue)) = NULL;
	queue->head = queue->tail = STUB(queue);
	queue->peeked = NULL;
	atomic_set(&queue->count, 0);
	BUILD_BUG_ON(
		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
							offsetof(struct prev_queue, empty) ||
		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
							 offsetof(struct prev_queue, empty));
}

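/*
 * Producer side: terminate the new node, then atomically swing head to
 * it. xchg_release() orders the node's initialization before its
 * publication; the consumer observes the link with a paired
 * smp_load_acquire().
 */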
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	WRITE_ONCE(NEXT(skb), NULL);
	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}

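/*
 * Bounded enqueue: refuses the packet once MAX_QUEUED_PACKETS are
 * already queued, leaving the skb for the caller to dispose of.
 */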
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
		return false;
	__wg_prev_queue_enqueue(queue, skb);
	return true;
}

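/*
 * Consumer side, single-threaded by contract. Returns NULL both when the
 * queue is empty and when a producer has swung head but not yet linked
 * its node, a transient state the caller simply retries later.
 */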
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

	/* Skip over the stub node if it is currently at the tail. */
	if (tail == STUB(queue)) {
		if (!next)
			return NULL;
		queue->tail = next;
		tail = next;
		next = smp_load_acquire(&NEXT(next));
	}
	/* Normal case: tail has a successor, so it can be handed out. */
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	/* tail has no successor yet. If it is not also the head, a producer
	 * is mid-enqueue; report empty and let the caller retry later. */
	if (tail != READ_ONCE(queue->head))
		return NULL;
	/* tail is the last real node: re-insert the stub behind it so the
	 * list is never left entirely empty, then try once more. */
	__wg_prev_queue_enqueue(queue, STUB(queue));
	next = smp_load_acquire(&NEXT(tail));
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	return NULL;
}

#undef NEXT
#undef STUB
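
As a usage sketch, here is how a caller might wire these APIs together. All names other than the wg_* functions and standard kernel APIs (my_crypt_worker, my_setup, my_produce, my_consume, peer_queue, the 1024 ring size) are hypothetical, chosen for illustration rather than taken from the WireGuard sources:

/* Minimal sketch, assuming hypothetical names; a worker recovers its
 * queue through the multicore_worker context pointer and drains the
 * shared ring. */
static void my_crypt_worker(struct work_struct *work)
{
	struct crypt_queue *queue =
		container_of(work, struct multicore_worker, work)->ptr;
	struct sk_buff *skb;

	while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
		/* ... encrypt or decrypt skb here ... */
	}
}

static int my_setup(struct crypt_queue *queue)
{
	/* 1024-slot ring; each CPU's worker runs my_crypt_worker. */
	return wg_packet_queue_init(queue, my_crypt_worker, 1024);
}

/* Producer side of a prev_queue: safe from any context or CPU. */
static bool my_produce(struct prev_queue *pq, struct sk_buff *skb)
{
	if (!wg_prev_queue_enqueue(pq, skb)) {
		kfree_skb(skb);	/* full: MAX_QUEUED_PACKETS reached */
		return false;
	}
	return true;
}

/* Consumer side: exactly one thread may do this at a time. */
static void my_consume(struct prev_queue *pq)
{
	struct sk_buff *skb;

	while ((skb = wg_prev_queue_dequeue(pq)) != NULL)
		consume_skb(skb);	/* stand-in for real processing */
}

The split mirrors how the queue is designed: enqueue is cheap and concurrent, while all tail manipulation happens on the single consumer, which is what lets the dequeue path use plain stores to queue->tail.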
v5.14.15: the same file at the older stable release. It is identical to the v6.8 listing above except that there is no #include <linux/skb_array.h> (only the newer purge path needs __skb_array_destroy_skb()), wg_packet_queue_init() does not yet set queue->last_cpu, and wg_packet_queue_free() takes no purge argument:
void wg_packet_queue_free(struct crypt_queue *queue)
{
	free_percpu(queue->worker);
	WARN_ON(!__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, NULL);
}
The per-CPU worker allocation and the entire prev_queue implementation are unchanged from the v6.8 listing above.