v6.13.7
// SPDX-License-Identifier: LGPL-2.1
#define _GNU_SOURCE
#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>

#include "../kselftest.h"
#include "rseq.h"

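/*
 * Build-time selection of the per-cpu index used by the tests: when
 * BUILDOPT_RSEQ_PERCPU_MM_CID is defined, per-cpu data is indexed by the
 * current mm concurrency id (mm_cid); otherwise the raw cpu id is used.
 */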
#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
# define RSEQ_PERCPU	RSEQ_PERCPU_MM_CID
static
int get_current_cpu_id(void)
{
	return rseq_current_mm_cid();
}
static
bool rseq_validate_cpu_id(void)
{
	return rseq_mm_cid_available();
}
static
bool rseq_use_cpu_index(void)
{
	return false;	/* Use mm_cid */
}
#else
# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
static
int get_current_cpu_id(void)
{
	return rseq_cpu_start();
}
static
bool rseq_validate_cpu_id(void)
{
	return rseq_current_cpu_raw() >= 0;
}
static
bool rseq_use_cpu_index(void)
{
	return true;	/* Use cpu_id as index. */
}
#endif

struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};

struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
	int reps;
};

struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};

/* A simple percpu spinlock.  Returns the cpu lock was acquired on. */
int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = get_current_cpu_id();
		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					 &lock->c[cpu].v, 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}

void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}

void *test_percpu_spinlock_thread(void *arg)
{
	struct spinlock_test_data *data = arg;
	int i, cpu;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	for (i = 0; i < data->reps; i++) {
		cpu = rseq_this_cpu_lock(&data->lock);
		data->c[cpu].count++;
		rseq_percpu_unlock(&data->lock, cpu);
	}
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}

/*
 * A simple test which implements a sharded counter using a per-cpu
 * lock.  Obviously real applications might prefer to simply use a
 * per-cpu increment; however, this is reasonable for a test and the
 * lock can be extended to synchronize more complicated operations.
 */
void test_percpu_spinlock(void)
{
	const int num_threads = 200;
	int i;
	uint64_t sum;
	pthread_t test_threads[num_threads];
	struct spinlock_test_data data;

	memset(&data, 0, sizeof(data));
	data.reps = 5000;

	for (i = 0; i < num_threads; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_spinlock_thread, &data);

	for (i = 0; i < num_threads; i++)
		pthread_join(test_threads[i], NULL);

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;

	assert(sum == (uint64_t)data.reps * num_threads);
}
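
/*
 * Hedged sketch (not part of the upstream test): the comment above notes
 * that real applications might prefer a plain per-cpu increment over a
 * lock. Assuming the rseq_addv() operation provided by the selftest
 * rseq.h, such an increment could look roughly like this:
 */
static void this_cpu_count_inc(struct spinlock_test_data *data)
{
	int cpu, ret;

	for (;;) {
		cpu = get_current_cpu_id();
		/* Add 1 to this cpu's counter within one rseq critical section. */
		ret = rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU,
				&data->c[cpu].count, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if rseq aborts. */
	}
}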

void this_cpu_list_push(struct percpu_list *list,
			struct percpu_list_node *node,
			int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = get_current_cpu_id();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
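		/*
		 * Publish the node only if the head is still the value read
		 * above; if it changed, or the rseq critical section aborts,
		 * the push is retried.
		 */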
		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					 targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}

/*
 * Unlike a traditional lock-less linked list, the availability of a
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 */
struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
					   int *_cpu)
{
	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		long offset;
		int ret, cpu;

		cpu = get_current_cpu_id();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
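		/*
		 * If the list head is not NULL, copy it into 'head' and store
		 * head->next as the new head, all within one rseq critical
		 * section; an empty list makes the helper return > 0.
		 */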
		ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU,
						 targetptr, expectnot,
						 offset, load, cpu);
		if (rseq_likely(!ret)) {
			if (_cpu)
				*_cpu = cpu;
			return head;
		}
		if (ret > 0)
			return NULL;
		/* Retry if rseq aborts. */
	}
}

/*
 * __percpu_list_pop is not safe against concurrent accesses. Should
 * only be used on lists that are not concurrently modified.
 */
struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
{
	struct percpu_list_node *node;

	node = list->c[cpu].head;
	if (!node)
		return NULL;
	list->c[cpu].head = node->next;
	return node;
}

void *test_percpu_list_thread(void *arg)
{
	int i;
	struct percpu_list *list = (struct percpu_list *)arg;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	for (i = 0; i < 100000; i++) {
		struct percpu_list_node *node;

		node = this_cpu_list_pop(list, NULL);
		sched_yield();  /* encourage shuffling */
		if (node)
			this_cpu_list_push(list, node, NULL);
	}

	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}

/* Simultaneous modification to a per-cpu linked list from many threads.  */
void test_percpu_list(void)
{
	int i, j;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_list list;
	pthread_t test_threads[200];
	cpu_set_t allowed_cpus;

	memset(&list, 0, sizeof(list));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
			continue;
		for (j = 1; j <= 100; j++) {
			struct percpu_list_node *node;

			expected_sum += j;

			node = malloc(sizeof(*node));
			assert(node);
			node->data = j;
			node->next = list.c[i].head;
			list.c[i].head = node;
		}
	}

	for (i = 0; i < 200; i++)
		pthread_create(&test_threads[i], NULL,
		       test_percpu_list_thread, &list);

	for (i = 0; i < 200; i++)
		pthread_join(test_threads[i], NULL);

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_list_node *node;

		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
			continue;

		while ((node = __percpu_list_pop(&list, i))) {
			sum += node->data;
			free(node);
		}
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	assert(sum == expected_sum);
}

int main(int argc, char **argv)
{
	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto error;
	}
	if (!rseq_validate_cpu_id()) {
		fprintf(stderr, "Error: cpu id getter unavailable\n");
		goto error;
	}
	printf("spinlock\n");
	test_percpu_spinlock();
	printf("percpu_list\n");
	test_percpu_list();
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto error;
	}
	return 0;

error:
	return -1;
}