/*
 * RT-Mutex-tester: scriptable tester for rt mutexes
 *
 * started by Thomas Gleixner:
 *
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 */
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/freezer.h>

#include "rtmutex.h"

#define MAX_RT_TEST_THREADS	8
#define MAX_RT_TEST_MUTEXES	8

static spinlock_t rttest_lock;
static atomic_t rttest_event;

struct test_thread_data {
	int			opcode;
	int			opdata;
	int			mutexes[MAX_RT_TEST_MUTEXES];
	int			event;
	struct sys_device	sysdev;
};

static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
static struct task_struct *threads[MAX_RT_TEST_THREADS];
static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];

enum test_opcodes {
	RTTEST_NOP = 0,
	RTTEST_SCHEDOT,		/* 1 Sched other, data = nice */
	RTTEST_SCHEDRT,		/* 2 Sched fifo, data = prio */
	RTTEST_LOCK,		/* 3 Lock uninterruptible, data = lockindex */
	RTTEST_LOCKNOWAIT,	/* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKINT,		/* 5 Lock interruptible, data = lockindex */
	RTTEST_LOCKINTNOWAIT,	/* 6 Lock interruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKCONT,	/* 7 Continue locking after the wakeup delay */
	RTTEST_UNLOCK,		/* 8 Unlock, data = lockindex */
	/* 9, 10 - reserved for BKL commemoration */
	RTTEST_SIGNAL = 11,	/* 11 Signal other test thread, data = thread id */
	RTTEST_RESETEVENT = 98,	/* 98 Reset event counter */
	RTTEST_RESET = 99,	/* 99 Reset all pending operations */
};

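/*
 * The td->mutexes[] entries track per-mutex progress as used by handle_op()
 * and schedule_rt_mutex_test() below: 0 = free, 1 = lock requested,
 * 2 = blocked in the scheduler hook, 3 = woken but parked waiting for
 * RTTEST_LOCKCONT, 4 = held.  handle_op() is called with lockwakeup != 0
 * from the scheduler hook, in which case only NOP, LOCKCONT, RESET and
 * RESETEVENT are accepted; all other opcodes are handled from the tester
 * thread's main loop.
 */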
static int handle_op(struct test_thread_data *td, int lockwakeup)
{
	int i, id, ret = -EINVAL;

	switch(td->opcode) {

	case RTTEST_NOP:
		return 0;

	case RTTEST_LOCKCONT:
		td->mutexes[td->opdata] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return 0;

	case RTTEST_RESET:
		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
			if (td->mutexes[i] == 4) {
				rt_mutex_unlock(&mutexes[i]);
				td->mutexes[i] = 0;
			}
		}
		return 0;

	case RTTEST_RESETEVENT:
		atomic_set(&rttest_event, 0);
		return 0;

	default:
		if (lockwakeup)
			return ret;
	}

	switch(td->opcode) {

	case RTTEST_LOCK:
	case RTTEST_LOCKNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_lock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 4;
		return 0;

	case RTTEST_LOCKINT:
	case RTTEST_LOCKINTNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = ret ? 0 : 4;
		return ret ? -EINTR : 0;

	case RTTEST_UNLOCK:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
			return ret;

		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_unlock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 0;
		return 0;

	default:
		break;
	}
	return ret;
}

/*
 * Schedule replacement for rtsem_down(). Only called for threads with
 * PF_MUTEX_TESTER set.
 *
 * This allows us to have fine-grained control over the event flow.
 *
 */
void schedule_rt_mutex_test(struct rt_mutex *mutex)
{
	int tid, op, dat;
	struct test_thread_data *td;

	/* We have to lookup the task */
	for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
		if (threads[tid] == current)
			break;
	}

	BUG_ON(tid == MAX_RT_TEST_THREADS);

	td = &thread_data[tid];

	op = td->opcode;
	dat = td->opdata;

	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			break;

		if (td->mutexes[dat] != 1)
			break;

		td->mutexes[dat] = 2;
		td->event = atomic_add_return(1, &rttest_event);
		break;

	default:
		break;
	}

	schedule();


	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 3;
		td->event = atomic_add_return(1, &rttest_event);
		break;

	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return;

	default:
		return;
	}

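	/*
	 * Only reached for the waiting variants (RTTEST_LOCK/RTTEST_LOCKINT):
	 * park here until the controlling side issues RTTEST_LOCKCONT, then
	 * restore the original command and return to the rtmutex slow path,
	 * which will actually block on (or acquire) the lock.
	 */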
	td->opcode = 0;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			int ret;

			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 1);
			set_current_state(TASK_INTERRUPTIBLE);
			if (td->opcode == RTTEST_LOCKCONT)
				break;
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
	}

	/* Restore previous command and data */
	td->opcode = op;
	td->opdata = dat;
}

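/*
 * Each tester thread runs test_func(): it sleeps until a command arrives
 * via the per-thread sysfs "command" file (which sets td->opcode and wakes
 * the thread), executes it through handle_op(), and stores the result back
 * in td->opcode, so the outcome is visible in the "status" file.  SIGHUP
 * (sent via RTTEST_SIGNAL) is used to interrupt interruptible lock waits.
 */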
static int test_func(void *data)
{
	struct test_thread_data *td = data;
	int ret;

	current->flags |= PF_MUTEX_TESTER;
	set_freezable();
	allow_signal(SIGHUP);

	for(;;) {

		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 0);
			set_current_state(TASK_INTERRUPTIBLE);
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
		try_to_freeze();

		if (signal_pending(current))
			flush_signals(current);

		if(kthread_should_stop())
			break;
	}
	return 0;
}

/**
 * sysfs_test_command - interface for test commands
 * @dev:	thread reference
 * @buf:	command for actual step
 * @count:	length of buffer
 *
 * command syntax:
 *
 * opcode:data
 */
static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr,
				  const char *buf, size_t count)
{
	struct sched_param schedpar;
	struct test_thread_data *td;
	char cmdbuf[32];
	int op, dat, tid, ret;

	td = container_of(dev, struct test_thread_data, sysdev);
	tid = td->sysdev.id;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(cmdbuf))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;
	if (count < 1)
		return -EINVAL;

	memcpy(cmdbuf, buf, count);
	cmdbuf[count] = 0;

	if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
		return -EINVAL;

	switch (op) {
	case RTTEST_SCHEDOT:
		schedpar.sched_priority = 0;
		ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
		if (ret)
			return ret;
		set_user_nice(current, 0);
		break;

	case RTTEST_SCHEDRT:
		schedpar.sched_priority = dat;
		ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
		if (ret)
			return ret;
		break;

	case RTTEST_SIGNAL:
		send_sig(SIGHUP, threads[tid], 0);
		break;

	default:
		if (td->opcode > 0)
			return -EBUSY;
		td->opdata = dat;
		td->opcode = op;
		wake_up_process(threads[tid]);
	}

	return count;
}
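
/*
 * Example usage, a sketch only: assuming the sysdev class registered below
 * appears as /sys/devices/system/rttest/rttestN/ (the usual layout for
 * sysdev classes on kernels of this vintage), tester thread 0 can be driven
 * from a shell:
 *
 *   echo "2:80" > /sys/devices/system/rttest/rttest0/command  # SCHEDRT, prio 80
 *   echo "3:0"  > /sys/devices/system/rttest/rttest0/command  # LOCK mutex 0
 *   cat /sys/devices/system/rttest/rttest0/status
 *   echo "8:0"  > /sys/devices/system/rttest/rttest0/command  # UNLOCK mutex 0
 */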

/**
 * sysfs_test_status - sysfs interface for rt tester
 * @dev:	thread to query
 * @buf:	char buffer to be filled with thread status info
 */
static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr,
				 char *buf)
{
	struct test_thread_data *td;
	struct task_struct *tsk;
	char *curr = buf;
	int i;

	td = container_of(dev, struct test_thread_data, sysdev);
	tsk = threads[td->sysdev.id];

	spin_lock(&rttest_lock);

	curr += sprintf(curr,
		"O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
		td->opcode, td->event, tsk->state,
			(MAX_RT_PRIO - 1) - tsk->prio,
			(MAX_RT_PRIO - 1) - tsk->normal_prio,
		tsk->pi_blocked_on);

	for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
		curr += sprintf(curr, "%d", td->mutexes[i]);

	spin_unlock(&rttest_lock);

	curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
			mutexes[td->sysdev.id].owner);

	return curr - buf;
}
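
/*
 * Status line format produced above: O = pending opcode/last result,
 * E = event counter, S = task state, P/N = current and normal priority
 * printed as (MAX_RT_PRIO - 1) - prio, i.e. the user-visible RT priority
 * for RT tasks, B = pi_blocked_on pointer, M = per-mutex state digits
 * (highest index first), T = task pointer, R = owner field of the rt_mutex
 * with the same index as the thread.
 */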

static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);

static struct sysdev_class rttest_sysclass = {
	.name = "rttest",
};

static int init_test_thread(int id)
{
	thread_data[id].sysdev.cls = &rttest_sysclass;
	thread_data[id].sysdev.id = id;

	threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
	if (IS_ERR(threads[id]))
		return PTR_ERR(threads[id]);

	return sysdev_register(&thread_data[id].sysdev);
}

static int init_rttest(void)
{
	int ret, i;

	spin_lock_init(&rttest_lock);

	for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
		rt_mutex_init(&mutexes[i]);

	ret = sysdev_class_register(&rttest_sysclass);
	if (ret)
		return ret;

	for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
		ret = init_test_thread(i);
		if (ret)
			break;
		ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
		if (ret)
			break;
		ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
		if (ret)
			break;
	}

	printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );

	return ret;
}

device_initcall(init_rttest);