/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <asm/local.h>

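/*
 * Mirrors the ring buffer's internal buffer-page layout (timestamp,
 * commit count, then data) so that read_page() can decode raw pages.
 */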
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10ULL
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static DECLARE_COMPLETION(read_start);
static DECLARE_COMPLETION(read_done);

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static unsigned int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;

static int producer_fifo = -1;
static int consumer_fifo = -1;

module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");

static int read_events;

static int test_error;

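/*
 * Latch only the first failure: set the flag and WARN once, so that
 * both threads can bail out via break_test().
 */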
#define TEST_ERROR()				\
	do {					\
		if (!test_error) {		\
			test_error = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

static bool break_test(void)
{
	return test_error || kthread_should_stop();
}

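/*
 * Consume one event from @cpu's buffer and check that its payload is
 * the id of the CPU that wrote it.
 */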
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		TEST_ERROR();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

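/*
 * Swap a full page out of @cpu's buffer and walk its events by hand,
 * decoding padding, time-extend and data entries from the raw layout.
 */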
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(bpage))
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		/*
		 * The commit field may have missed-events flags set in its
		 * upper bits; mask them off to get the data length.
		 */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !test_error; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				TEST_ERROR();
				break;
			}

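			/* assume a bad size; each case below must set a valid increment */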
			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					TEST_ERROR();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				if (!event->array[0]) {
					TEST_ERROR();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (test_error)
				break;

			if (inc <= 0) {
				TEST_ERROR();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, cpu, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

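/*
 * Drain every online CPU's buffer, alternating between per-event and
 * per-page reads on successive runs, until the producer signals done.
 */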
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	/*
	 * Continue running until the producer specifically asks to stop
	 * and is ready for the completion.
	 */
	while (!READ_ONCE(reader_finish)) {
		int found = 1;

		while (found && !test_error) {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (test_error)
					break;

				if (stat == EVENT_FOUND)
					found = 1;
			}
		}

		/*
		 * Wait till the producer wakes us up when there is more
		 * data available or when the producer wants us to finish
		 * reading.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
	}
	__set_current_state(TASK_RUNNING);
	reader_finish = 0;
	complete(&read_done);
}

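/*
 * Hammer the buffer with writes for RUN_TIME seconds, waking the
 * consumer every wakeup_interval iterations, then report statistics.
 */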
static void ring_buffer_producer(void)
{
	ktime_t start_time, end_time, timeout;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	start_time = ktime_get();
	timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		for (i = 0; i < write_iteration; i++) {
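			/* reserve a 10-byte event; only the leading int (the CPU id) is used */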
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		end_time = ktime_get();

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * On a non-preempt kernel, this 10 second run would stall
		 * everything else while it runs. Instead, call cond_resched()
		 * at the same frequency we would wake up the reader; any time
		 * lost to a reschedule is still counted in the elapsed time.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif
	} while (ktime_before(end_time, timeout) && !break_test());
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = ktime_us_delta(end_time, start_time);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (test_error)
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo < 0)
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
		else
			trace_printk("Running Consumer at SCHED_FIFO %d\n",
				     consumer_fifo);
	}
	if (producer_fifo < 0)
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);
	else
		trace_printk("Running Producer at SCHED_FIFO %d\n",
			     producer_fifo);

	/* Let the user know that the test is running at low priority */
	if (producer_fifo < 0 && consumer_fifo < 0 &&
	    producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

	trace_printk("Time: %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read: (reader disabled)\n");
	else
		trace_printk("Read: %ld (by %s)\n", read,
			     read_events ? "events" : "pages");
	trace_printk("Entries: %lld\n", entries);
	trace_printk("Total: %lld\n", entries + overruns + read);
	trace_printk("Missed: %ld\n", missed);
	trace_printk("Hit: %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non-zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}

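/*
 * Park until kthread_stop() is called, so the thread exits cleanly.
 */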
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

static int ring_buffer_consumer_thread(void *arg)
{
	while (!break_test()) {
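		/* tell the producer we are ready for another pass */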
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

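/*
 * Producer kthread: reset the buffer, sync with the consumer via
 * read_start, run one benchmark pass, then sleep SLEEP_TIME seconds.
 */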
static int ring_buffer_producer_thread(void *arg)
{
	while (!break_test()) {
		ring_buffer_reset(buffer);

		if (consumer) {
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();
		if (break_test())
			goto out_kill;

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			goto out_kill;
		schedule_timeout(HZ * SLEEP_TIME);
	}

out_kill:
	__set_current_state(TASK_RUNNING);
	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

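/*
 * Module init: allocate the buffer, start the consumer (unless
 * disabled) and the producer, then apply the priority parameters.
 */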
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 0) {
			struct sched_param param = {
				.sched_priority = consumer_fifo
			};
			sched_setscheduler(consumer, SCHED_FIFO, &param);
		} else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 0) {
		struct sched_param param = {
			.sched_priority = producer_fifo
		};
		sched_setscheduler(producer, SCHED_FIFO, &param);
	} else
		set_user_nice(producer, producer_nice);

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");