Loading...
1/*
2 * Copyright (c) 2012 Neratec Solutions AG
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19
20#include "ath.h"
21#include "dfs_pattern_detector.h"
22#include "dfs_pri_detector.h"
23
/* pool usage counters shared by all detector instances; non-static,
 * presumably read elsewhere (e.g. debugfs) -- confirm against callers */
struct ath_dfs_pool_stats global_dfs_pool_stats = {};

/* bump/drop one counter in the global pool statistics */
#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
/*
 * For specs with a fixed PRI (MIN + tolerance == MAX - tolerance) use the
 * nominal mid-range PRI instead of the measured RUNTIME delta.
 * NOTE: MIN and MAX are evaluated twice -- pass side-effect-free arguments.
 */
#define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
	(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
	 MIN + PRI_TOLERANCE : RUNTIME)
31
/**
 * struct pulse_elem - elements in pulse queue
 * @head: chains the element into a detector's pulse queue or the free pool
 * @ts: pulse time stamp (usecs assumed -- TODO confirm against caller)
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};
39
40/*
41 * pde_get_multiple() - get number of multiples considering a given tolerance
42 * Return value: factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
43 */
44static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
45{
46 u32 remainder;
47 u32 factor;
48 u32 delta;
49
50 if (fraction == 0)
51 return 0;
52
53 delta = (val < fraction) ? (fraction - val) : (val - fraction);
54
55 if (delta <= tolerance)
56 /* val and fraction are within tolerance */
57 return 1;
58
59 factor = val / fraction;
60 remainder = val % fraction;
61 if (remainder > tolerance) {
62 /* no exact match */
63 if ((fraction - remainder) <= tolerance)
64 /* remainder is within tolerance */
65 factor++;
66 else
67 factor = 0;
68 }
69 return factor;
70}
71
/*
 * DOC: Singleton Pulse and Sequence Pools
 *
 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
 * reduce the number of dynamic allocations. They are shared between all
 * instances and grow up to the peak number of simultaneously used objects.
 *
 * Memory is freed after all references to the pools are released.
 */
static u32 singleton_pool_references;	/* count of live pool users */
static LIST_HEAD(pulse_pool);		/* recycled struct pulse_elem */
static LIST_HEAD(pseq_pool);		/* recycled struct pri_sequence */
static DEFINE_SPINLOCK(pool_lock);	/* protects pools and refcount */
85
/* take a reference on the singleton pools (one per detector instance) */
static void pool_register_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references++;
	DFS_POOL_STAT_INC(pool_reference);
	spin_unlock_bh(&pool_lock);
}
93
/*
 * Drop a reference on the singleton pools; the last reference to go
 * frees all pooled objects.
 */
static void pool_deregister_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references--;
	DFS_POOL_STAT_DEC(pool_reference);
	if (singleton_pool_references == 0) {
		/* free singleton pools with no references left */
		struct pri_sequence *ps, *ps0;
		struct pulse_elem *p, *p0;

		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
			list_del(&p->head);
			DFS_POOL_STAT_DEC(pulse_allocated);
			kfree(p);
		}
		list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
			list_del(&ps->head);
			DFS_POOL_STAT_DEC(pseq_allocated);
			kfree(ps);
		}
	}
	spin_unlock_bh(&pool_lock);
}
117
/* return a pulse element to the singleton pool for later reuse */
static void pool_put_pulse_elem(struct pulse_elem *pe)
{
	spin_lock_bh(&pool_lock);
	list_add(&pe->head, &pulse_pool);
	DFS_POOL_STAT_DEC(pulse_used);
	spin_unlock_bh(&pool_lock);
}
125
/* return a sequence element to the singleton pool for later reuse */
static void pool_put_pseq_elem(struct pri_sequence *pse)
{
	spin_lock_bh(&pool_lock);
	list_add(&pse->head, &pseq_pool);
	DFS_POOL_STAT_DEC(pseq_used);
	spin_unlock_bh(&pool_lock);
}
133
/* fetch a recycled pri_sequence from the pool, or NULL when it is empty */
static struct pri_sequence *pool_get_pseq_elem(void)
{
	struct pri_sequence *pse = NULL;
	spin_lock_bh(&pool_lock);
	if (!list_empty(&pseq_pool)) {
		pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
		list_del(&pse->head);
		DFS_POOL_STAT_INC(pseq_used);
	}
	spin_unlock_bh(&pool_lock);
	return pse;
}
146
/* fetch a recycled pulse_elem from the pool, or NULL when it is empty */
static struct pulse_elem *pool_get_pulse_elem(void)
{
	struct pulse_elem *pe = NULL;
	spin_lock_bh(&pool_lock);
	if (!list_empty(&pulse_pool)) {
		pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
		list_del(&pe->head);
		DFS_POOL_STAT_INC(pulse_used);
	}
	spin_unlock_bh(&pool_lock);
	return pe;
}
159
160static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
161{
162 struct list_head *l = &pde->pulses;
163 if (list_empty(l))
164 return NULL;
165 return list_entry(l->prev, struct pulse_elem, head);
166}
167
168static bool pulse_queue_dequeue(struct pri_detector *pde)
169{
170 struct pulse_elem *p = pulse_queue_get_tail(pde);
171 if (p != NULL) {
172 list_del_init(&p->head);
173 pde->count--;
174 /* give it back to pool */
175 pool_put_pulse_elem(p);
176 }
177 return (pde->count > 0);
178}
179
/* remove pulses older than window */
static void pulse_queue_check_window(struct pri_detector *pde)
{
	u64 min_valid_ts;
	struct pulse_elem *p;

	/* there is no delta time with less than 2 pulses */
	if (pde->count < 2)
		return;

	/* window cannot have expired before last_ts reaches its size */
	if (pde->last_ts <= pde->window_size)
		return;

	min_valid_ts = pde->last_ts - pde->window_size;
	/* tail holds the oldest pulse; drop until back inside the window */
	while ((p = pulse_queue_get_tail(pde)) != NULL) {
		if (p->ts >= min_valid_ts)
			return;
		pulse_queue_dequeue(pde);
	}
}
200
/*
 * Queue pulse time stamp @ts, reusing a pooled element when possible.
 * Expires out-of-window pulses and caps the queue at max_count.
 * Returns false only on allocation failure.
 */
static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
{
	struct pulse_elem *p = pool_get_pulse_elem();
	if (p == NULL) {
		/* pool empty: fall back to a fresh atomic allocation */
		p = kmalloc(sizeof(*p), GFP_ATOMIC);
		if (p == NULL) {
			DFS_POOL_STAT_INC(pulse_alloc_error);
			return false;
		}
		DFS_POOL_STAT_INC(pulse_allocated);
		DFS_POOL_STAT_INC(pulse_used);
	}
	INIT_LIST_HEAD(&p->head);
	p->ts = ts;
	/* newest pulse goes to the head; tail stays the oldest */
	list_add(&p->head, &pde->pulses);
	pde->count++;
	pde->last_ts = ts;
	pulse_queue_check_window(pde);
	if (pde->count >= pde->max_count)
		pulse_queue_dequeue(pde);
	return true;
}
223
/*
 * Build new candidate PRI sequences seeded by @ts paired with each queued
 * pulse, keeping those that collect more than @min_count matching pulses.
 * Returns false only on allocation failure.
 */
static bool pseq_handler_create_sequences(struct pri_detector *pde,
					  u64 ts, u32 min_count)
{
	struct pulse_elem *p;
	list_for_each_entry(p, &pde->pulses, head) {
		struct pri_sequence ps, *new_ps;
		struct pulse_elem *p2;
		u32 tmp_false_count;
		u64 min_valid_ts;
		u32 delta_ts = ts - p->ts;

		if (delta_ts < pde->rs->pri_min)
			/* ignore too small pri */
			continue;

		if (delta_ts > pde->rs->pri_max)
			/* stop on too large pri (sorted list) */
			break;

		/* build a new sequence with new potential pri */
		ps.count = 2;
		ps.count_falses = 0;
		ps.first_ts = p->ts;
		ps.last_ts = ts;
		/* fixed-PRI specs use the nominal pri, others the delta */
		ps.pri = GET_PRI_TO_USE(pde->rs->pri_min,
			pde->rs->pri_max, ts - p->ts);
		/* window: ppb-1 periods plus tolerance on both ends */
		ps.dur = ps.pri * (pde->rs->ppb - 1)
				+ 2 * pde->rs->max_pri_tolerance;

		p2 = p;
		tmp_false_count = 0;
		min_valid_ts = ts - ps.dur;
		/* check which past pulses are candidates for new sequence */
		list_for_each_entry_continue(p2, &pde->pulses, head) {
			u32 factor;
			if (p2->ts < min_valid_ts)
				/* stop on crossing window border */
				break;
			/* check if pulse match (multi)PRI */
			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
						  pde->rs->max_pri_tolerance);
			if (factor > 0) {
				ps.count++;
				ps.first_ts = p2->ts;
				/*
				 * on match, add the intermediate falses
				 * and reset counter
				 */
				ps.count_falses += tmp_false_count;
				tmp_false_count = 0;
			} else {
				/* this is a potential false one */
				tmp_false_count++;
			}
		}
		if (ps.count <= min_count)
			/* did not reach minimum count, drop sequence */
			continue;

		/* this is a valid one, add it */
		ps.deadline_ts = ps.first_ts + ps.dur;
		new_ps = pool_get_pseq_elem();
		if (new_ps == NULL) {
			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
			if (new_ps == NULL) {
				DFS_POOL_STAT_INC(pseq_alloc_error);
				return false;
			}
			DFS_POOL_STAT_INC(pseq_allocated);
			DFS_POOL_STAT_INC(pseq_used);
		}
		memcpy(new_ps, &ps, sizeof(ps));
		INIT_LIST_HEAD(&new_ps->head);
		list_add(&new_ps->head, &pde->sequences);
	}
	return true;
}
301
/* check new ts and add to all matching existing sequences */
static u32
pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
{
	u32 max_count = 0;
	struct pri_sequence *ps, *ps2;
	list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
		u32 delta_ts;
		u32 factor;

		/* first ensure that sequence is within window */
		if (ts > ps->deadline_ts) {
			/* expired: recycle the sequence element */
			list_del_init(&ps->head);
			pool_put_pseq_elem(ps);
			continue;
		}

		delta_ts = ts - ps->last_ts;
		factor = pde_get_multiple(delta_ts, ps->pri,
					  pde->rs->max_pri_tolerance);
		if (factor > 0) {
			/* ts matches a (multiple of the) sequence PRI */
			ps->last_ts = ts;
			ps->count++;

			if (max_count < ps->count)
				max_count = ps->count;
		} else {
			ps->count_falses++;
		}
	}
	/* longest updated sequence; 0 when nothing matched */
	return max_count;
}
334
/*
 * Return the first sequence qualifying as a radar detection,
 * or NULL when none does.
 */
static struct pri_sequence *
pseq_handler_check_detection(struct pri_detector *pde)
{
	struct pri_sequence *ps;

	if (list_empty(&pde->sequences))
		return NULL;

	list_for_each_entry(ps, &pde->sequences, head) {
		/*
		 * we assume to have enough matching confidence if we
		 * 1) have enough pulses
		 * 2) have more matching than false pulses
		 */
		if ((ps->count >= pde->rs->ppb_thresh) &&
		    (ps->count * pde->rs->num_pri >= ps->count_falses))
			return ps;
	}
	return NULL;
}
355
356
/* free pulse queue and sequences list and give objects back to pools */
static void pri_detector_reset(struct pri_detector *pde, u64 ts)
{
	struct pri_sequence *ps, *ps0;
	struct pulse_elem *p, *p0;
	list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
		list_del_init(&ps->head);
		pool_put_pseq_elem(ps);
	}
	list_for_each_entry_safe(p, p0, &pde->pulses, head) {
		list_del_init(&p->head);
		pool_put_pulse_elem(p);
	}
	pde->count = 0;
	/* restart the PRI search relative to @ts */
	pde->last_ts = ts;
}
373
/* destructor: reset state, drop the pool reference, free the detector */
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
380
/*
 * Feed one pulse event into the detector. Returns the matching
 * pri_sequence on radar detection, NULL otherwise.
 */
static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
						   struct pulse_event *event)
{
	u32 max_updated_seq;
	struct pri_sequence *ps;
	u64 ts = event->ts;
	const struct radar_detector_specs *rs = de->rs;

	/* ignore pulses not within width range */
	if ((rs->width_min > event->width) || (rs->width_max < event->width))
		return NULL;

	if ((ts - de->last_ts) < rs->max_pri_tolerance)
		/* if delta to last pulse is too short, don't use this pulse */
		return NULL;
	/* radar detector spec needs chirp, but not detected */
	if (rs->chirp && rs->chirp != event->chirp)
		return NULL;

	de->last_ts = ts;

	max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);

	if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
		/* allocation failed: drop all state and start over */
		pri_detector_reset(de, ts);
		return NULL;
	}

	ps = pseq_handler_check_detection(de);

	if (ps == NULL)
		/* no detection yet: remember the pulse for later matching */
		pulse_queue_enqueue(de, ts);

	return ps;
}
416
/*
 * Allocate and set up a PRI detector for the given radar spec.
 * Returns NULL on allocation failure; free via de->exit().
 */
struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
{
	struct pri_detector *de;

	de = kzalloc(sizeof(*de), GFP_ATOMIC);
	if (de == NULL)
		return NULL;
	de->exit = pri_detector_exit;
	de->add_pulse = pri_detector_add_pulse;
	de->reset = pri_detector_reset;

	INIT_LIST_HEAD(&de->sequences);
	INIT_LIST_HEAD(&de->pulses);
	/* keep enough history for ppb pulses of the largest PRI */
	de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
	de->max_count = rs->ppb * 2;
	de->rs = rs;

	pool_register_ref();
	return de;
}
1/*
2 * Copyright (c) 2012 Neratec Solutions AG
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19
20#include "ath.h"
21#include "dfs_pattern_detector.h"
22#include "dfs_pri_detector.h"
23
/* pool usage counters shared by all detector instances; non-static,
 * presumably read elsewhere (e.g. debugfs) -- confirm against callers */
struct ath_dfs_pool_stats global_dfs_pool_stats = {};

/* bump/drop one counter in the global pool statistics */
#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
28
/**
 * struct pulse_elem - elements in pulse queue
 * @head: chains the element into a detector's pulse queue or the free pool
 * @ts: time stamp in usecs
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};
37
/**
 * pde_get_multiple() - get number of multiples considering a given tolerance
 * @val: delta to match against multiples of @fraction
 * @fraction: base interval (candidate PRI)
 * @tolerance: maximum allowed deviation
 *
 * Return: factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
 */
static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
{
	u32 remainder;
	u32 factor;
	u32 delta;

	if (fraction == 0)
		return 0;

	delta = (val < fraction) ? (fraction - val) : (val - fraction);

	if (delta <= tolerance)
		/* val and fraction are within tolerance */
		return 1;

	factor = val / fraction;
	remainder = val % fraction;
	if (remainder > tolerance) {
		/* no exact match */
		if ((fraction - remainder) <= tolerance)
			/* remainder is within tolerance */
			factor++;
		else
			factor = 0;
	}
	return factor;
}
69
/**
 * DOC: Singleton Pulse and Sequence Pools
 *
 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
 * reduce the number of dynamic allocations. They are shared between all
 * instances and grow up to the peak number of simultaneously used objects.
 *
 * Memory is freed after all references to the pools are released.
 */
static u32 singleton_pool_references;	/* count of live pool users */
static LIST_HEAD(pulse_pool);		/* recycled struct pulse_elem */
static LIST_HEAD(pseq_pool);		/* recycled struct pri_sequence */
static DEFINE_SPINLOCK(pool_lock);	/* protects pools and refcount */
83
/* take a reference on the singleton pools (one per detector instance) */
static void pool_register_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references++;
	DFS_POOL_STAT_INC(pool_reference);
	spin_unlock_bh(&pool_lock);
}
91
/*
 * Drop a reference on the singleton pools; the last reference to go
 * frees all pooled objects.
 */
static void pool_deregister_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references--;
	DFS_POOL_STAT_DEC(pool_reference);
	if (singleton_pool_references == 0) {
		/* free singleton pools with no references left */
		struct pri_sequence *ps, *ps0;
		struct pulse_elem *p, *p0;

		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
			list_del(&p->head);
			DFS_POOL_STAT_DEC(pulse_allocated);
			kfree(p);
		}
		list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
			list_del(&ps->head);
			DFS_POOL_STAT_DEC(pseq_allocated);
			kfree(ps);
		}
	}
	spin_unlock_bh(&pool_lock);
}
115
/* return a pulse element to the singleton pool for later reuse */
static void pool_put_pulse_elem(struct pulse_elem *pe)
{
	spin_lock_bh(&pool_lock);
	list_add(&pe->head, &pulse_pool);
	DFS_POOL_STAT_DEC(pulse_used);
	spin_unlock_bh(&pool_lock);
}
123
/* return a sequence element to the singleton pool for later reuse */
static void pool_put_pseq_elem(struct pri_sequence *pse)
{
	spin_lock_bh(&pool_lock);
	list_add(&pse->head, &pseq_pool);
	DFS_POOL_STAT_DEC(pseq_used);
	spin_unlock_bh(&pool_lock);
}
131
/* fetch a recycled pri_sequence from the pool, or NULL when it is empty */
static struct pri_sequence *pool_get_pseq_elem(void)
{
	struct pri_sequence *pse = NULL;
	spin_lock_bh(&pool_lock);
	if (!list_empty(&pseq_pool)) {
		pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
		list_del(&pse->head);
		DFS_POOL_STAT_INC(pseq_used);
	}
	spin_unlock_bh(&pool_lock);
	return pse;
}
144
/* fetch a recycled pulse_elem from the pool, or NULL when it is empty */
static struct pulse_elem *pool_get_pulse_elem(void)
{
	struct pulse_elem *pe = NULL;
	spin_lock_bh(&pool_lock);
	if (!list_empty(&pulse_pool)) {
		pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
		list_del(&pe->head);
		DFS_POOL_STAT_INC(pulse_used);
	}
	spin_unlock_bh(&pool_lock);
	return pe;
}
157
/* oldest queued pulse sits at the list tail; NULL when queue is empty */
static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
{
	struct list_head *l = &pde->pulses;
	if (list_empty(l))
		return NULL;
	return list_entry(l->prev, struct pulse_elem, head);
}
165
/*
 * Drop the oldest pulse from the queue and recycle it.
 * Returns true while pulses remain queued.
 */
static bool pulse_queue_dequeue(struct pri_detector *pde)
{
	struct pulse_elem *p = pulse_queue_get_tail(pde);
	if (p != NULL) {
		list_del_init(&p->head);
		pde->count--;
		/* give it back to pool */
		pool_put_pulse_elem(p);
	}
	return (pde->count > 0);
}
177
/* remove pulses older than window */
static void pulse_queue_check_window(struct pri_detector *pde)
{
	u64 min_valid_ts;
	struct pulse_elem *p;

	/* there is no delta time with less than 2 pulses */
	if (pde->count < 2)
		return;

	/* window cannot have expired before last_ts reaches its size */
	if (pde->last_ts <= pde->window_size)
		return;

	min_valid_ts = pde->last_ts - pde->window_size;
	/* tail holds the oldest pulse; drop until back inside the window */
	while ((p = pulse_queue_get_tail(pde)) != NULL) {
		if (p->ts >= min_valid_ts)
			return;
		pulse_queue_dequeue(pde);
	}
}
198
/*
 * Queue pulse time stamp @ts, reusing a pooled element when possible.
 * Expires out-of-window pulses and caps the queue at max_count.
 * Returns false only on allocation failure.
 */
static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
{
	struct pulse_elem *p = pool_get_pulse_elem();
	if (p == NULL) {
		/* pool empty: fall back to a fresh atomic allocation */
		p = kmalloc(sizeof(*p), GFP_ATOMIC);
		if (p == NULL) {
			DFS_POOL_STAT_INC(pulse_alloc_error);
			return false;
		}
		DFS_POOL_STAT_INC(pulse_allocated);
		DFS_POOL_STAT_INC(pulse_used);
	}
	INIT_LIST_HEAD(&p->head);
	p->ts = ts;
	/* newest pulse goes to the head; tail stays the oldest */
	list_add(&p->head, &pde->pulses);
	pde->count++;
	pde->last_ts = ts;
	pulse_queue_check_window(pde);
	if (pde->count >= pde->max_count)
		pulse_queue_dequeue(pde);
	return true;
}
221
222static bool pseq_handler_create_sequences(struct pri_detector *pde,
223 u64 ts, u32 min_count)
224{
225 struct pulse_elem *p;
226 list_for_each_entry(p, &pde->pulses, head) {
227 struct pri_sequence ps, *new_ps;
228 struct pulse_elem *p2;
229 u32 tmp_false_count;
230 u64 min_valid_ts;
231 u32 delta_ts = ts - p->ts;
232
233 if (delta_ts < pde->rs->pri_min)
234 /* ignore too small pri */
235 continue;
236
237 if (delta_ts > pde->rs->pri_max)
238 /* stop on too large pri (sorted list) */
239 break;
240
241 /* build a new sequence with new potential pri */
242 ps.count = 2;
243 ps.count_falses = 0;
244 ps.first_ts = p->ts;
245 ps.last_ts = ts;
246 ps.pri = ts - p->ts;
247 ps.dur = ps.pri * (pde->rs->ppb - 1)
248 + 2 * pde->rs->max_pri_tolerance;
249
250 p2 = p;
251 tmp_false_count = 0;
252 min_valid_ts = ts - ps.dur;
253 /* check which past pulses are candidates for new sequence */
254 list_for_each_entry_continue(p2, &pde->pulses, head) {
255 u32 factor;
256 if (p2->ts < min_valid_ts)
257 /* stop on crossing window border */
258 break;
259 /* check if pulse match (multi)PRI */
260 factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
261 pde->rs->max_pri_tolerance);
262 if (factor > 0) {
263 ps.count++;
264 ps.first_ts = p2->ts;
265 /*
266 * on match, add the intermediate falses
267 * and reset counter
268 */
269 ps.count_falses += tmp_false_count;
270 tmp_false_count = 0;
271 } else {
272 /* this is a potential false one */
273 tmp_false_count++;
274 }
275 }
276 if (ps.count < min_count)
277 /* did not reach minimum count, drop sequence */
278 continue;
279
280 /* this is a valid one, add it */
281 ps.deadline_ts = ps.first_ts + ps.dur;
282 new_ps = pool_get_pseq_elem();
283 if (new_ps == NULL) {
284 new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
285 if (new_ps == NULL) {
286 DFS_POOL_STAT_INC(pseq_alloc_error);
287 return false;
288 }
289 DFS_POOL_STAT_INC(pseq_allocated);
290 DFS_POOL_STAT_INC(pseq_used);
291 }
292 memcpy(new_ps, &ps, sizeof(ps));
293 INIT_LIST_HEAD(&new_ps->head);
294 list_add(&new_ps->head, &pde->sequences);
295 }
296 return true;
297}
298
/* check new ts and add to all matching existing sequences */
static u32
pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
{
	u32 max_count = 0;
	struct pri_sequence *ps, *ps2;
	list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
		u32 delta_ts;
		u32 factor;

		/* first ensure that sequence is within window */
		if (ts > ps->deadline_ts) {
			/* expired: recycle the sequence element */
			list_del_init(&ps->head);
			pool_put_pseq_elem(ps);
			continue;
		}

		delta_ts = ts - ps->last_ts;
		factor = pde_get_multiple(delta_ts, ps->pri,
					  pde->rs->max_pri_tolerance);
		if (factor > 0) {
			/* ts matches a (multiple of the) sequence PRI */
			ps->last_ts = ts;
			ps->count++;

			if (max_count < ps->count)
				max_count = ps->count;
		} else {
			ps->count_falses++;
		}
	}
	/* longest updated sequence; 0 when nothing matched */
	return max_count;
}
331
/*
 * Return the first sequence qualifying as a radar detection,
 * or NULL when none does.
 */
static struct pri_sequence *
pseq_handler_check_detection(struct pri_detector *pde)
{
	struct pri_sequence *ps;

	if (list_empty(&pde->sequences))
		return NULL;

	list_for_each_entry(ps, &pde->sequences, head) {
		/*
		 * we assume to have enough matching confidence if we
		 * 1) have enough pulses
		 * 2) have more matching than false pulses
		 */
		if ((ps->count >= pde->rs->ppb_thresh) &&
		    (ps->count * pde->rs->num_pri >= ps->count_falses))
			return ps;
	}
	return NULL;
}
352
353
/* free pulse queue and sequences list and give objects back to pools */
static void pri_detector_reset(struct pri_detector *pde, u64 ts)
{
	struct pri_sequence *ps, *ps0;
	struct pulse_elem *p, *p0;
	list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
		list_del_init(&ps->head);
		pool_put_pseq_elem(ps);
	}
	list_for_each_entry_safe(p, p0, &pde->pulses, head) {
		list_del_init(&p->head);
		pool_put_pulse_elem(p);
	}
	pde->count = 0;
	/* restart the PRI search relative to @ts */
	pde->last_ts = ts;
}
370
/* destructor: reset state, drop the pool reference, free the detector */
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
377
378static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
379 struct pulse_event *event)
380{
381 u32 max_updated_seq;
382 struct pri_sequence *ps;
383 u64 ts = event->ts;
384 const struct radar_detector_specs *rs = de->rs;
385
386 /* ignore pulses not within width range */
387 if ((rs->width_min > event->width) || (rs->width_max < event->width))
388 return NULL;
389
390 if ((ts - de->last_ts) < rs->max_pri_tolerance)
391 /* if delta to last pulse is too short, don't use this pulse */
392 return NULL;
393 de->last_ts = ts;
394
395 max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
396
397 if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
398 pri_detector_reset(de, ts);
399 return NULL;
400 }
401
402 ps = pseq_handler_check_detection(de);
403
404 if (ps == NULL)
405 pulse_queue_enqueue(de, ts);
406
407 return ps;
408}
409
/*
 * Allocate and set up a PRI detector for the given radar spec.
 * Returns NULL on allocation failure; free via de->exit().
 */
struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
{
	struct pri_detector *de;

	de = kzalloc(sizeof(*de), GFP_ATOMIC);
	if (de == NULL)
		return NULL;
	de->exit = pri_detector_exit;
	de->add_pulse = pri_detector_add_pulse;
	de->reset = pri_detector_reset;

	INIT_LIST_HEAD(&de->sequences);
	INIT_LIST_HEAD(&de->pulses);
	/* keep enough history for ppb pulses of the largest PRI */
	de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
	de->max_count = rs->ppb * 2;
	de->rs = rs;

	pool_register_ref();
	return de;
}