/*
 * Copyright (c) 2012 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>

#include "ath.h"
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"

struct ath_dfs_pool_stats global_dfs_pool_stats = {};

#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)

/**
 * struct pulse_elem - elements in pulse queue
 * @head: list_head
 * @ts: time stamp in usecs
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};

/**
 * pde_get_multiple() - get number of multiples considering a given tolerance
 * @val: the value for which a multiple is searched
 * @fraction: the value that is multiplied
 * @tolerance: allowed deviation between val and a multiple of fraction
 *
 * Return: factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
 */
static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
{
	u32 remainder;
	u32 factor;
	u32 delta;

	if (fraction == 0)
		return 0;

	delta = (val < fraction) ? (fraction - val) : (val - fraction);

	if (delta <= tolerance)
		/* val and fraction are within tolerance */
		return 1;

	factor = val / fraction;
	remainder = val % fraction;
	if (remainder > tolerance) {
		/* no exact match */
		if ((fraction - remainder) <= tolerance)
			/* remainder is within tolerance */
			factor++;
		else
			factor = 0;
	}
	return factor;
}
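
/*
 * Worked examples (illustrative only, not part of the original file),
 * following the arithmetic above with fraction = 100 and tolerance = 10:
 *
 *	pde_get_multiple(105, 100, 10) -> 1 (delta 5 within tolerance)
 *	pde_get_multiple(405, 100, 10) -> 4 (remainder 5 within tolerance)
 *	pde_get_multiple(397, 100, 10) -> 4 (100 - 97 = 3 within tolerance)
 *	pde_get_multiple(450, 100, 10) -> 0 (no multiple within tolerance)
 */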

/**
 * DOC: Singleton Pulse and Sequence Pools
 *
 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
 * reduce the number of dynamic allocations. They are shared between all
 * instances and grow up to the peak number of simultaneously used objects.
 *
 * Memory is freed after all references to the pools are released.
 */
static u32 singleton_pool_references;
static LIST_HEAD(pulse_pool);
static LIST_HEAD(pseq_pool);
static DEFINE_SPINLOCK(pool_lock);

static void pool_register_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references++;
	DFS_POOL_STAT_INC(pool_reference);
	spin_unlock_bh(&pool_lock);
}

static void pool_deregister_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references--;
	DFS_POOL_STAT_DEC(pool_reference);
	if (singleton_pool_references == 0) {
		/* free singleton pools with no references left */
		struct pri_sequence *ps, *ps0;
		struct pulse_elem *p, *p0;

		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
			list_del(&p->head);
			DFS_POOL_STAT_DEC(pulse_allocated);
			kfree(p);
		}
		list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
			list_del(&ps->head);
			DFS_POOL_STAT_DEC(pseq_allocated);
			kfree(ps);
		}
	}
	spin_unlock_bh(&pool_lock);
}

static void pool_put_pulse_elem(struct pulse_elem *pe)
{
	spin_lock_bh(&pool_lock);
	list_add(&pe->head, &pulse_pool);
	DFS_POOL_STAT_DEC(pulse_used);
	spin_unlock_bh(&pool_lock);
}

static void pool_put_pseq_elem(struct pri_sequence *pse)
{
	spin_lock_bh(&pool_lock);
	list_add(&pse->head, &pseq_pool);
	DFS_POOL_STAT_DEC(pseq_used);
	spin_unlock_bh(&pool_lock);
}

static struct pri_sequence *pool_get_pseq_elem(void)
{
	struct pri_sequence *pse = NULL;

	spin_lock_bh(&pool_lock);
	if (!list_empty(&pseq_pool)) {
		pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
		list_del(&pse->head);
		DFS_POOL_STAT_INC(pseq_used);
	}
	spin_unlock_bh(&pool_lock);
	return pse;
}

static struct pulse_elem *pool_get_pulse_elem(void)
{
	struct pulse_elem *pe = NULL;

	spin_lock_bh(&pool_lock);
	if (!list_empty(&pulse_pool)) {
		pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
		list_del(&pe->head);
		DFS_POOL_STAT_INC(pulse_used);
	}
	spin_unlock_bh(&pool_lock);
	return pe;
}

static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
{
	struct list_head *l = &pde->pulses;

	if (list_empty(l))
		return NULL;
	return list_entry(l->prev, struct pulse_elem, head);
}

static bool pulse_queue_dequeue(struct pri_detector *pde)
{
	struct pulse_elem *p = pulse_queue_get_tail(pde);

	if (p != NULL) {
		list_del_init(&p->head);
		pde->count--;
		/* give it back to pool */
		pool_put_pulse_elem(p);
	}
	return (pde->count > 0);
}

/* remove pulses older than window */
static void pulse_queue_check_window(struct pri_detector *pde)
{
	u64 min_valid_ts;
	struct pulse_elem *p;

	/* there is no delta time with less than 2 pulses */
	if (pde->count < 2)
		return;

	if (pde->last_ts <= pde->window_size)
		return;

	min_valid_ts = pde->last_ts - pde->window_size;
	while ((p = pulse_queue_get_tail(pde)) != NULL) {
		if (p->ts >= min_valid_ts)
			return;
		pulse_queue_dequeue(pde);
	}
}
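
/*
 * Example (illustrative, values assumed): with window_size = 1000 usecs and
 * last_ts = 5000, min_valid_ts becomes 4000; tail pulses with ts < 4000 are
 * dequeued until the oldest remaining pulse lies within the window again.
 */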

static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
{
	struct pulse_elem *p = pool_get_pulse_elem();

	if (p == NULL) {
		p = kmalloc(sizeof(*p), GFP_ATOMIC);
		if (p == NULL) {
			DFS_POOL_STAT_INC(pulse_alloc_error);
			return false;
		}
		DFS_POOL_STAT_INC(pulse_allocated);
		DFS_POOL_STAT_INC(pulse_used);
	}
	INIT_LIST_HEAD(&p->head);
	p->ts = ts;
	list_add(&p->head, &pde->pulses);
	pde->count++;
	pde->last_ts = ts;
	pulse_queue_check_window(pde);
	if (pde->count >= pde->max_count)
		pulse_queue_dequeue(pde);
	return true;
}

static bool pseq_handler_create_sequences(struct pri_detector *pde,
					  u64 ts, u32 min_count)
{
	struct pulse_elem *p;

	list_for_each_entry(p, &pde->pulses, head) {
		struct pri_sequence ps, *new_ps;
		struct pulse_elem *p2;
		u32 tmp_false_count;
		u64 min_valid_ts;
		u32 delta_ts = ts - p->ts;

		if (delta_ts < pde->rs->pri_min)
			/* ignore too small pri */
			continue;

		if (delta_ts > pde->rs->pri_max)
			/* stop on too large pri (sorted list) */
			break;

		/* build a new sequence with new potential pri */
		ps.count = 2;
		ps.count_falses = 0;
		ps.first_ts = p->ts;
		ps.last_ts = ts;
		ps.pri = ts - p->ts;
		ps.dur = ps.pri * (pde->rs->ppb - 1)
				+ 2 * pde->rs->max_pri_tolerance;

		p2 = p;
		tmp_false_count = 0;
		min_valid_ts = ts - ps.dur;
		/* check which past pulses are candidates for new sequence */
		list_for_each_entry_continue(p2, &pde->pulses, head) {
			u32 factor;

			if (p2->ts < min_valid_ts)
				/* stop on crossing window border */
				break;
			/* check if pulse match (multi)PRI */
			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
						  pde->rs->max_pri_tolerance);
			if (factor > 0) {
				ps.count++;
				ps.first_ts = p2->ts;
				/*
				 * on match, add the intermediate falses
				 * and reset counter
				 */
				ps.count_falses += tmp_false_count;
				tmp_false_count = 0;
			} else {
				/* this is a potential false one */
				tmp_false_count++;
			}
		}
		if (ps.count < min_count)
			/* did not reach minimum count, drop sequence */
			continue;

		/* this is a valid one, add it */
		ps.deadline_ts = ps.first_ts + ps.dur;
		new_ps = pool_get_pseq_elem();
		if (new_ps == NULL) {
			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
			if (new_ps == NULL) {
				DFS_POOL_STAT_INC(pseq_alloc_error);
				return false;
			}
			DFS_POOL_STAT_INC(pseq_allocated);
			DFS_POOL_STAT_INC(pseq_used);
		}
		memcpy(new_ps, &ps, sizeof(ps));
		INIT_LIST_HEAD(&new_ps->head);
		list_add(&new_ps->head, &pde->sequences);
	}
	return true;
}

/* check new ts and add to all matching existing sequences */
static u32
pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
{
	u32 max_count = 0;
	struct pri_sequence *ps, *ps2;

	list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
		u32 delta_ts;
		u32 factor;

		/* first ensure that sequence is within window */
		if (ts > ps->deadline_ts) {
			list_del_init(&ps->head);
			pool_put_pseq_elem(ps);
			continue;
		}

		delta_ts = ts - ps->last_ts;
		factor = pde_get_multiple(delta_ts, ps->pri,
					  pde->rs->max_pri_tolerance);
		if (factor > 0) {
			ps->last_ts = ts;
			ps->count++;

			if (max_count < ps->count)
				max_count = ps->count;
		} else {
			ps->count_falses++;
		}
	}
	return max_count;
}
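
/*
 * Worked example (illustrative, values assumed): for a candidate pri of
 * 1000 usecs with ppb = 6 and max_pri_tolerance = 16 usecs,
 * pseq_handler_create_sequences() computes
 * ps.dur = 1000 * (6 - 1) + 2 * 16 = 5032 usecs. Since deadline_ts =
 * first_ts + dur, pseq_handler_add_to_existing_seqs() keeps matching new
 * pulses into this sequence until a pulse arrives after that deadline.
 */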

static struct pri_sequence *
pseq_handler_check_detection(struct pri_detector *pde)
{
	struct pri_sequence *ps;

	if (list_empty(&pde->sequences))
		return NULL;

	list_for_each_entry(ps, &pde->sequences, head) {
		/*
		 * we assume to have enough matching confidence if we
		 * 1) have enough pulses
		 * 2) have more matching than false pulses
		 */
		if ((ps->count >= pde->rs->ppb_thresh) &&
		    (ps->count * pde->rs->num_pri >= ps->count_falses))
			return ps;
	}
	return NULL;
}

/* free pulse queue and sequences list and give objects back to pools */
static void pri_detector_reset(struct pri_detector *pde, u64 ts)
{
	struct pri_sequence *ps, *ps0;
	struct pulse_elem *p, *p0;

	list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
		list_del_init(&ps->head);
		pool_put_pseq_elem(ps);
	}
	list_for_each_entry_safe(p, p0, &pde->pulses, head) {
		list_del_init(&p->head);
		pool_put_pulse_elem(p);
	}
	pde->count = 0;
	pde->last_ts = ts;
}

static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}

static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
						   struct pulse_event *event)
{
	u32 max_updated_seq;
	struct pri_sequence *ps;
	u64 ts = event->ts;
	const struct radar_detector_specs *rs = de->rs;

	/* ignore pulses not within width range */
	if ((rs->width_min > event->width) || (rs->width_max < event->width))
		return NULL;

	if ((ts - de->last_ts) < rs->max_pri_tolerance)
		/* if delta to last pulse is too short, don't use this pulse */
		return NULL;
	de->last_ts = ts;

	max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);

	if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
		pri_detector_reset(de, ts);
		return NULL;
	}

	ps = pseq_handler_check_detection(de);

	if (ps == NULL)
		pulse_queue_enqueue(de, ts);

	return ps;
}

struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
{
	struct pri_detector *de;

	de = kzalloc(sizeof(*de), GFP_ATOMIC);
	if (de == NULL)
		return NULL;
	de->exit = pri_detector_exit;
	de->add_pulse = pri_detector_add_pulse;
	de->reset = pri_detector_reset;

	INIT_LIST_HEAD(&de->sequences);
	INIT_LIST_HEAD(&de->pulses);
	de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
	de->max_count = rs->ppb * 2;
	de->rs = rs;

	pool_register_ref();
	return de;
}
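
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * such as the channel detector in dfs_pattern_detector.c creates one
 * pri_detector per radar type, feeds pulse events through the add_pulse()
 * hook and treats a non-NULL return value as a detected sequence. The specs
 * instance 'rs', the event values and handle_radar_detection() are assumed
 * here for illustration:
 *
 *	struct pri_detector *de = pri_detector_init(&rs);
 *
 *	if (de != NULL) {
 *		struct pulse_event ev = { .ts = ts, .width = width };
 *		struct pri_sequence *ps = de->add_pulse(de, &ev);
 *
 *		if (ps != NULL)
 *			handle_radar_detection(ps);
 *		de->exit(de);
 *	}
 */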