1 /* 2 * tracing_map - lock-free map for tracing 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com> 15 * 16 * tracing_map implementation inspired by lock-free map algorithms 17 * originated by Dr. Cliff Click: 18 * 19 * http://www.azulsystems.com/blog/cliff/2007-03-26-non-blocking-hashtable 20 * http://www.azulsystems.com/events/javaone_2007/2007_LockFreeHash.pdf 21 */ 22 23 #include <linux/vmalloc.h> 24 #include <linux/jhash.h> 25 #include <linux/slab.h> 26 #include <linux/sort.h> 27 28 #include "tracing_map.h" 29 #include "trace.h" 30 31 /* 32 * NOTE: For a detailed description of the data structures used by 33 * these functions (such as tracing_map_elt) please see the overview 34 * of tracing_map data structures at the beginning of tracing_map.h. 35 */ 36 37 /** 38 * tracing_map_update_sum - Add a value to a tracing_map_elt's sum field 39 * @elt: The tracing_map_elt 40 * @i: The index of the given sum associated with the tracing_map_elt 41 * @n: The value to add to the sum 42 * 43 * Add n to sum i associated with the specified tracing_map_elt 44 * instance. The index i is the index returned by the call to 45 * tracing_map_add_sum_field() when the tracing map was set up. 
46 */ 47 void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n) 48 { 49 atomic64_add(n, &elt->fields[i].sum); 50 } 51 52 /** 53 * tracing_map_read_sum - Return the value of a tracing_map_elt's sum field 54 * @elt: The tracing_map_elt 55 * @i: The index of the given sum associated with the tracing_map_elt 56 * 57 * Retrieve the value of the sum i associated with the specified 58 * tracing_map_elt instance. The index i is the index returned by the 59 * call to tracing_map_add_sum_field() when the tracing map was set 60 * up. 61 * 62 * Return: The sum associated with field i for elt. 63 */ 64 u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i) 65 { 66 return (u64)atomic64_read(&elt->fields[i].sum); 67 } 68 69 int tracing_map_cmp_string(void *val_a, void *val_b) 70 { 71 char *a = val_a; 72 char *b = val_b; 73 74 return strcmp(a, b); 75 } 76 77 int tracing_map_cmp_none(void *val_a, void *val_b) 78 { 79 return 0; 80 } 81 82 static int tracing_map_cmp_atomic64(void *val_a, void *val_b) 83 { 84 u64 a = atomic64_read((atomic64_t *)val_a); 85 u64 b = atomic64_read((atomic64_t *)val_b); 86 87 return (a > b) ? 1 : ((a < b) ? -1 : 0); 88 } 89 90 #define DEFINE_TRACING_MAP_CMP_FN(type) \ 91 static int tracing_map_cmp_##type(void *val_a, void *val_b) \ 92 { \ 93 type a = *(type *)val_a; \ 94 type b = *(type *)val_b; \ 95 \ 96 return (a > b) ? 1 : ((a < b) ? 
-1 : 0); \ 97 } 98 99 DEFINE_TRACING_MAP_CMP_FN(s64); 100 DEFINE_TRACING_MAP_CMP_FN(u64); 101 DEFINE_TRACING_MAP_CMP_FN(s32); 102 DEFINE_TRACING_MAP_CMP_FN(u32); 103 DEFINE_TRACING_MAP_CMP_FN(s16); 104 DEFINE_TRACING_MAP_CMP_FN(u16); 105 DEFINE_TRACING_MAP_CMP_FN(s8); 106 DEFINE_TRACING_MAP_CMP_FN(u8); 107 108 tracing_map_cmp_fn_t tracing_map_cmp_num(int field_size, 109 int field_is_signed) 110 { 111 tracing_map_cmp_fn_t fn = tracing_map_cmp_none; 112 113 switch (field_size) { 114 case 8: 115 if (field_is_signed) 116 fn = tracing_map_cmp_s64; 117 else 118 fn = tracing_map_cmp_u64; 119 break; 120 case 4: 121 if (field_is_signed) 122 fn = tracing_map_cmp_s32; 123 else 124 fn = tracing_map_cmp_u32; 125 break; 126 case 2: 127 if (field_is_signed) 128 fn = tracing_map_cmp_s16; 129 else 130 fn = tracing_map_cmp_u16; 131 break; 132 case 1: 133 if (field_is_signed) 134 fn = tracing_map_cmp_s8; 135 else 136 fn = tracing_map_cmp_u8; 137 break; 138 } 139 140 return fn; 141 } 142 143 static int tracing_map_add_field(struct tracing_map *map, 144 tracing_map_cmp_fn_t cmp_fn) 145 { 146 int ret = -EINVAL; 147 148 if (map->n_fields < TRACING_MAP_FIELDS_MAX) { 149 ret = map->n_fields; 150 map->fields[map->n_fields++].cmp_fn = cmp_fn; 151 } 152 153 return ret; 154 } 155 156 /** 157 * tracing_map_add_sum_field - Add a field describing a tracing_map sum 158 * @map: The tracing_map 159 * 160 * Add a sum field to the key and return the index identifying it in 161 * the map and associated tracing_map_elts. This is the index used 162 * for instance to update a sum for a particular tracing_map_elt using 163 * tracing_map_update_sum() or reading it via tracing_map_read_sum(). 164 * 165 * Return: The index identifying the field in the map and associated 166 * tracing_map_elts, or -EINVAL on error. 
167 */ 168 int tracing_map_add_sum_field(struct tracing_map *map) 169 { 170 return tracing_map_add_field(map, tracing_map_cmp_atomic64); 171 } 172 173 /** 174 * tracing_map_add_key_field - Add a field describing a tracing_map key 175 * @map: The tracing_map 176 * @offset: The offset within the key 177 * @cmp_fn: The comparison function that will be used to sort on the key 178 * 179 * Let the map know there is a key and that if it's used as a sort key 180 * to use cmp_fn. 181 * 182 * A key can be a subset of a compound key; for that purpose, the 183 * offset param is used to describe where within the the compound key 184 * the key referenced by this key field resides. 185 * 186 * Return: The index identifying the field in the map and associated 187 * tracing_map_elts, or -EINVAL on error. 188 */ 189 int tracing_map_add_key_field(struct tracing_map *map, 190 unsigned int offset, 191 tracing_map_cmp_fn_t cmp_fn) 192 193 { 194 int idx = tracing_map_add_field(map, cmp_fn); 195 196 if (idx < 0) 197 return idx; 198 199 map->fields[idx].offset = offset; 200 201 map->key_idx[map->n_keys++] = idx; 202 203 return idx; 204 } 205 206 void tracing_map_array_clear(struct tracing_map_array *a) 207 { 208 unsigned int i; 209 210 if (!a->pages) 211 return; 212 213 for (i = 0; i < a->n_pages; i++) 214 memset(a->pages[i], 0, PAGE_SIZE); 215 } 216 217 void tracing_map_array_free(struct tracing_map_array *a) 218 { 219 unsigned int i; 220 221 if (!a) 222 return; 223 224 if (!a->pages) 225 goto free; 226 227 for (i = 0; i < a->n_pages; i++) { 228 if (!a->pages[i]) 229 break; 230 free_page((unsigned long)a->pages[i]); 231 } 232 233 kfree(a->pages); 234 235 free: 236 kfree(a); 237 } 238 239 struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, 240 unsigned int entry_size) 241 { 242 struct tracing_map_array *a; 243 unsigned int i; 244 245 a = kzalloc(sizeof(*a), GFP_KERNEL); 246 if (!a) 247 return NULL; 248 249 a->entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1); 
250 a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift); 251 a->n_pages = n_elts / a->entries_per_page; 252 if (!a->n_pages) 253 a->n_pages = 1; 254 a->entry_shift = fls(a->entries_per_page) - 1; 255 a->entry_mask = (1 << a->entry_shift) - 1; 256 257 a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL); 258 if (!a->pages) 259 goto free; 260 261 for (i = 0; i < a->n_pages; i++) { 262 a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL); 263 if (!a->pages[i]) 264 goto free; 265 } 266 out: 267 return a; 268 free: 269 tracing_map_array_free(a); 270 a = NULL; 271 272 goto out; 273 } 274 275 static void tracing_map_elt_clear(struct tracing_map_elt *elt) 276 { 277 unsigned i; 278 279 for (i = 0; i < elt->map->n_fields; i++) 280 if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64) 281 atomic64_set(&elt->fields[i].sum, 0); 282 283 if (elt->map->ops && elt->map->ops->elt_clear) 284 elt->map->ops->elt_clear(elt); 285 } 286 287 static void tracing_map_elt_init_fields(struct tracing_map_elt *elt) 288 { 289 unsigned int i; 290 291 tracing_map_elt_clear(elt); 292 293 for (i = 0; i < elt->map->n_fields; i++) { 294 elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn; 295 296 if (elt->fields[i].cmp_fn != tracing_map_cmp_atomic64) 297 elt->fields[i].offset = elt->map->fields[i].offset; 298 } 299 } 300 301 static void tracing_map_elt_free(struct tracing_map_elt *elt) 302 { 303 if (!elt) 304 return; 305 306 if (elt->map->ops && elt->map->ops->elt_free) 307 elt->map->ops->elt_free(elt); 308 kfree(elt->fields); 309 kfree(elt->key); 310 kfree(elt); 311 } 312 313 static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map) 314 { 315 struct tracing_map_elt *elt; 316 int err = 0; 317 318 elt = kzalloc(sizeof(*elt), GFP_KERNEL); 319 if (!elt) 320 return ERR_PTR(-ENOMEM); 321 322 elt->map = map; 323 324 elt->key = kzalloc(map->key_size, GFP_KERNEL); 325 if (!elt->key) { 326 err = -ENOMEM; 327 goto free; 328 } 329 330 elt->fields = kcalloc(map->n_fields, 
sizeof(*elt->fields), GFP_KERNEL); 331 if (!elt->fields) { 332 err = -ENOMEM; 333 goto free; 334 } 335 336 tracing_map_elt_init_fields(elt); 337 338 if (map->ops && map->ops->elt_alloc) { 339 err = map->ops->elt_alloc(elt); 340 if (err) 341 goto free; 342 } 343 return elt; 344 free: 345 tracing_map_elt_free(elt); 346 347 return ERR_PTR(err); 348 } 349 350 static struct tracing_map_elt *get_free_elt(struct tracing_map *map) 351 { 352 struct tracing_map_elt *elt = NULL; 353 int idx; 354 355 idx = atomic_inc_return(&map->next_elt); 356 if (idx < map->max_elts) { 357 elt = *(TRACING_MAP_ELT(map->elts, idx)); 358 if (map->ops && map->ops->elt_init) 359 map->ops->elt_init(elt); 360 } 361 362 return elt; 363 } 364 365 static void tracing_map_free_elts(struct tracing_map *map) 366 { 367 unsigned int i; 368 369 if (!map->elts) 370 return; 371 372 for (i = 0; i < map->max_elts; i++) { 373 tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i))); 374 *(TRACING_MAP_ELT(map->elts, i)) = NULL; 375 } 376 377 tracing_map_array_free(map->elts); 378 map->elts = NULL; 379 } 380 381 static int tracing_map_alloc_elts(struct tracing_map *map) 382 { 383 unsigned int i; 384 385 map->elts = tracing_map_array_alloc(map->max_elts, 386 sizeof(struct tracing_map_elt *)); 387 if (!map->elts) 388 return -ENOMEM; 389 390 for (i = 0; i < map->max_elts; i++) { 391 *(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map); 392 if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) { 393 *(TRACING_MAP_ELT(map->elts, i)) = NULL; 394 tracing_map_free_elts(map); 395 396 return -ENOMEM; 397 } 398 } 399 400 return 0; 401 } 402 403 static inline bool keys_match(void *key, void *test_key, unsigned key_size) 404 { 405 bool match = true; 406 407 if (memcmp(key, test_key, key_size)) 408 match = false; 409 410 return match; 411 } 412 413 static inline struct tracing_map_elt * 414 __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only) 415 { 416 u32 idx, key_hash, test_key; 417 int dup_try = 0; 418 
struct tracing_map_entry *entry; 419 struct tracing_map_elt *val; 420 421 key_hash = jhash(key, map->key_size, 0); 422 if (key_hash == 0) 423 key_hash = 1; 424 idx = key_hash >> (32 - (map->map_bits + 1)); 425 426 while (1) { 427 idx &= (map->map_size - 1); 428 entry = TRACING_MAP_ENTRY(map->map, idx); 429 test_key = entry->key; 430 431 if (test_key && test_key == key_hash) { 432 val = READ_ONCE(entry->val); 433 if (val && 434 keys_match(key, val->key, map->key_size)) { 435 if (!lookup_only) 436 atomic64_inc(&map->hits); 437 return val; 438 } else if (unlikely(!val)) { 439 /* 440 * The key is present. But, val (pointer to elt 441 * struct) is still NULL. which means some other 442 * thread is in the process of inserting an 443 * element. 444 * 445 * On top of that, it's key_hash is same as the 446 * one being inserted right now. So, it's 447 * possible that the element has the same 448 * key as well. 449 */ 450 451 dup_try++; 452 if (dup_try > map->map_size) { 453 atomic64_inc(&map->drops); 454 break; 455 } 456 continue; 457 } 458 } 459 460 if (!test_key) { 461 if (lookup_only) 462 break; 463 464 if (!cmpxchg(&entry->key, 0, key_hash)) { 465 struct tracing_map_elt *elt; 466 467 elt = get_free_elt(map); 468 if (!elt) { 469 atomic64_inc(&map->drops); 470 entry->key = 0; 471 break; 472 } 473 474 memcpy(elt->key, key, map->key_size); 475 entry->val = elt; 476 atomic64_inc(&map->hits); 477 478 return entry->val; 479 } else { 480 /* 481 * cmpxchg() failed. Loop around once 482 * more to check what key was inserted. 
483 */ 484 dup_try++; 485 continue; 486 } 487 } 488 489 idx++; 490 } 491 492 return NULL; 493 } 494 495 /** 496 * tracing_map_insert - Insert key and/or retrieve val from a tracing_map 497 * @map: The tracing_map to insert into 498 * @key: The key to insert 499 * 500 * Inserts a key into a tracing_map and creates and returns a new 501 * tracing_map_elt for it, or if the key has already been inserted by 502 * a previous call, returns the tracing_map_elt already associated 503 * with it. When the map was created, the number of elements to be 504 * allocated for the map was specified (internally maintained as 505 * 'max_elts' in struct tracing_map), and that number of 506 * tracing_map_elts was created by tracing_map_init(). This is the 507 * pre-allocated pool of tracing_map_elts that tracing_map_insert() 508 * will allocate from when adding new keys. Once that pool is 509 * exhausted, tracing_map_insert() is useless and will return NULL to 510 * signal that state. There are two user-visible tracing_map 511 * variables, 'hits' and 'drops', which are updated by this function. 512 * Every time an element is either successfully inserted or retrieved, 513 * the 'hits' value is incrememented. Every time an element insertion 514 * fails, the 'drops' value is incremented. 515 * 516 * This is a lock-free tracing map insertion function implementing a 517 * modified form of Cliff Click's basic insertion algorithm. It 518 * requires the table size be a power of two. To prevent any 519 * possibility of an infinite loop we always make the internal table 520 * size double the size of the requested table size (max_elts * 2). 521 * Likewise, we never reuse a slot or resize or delete elements - when 522 * we've reached max_elts entries, we simply return NULL once we've 523 * run out of entries. Readers can at any point in time traverse the 524 * tracing map and safely access the key/val pairs. 525 * 526 * Return: the tracing_map_elt pointer val associated with the key. 
/**
 * tracing_map_insert - Insert key and/or retrieve val from a tracing_map
 * @map: The tracing_map to insert into
 * @key: The key to insert
 *
 * Inserts a key into a tracing_map and creates and returns a new
 * tracing_map_elt for it, or if the key has already been inserted by
 * a previous call, returns the tracing_map_elt already associated
 * with it.  When the map was created, the number of elements to be
 * allocated for the map was specified (internally maintained as
 * 'max_elts' in struct tracing_map), and that number of
 * tracing_map_elts was created by tracing_map_init().  This is the
 * pre-allocated pool of tracing_map_elts that tracing_map_insert()
 * will allocate from when adding new keys.  Once that pool is
 * exhausted, tracing_map_insert() is useless and will return NULL to
 * signal that state.  There are two user-visible tracing_map
 * variables, 'hits' and 'drops', which are updated by this function.
 * Every time an element is either successfully inserted or retrieved,
 * the 'hits' value is incremented.  Every time an element insertion
 * fails, the 'drops' value is incremented.
 *
 * This is a lock-free tracing map insertion function implementing a
 * modified form of Cliff Click's basic insertion algorithm.  It
 * requires the table size be a power of two.  To prevent any
 * possibility of an infinite loop we always make the internal table
 * size double the size of the requested table size (max_elts * 2).
 * Likewise, we never reuse a slot or resize or delete elements - when
 * we've reached max_elts entries, we simply return NULL once we've
 * run out of entries.  Readers can at any point in time traverse the
 * tracing map and safely access the key/val pairs.
 *
 * Return: the tracing_map_elt pointer val associated with the key.
 * If this was a newly inserted key, the val will be a newly allocated
 * and associated tracing_map_elt pointer val.  If the key wasn't
 * found and the pool of tracing_map_elts has been exhausted, NULL is
 * returned and no further insertions will succeed.
 */
struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, false);
}

/**
 * tracing_map_lookup - Retrieve val from a tracing_map
 * @map: The tracing_map to perform the lookup on
 * @key: The key to look up
 *
 * Looks up key in tracing_map and if found returns the matching
 * tracing_map_elt.  This is a lock-free lookup; see
 * tracing_map_insert() for details on tracing_map and how it works.
 * Every time an element is successfully retrieved, the user-visible
 * 'hits' value is incremented.  The 'drops' value is never updated by
 * this function.
 *
 * Return: the tracing_map_elt pointer val associated with the key.
 * If the key wasn't found, NULL is returned.
 */
struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, true);
}
568 */ 569 void tracing_map_destroy(struct tracing_map *map) 570 { 571 if (!map) 572 return; 573 574 tracing_map_free_elts(map); 575 576 tracing_map_array_free(map->map); 577 kfree(map); 578 } 579 580 /** 581 * tracing_map_clear - Clear a tracing_map 582 * @map: The tracing_map to clear 583 * 584 * Resets the tracing map to a cleared or initial state. The 585 * tracing_map_elts are all cleared, and the array of struct 586 * tracing_map_entry is reset to an initialized state. 587 * 588 * Callers should make sure there are no writers actively inserting 589 * into the map before calling this. 590 */ 591 void tracing_map_clear(struct tracing_map *map) 592 { 593 unsigned int i; 594 595 atomic_set(&map->next_elt, -1); 596 atomic64_set(&map->hits, 0); 597 atomic64_set(&map->drops, 0); 598 599 tracing_map_array_clear(map->map); 600 601 for (i = 0; i < map->max_elts; i++) 602 tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i))); 603 } 604 605 static void set_sort_key(struct tracing_map *map, 606 struct tracing_map_sort_key *sort_key) 607 { 608 map->sort_key = *sort_key; 609 } 610 611 /** 612 * tracing_map_create - Create a lock-free map and element pool 613 * @map_bits: The size of the map (2 ** map_bits) 614 * @key_size: The size of the key for the map in bytes 615 * @ops: Optional client-defined tracing_map_ops instance 616 * @private_data: Client data associated with the map 617 * 618 * Creates and sets up a map to contain 2 ** map_bits number of 619 * elements (internally maintained as 'max_elts' in struct 620 * tracing_map). Before using, map fields should be added to the map 621 * with tracing_map_add_sum_field() and tracing_map_add_key_field(). 622 * tracing_map_init() should then be called to allocate the array of 623 * tracing_map_elts, in order to avoid allocating anything in the map 624 * insertion path. 
The user-specified map size reflects the maximum 625 * number of elements that can be contained in the table requested by 626 * the user - internally we double that in order to keep the table 627 * sparse and keep collisions manageable. 628 * 629 * A tracing_map is a special-purpose map designed to aggregate or 630 * 'sum' one or more values associated with a specific object of type 631 * tracing_map_elt, which is attached by the map to a given key. 632 * 633 * tracing_map_create() sets up the map itself, and provides 634 * operations for inserting tracing_map_elts, but doesn't allocate the 635 * tracing_map_elts themselves, or provide a means for describing the 636 * keys or sums associated with the tracing_map_elts. All 637 * tracing_map_elts for a given map have the same set of sums and 638 * keys, which are defined by the client using the functions 639 * tracing_map_add_key_field() and tracing_map_add_sum_field(). Once 640 * the fields are defined, the pool of elements allocated for the map 641 * can be created, which occurs when the client code calls 642 * tracing_map_init(). 643 * 644 * When tracing_map_init() returns, tracing_map_elt elements can be 645 * inserted into the map using tracing_map_insert(). When called, 646 * tracing_map_insert() grabs a free tracing_map_elt from the pool, or 647 * finds an existing match in the map and in either case returns it. 648 * The client can then use tracing_map_update_sum() and 649 * tracing_map_read_sum() to update or read a given sum field for the 650 * tracing_map_elt. 651 * 652 * The client can at any point retrieve and traverse the current set 653 * of inserted tracing_map_elts in a tracing_map, via 654 * tracing_map_sort_entries(). Sorting can be done on any field, 655 * including keys. 656 * 657 * See tracing_map.h for a description of tracing_map_ops. 658 * 659 * Return: the tracing_map pointer if successful, ERR_PTR if not. 
660 */ 661 struct tracing_map *tracing_map_create(unsigned int map_bits, 662 unsigned int key_size, 663 const struct tracing_map_ops *ops, 664 void *private_data) 665 { 666 struct tracing_map *map; 667 unsigned int i; 668 669 if (map_bits < TRACING_MAP_BITS_MIN || 670 map_bits > TRACING_MAP_BITS_MAX) 671 return ERR_PTR(-EINVAL); 672 673 map = kzalloc(sizeof(*map), GFP_KERNEL); 674 if (!map) 675 return ERR_PTR(-ENOMEM); 676 677 map->map_bits = map_bits; 678 map->max_elts = (1 << map_bits); 679 atomic_set(&map->next_elt, -1); 680 681 map->map_size = (1 << (map_bits + 1)); 682 map->ops = ops; 683 684 map->private_data = private_data; 685 686 map->map = tracing_map_array_alloc(map->map_size, 687 sizeof(struct tracing_map_entry)); 688 if (!map->map) 689 goto free; 690 691 map->key_size = key_size; 692 for (i = 0; i < TRACING_MAP_KEYS_MAX; i++) 693 map->key_idx[i] = -1; 694 out: 695 return map; 696 free: 697 tracing_map_destroy(map); 698 map = ERR_PTR(-ENOMEM); 699 700 goto out; 701 } 702 703 /** 704 * tracing_map_init - Allocate and clear a map's tracing_map_elts 705 * @map: The tracing_map to initialize 706 * 707 * Allocates a clears a pool of tracing_map_elts equal to the 708 * user-specified size of 2 ** map_bits (internally maintained as 709 * 'max_elts' in struct tracing_map). Before using, the map fields 710 * should be added to the map with tracing_map_add_sum_field() and 711 * tracing_map_add_key_field(). tracing_map_init() should then be 712 * called to allocate the array of tracing_map_elts, in order to avoid 713 * allocating anything in the map insertion path. The user-specified 714 * map size reflects the max number of elements requested by the user 715 * - internally we double that in order to keep the table sparse and 716 * keep collisions manageable. 717 * 718 * See tracing_map.h for a description of tracing_map_ops. 719 * 720 * Return: the tracing_map pointer if successful, ERR_PTR if not. 
721 */ 722 int tracing_map_init(struct tracing_map *map) 723 { 724 int err; 725 726 if (map->n_fields < 2) 727 return -EINVAL; /* need at least 1 key and 1 val */ 728 729 err = tracing_map_alloc_elts(map); 730 if (err) 731 return err; 732 733 tracing_map_clear(map); 734 735 return err; 736 } 737 738 static int cmp_entries_dup(const struct tracing_map_sort_entry **a, 739 const struct tracing_map_sort_entry **b) 740 { 741 int ret = 0; 742 743 if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size)) 744 ret = 1; 745 746 return ret; 747 } 748 749 static int cmp_entries_sum(const struct tracing_map_sort_entry **a, 750 const struct tracing_map_sort_entry **b) 751 { 752 const struct tracing_map_elt *elt_a, *elt_b; 753 struct tracing_map_sort_key *sort_key; 754 struct tracing_map_field *field; 755 tracing_map_cmp_fn_t cmp_fn; 756 void *val_a, *val_b; 757 int ret = 0; 758 759 elt_a = (*a)->elt; 760 elt_b = (*b)->elt; 761 762 sort_key = &elt_a->map->sort_key; 763 764 field = &elt_a->fields[sort_key->field_idx]; 765 cmp_fn = field->cmp_fn; 766 767 val_a = &elt_a->fields[sort_key->field_idx].sum; 768 val_b = &elt_b->fields[sort_key->field_idx].sum; 769 770 ret = cmp_fn(val_a, val_b); 771 if (sort_key->descending) 772 ret = -ret; 773 774 return ret; 775 } 776 777 static int cmp_entries_key(const struct tracing_map_sort_entry **a, 778 const struct tracing_map_sort_entry **b) 779 { 780 const struct tracing_map_elt *elt_a, *elt_b; 781 struct tracing_map_sort_key *sort_key; 782 struct tracing_map_field *field; 783 tracing_map_cmp_fn_t cmp_fn; 784 void *val_a, *val_b; 785 int ret = 0; 786 787 elt_a = (*a)->elt; 788 elt_b = (*b)->elt; 789 790 sort_key = &elt_a->map->sort_key; 791 792 field = &elt_a->fields[sort_key->field_idx]; 793 794 cmp_fn = field->cmp_fn; 795 796 val_a = elt_a->key + field->offset; 797 val_b = elt_b->key + field->offset; 798 799 ret = cmp_fn(val_a, val_b); 800 if (sort_key->descending) 801 ret = -ret; 802 803 return ret; 804 } 805 806 static void 
destroy_sort_entry(struct tracing_map_sort_entry *entry) 807 { 808 if (!entry) 809 return; 810 811 if (entry->elt_copied) 812 tracing_map_elt_free(entry->elt); 813 814 kfree(entry); 815 } 816 817 /** 818 * tracing_map_destroy_sort_entries - Destroy an array of sort entries 819 * @entries: The entries to destroy 820 * @n_entries: The number of entries in the array 821 * 822 * Destroy the elements returned by a tracing_map_sort_entries() call. 823 */ 824 void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries, 825 unsigned int n_entries) 826 { 827 unsigned int i; 828 829 for (i = 0; i < n_entries; i++) 830 destroy_sort_entry(entries[i]); 831 832 vfree(entries); 833 } 834 835 static struct tracing_map_sort_entry * 836 create_sort_entry(void *key, struct tracing_map_elt *elt) 837 { 838 struct tracing_map_sort_entry *sort_entry; 839 840 sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL); 841 if (!sort_entry) 842 return NULL; 843 844 sort_entry->key = key; 845 sort_entry->elt = elt; 846 847 return sort_entry; 848 } 849 850 static struct tracing_map_elt *copy_elt(struct tracing_map_elt *elt) 851 { 852 struct tracing_map_elt *dup_elt; 853 unsigned int i; 854 855 dup_elt = tracing_map_elt_alloc(elt->map); 856 if (IS_ERR(dup_elt)) 857 return NULL; 858 859 if (elt->map->ops && elt->map->ops->elt_copy) 860 elt->map->ops->elt_copy(dup_elt, elt); 861 862 dup_elt->private_data = elt->private_data; 863 memcpy(dup_elt->key, elt->key, elt->map->key_size); 864 865 for (i = 0; i < elt->map->n_fields; i++) { 866 atomic64_set(&dup_elt->fields[i].sum, 867 atomic64_read(&elt->fields[i].sum)); 868 dup_elt->fields[i].cmp_fn = elt->fields[i].cmp_fn; 869 } 870 871 return dup_elt; 872 } 873 874 static int merge_dup(struct tracing_map_sort_entry **sort_entries, 875 unsigned int target, unsigned int dup) 876 { 877 struct tracing_map_elt *target_elt, *elt; 878 bool first_dup = (target - dup) == 1; 879 int i; 880 881 if (first_dup) { 882 elt = sort_entries[target]->elt; 
883 target_elt = copy_elt(elt); 884 if (!target_elt) 885 return -ENOMEM; 886 sort_entries[target]->elt = target_elt; 887 sort_entries[target]->elt_copied = true; 888 } else 889 target_elt = sort_entries[target]->elt; 890 891 elt = sort_entries[dup]->elt; 892 893 for (i = 0; i < elt->map->n_fields; i++) 894 atomic64_add(atomic64_read(&elt->fields[i].sum), 895 &target_elt->fields[i].sum); 896 897 sort_entries[dup]->dup = true; 898 899 return 0; 900 } 901 902 static int merge_dups(struct tracing_map_sort_entry **sort_entries, 903 int n_entries, unsigned int key_size) 904 { 905 unsigned int dups = 0, total_dups = 0; 906 int err, i, j; 907 void *key; 908 909 if (n_entries < 2) 910 return total_dups; 911 912 sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *), 913 (int (*)(const void *, const void *))cmp_entries_dup, NULL); 914 915 key = sort_entries[0]->key; 916 for (i = 1; i < n_entries; i++) { 917 if (!memcmp(sort_entries[i]->key, key, key_size)) { 918 dups++; total_dups++; 919 err = merge_dup(sort_entries, i - dups, i); 920 if (err) 921 return err; 922 continue; 923 } 924 key = sort_entries[i]->key; 925 dups = 0; 926 } 927 928 if (!total_dups) 929 return total_dups; 930 931 for (i = 0, j = 0; i < n_entries; i++) { 932 if (!sort_entries[i]->dup) { 933 sort_entries[j] = sort_entries[i]; 934 if (j++ != i) 935 sort_entries[i] = NULL; 936 } else { 937 destroy_sort_entry(sort_entries[i]); 938 sort_entries[i] = NULL; 939 } 940 } 941 942 return total_dups; 943 } 944 945 static bool is_key(struct tracing_map *map, unsigned int field_idx) 946 { 947 unsigned int i; 948 949 for (i = 0; i < map->n_keys; i++) 950 if (map->key_idx[i] == field_idx) 951 return true; 952 return false; 953 } 954 955 static void sort_secondary(struct tracing_map *map, 956 const struct tracing_map_sort_entry **entries, 957 unsigned int n_entries, 958 struct tracing_map_sort_key *primary_key, 959 struct tracing_map_sort_key *secondary_key) 960 { 961 int (*primary_fn)(const struct 
/*
 * Re-sort each run of entries that compare equal on the primary sort
 * key using the secondary sort key, leaving the primary ordering
 * intact.  The map's sort_key is temporarily switched to the
 * secondary key around each sub-sort because the cmp_entries_*
 * comparators read it from the map.
 */
static void sort_secondary(struct tracing_map *map,
			   const struct tracing_map_sort_entry **entries,
			   unsigned int n_entries,
			   struct tracing_map_sort_key *primary_key,
			   struct tracing_map_sort_key *secondary_key)
{
	int (*primary_fn)(const struct tracing_map_sort_entry **,
			  const struct tracing_map_sort_entry **);
	int (*secondary_fn)(const struct tracing_map_sort_entry **,
			    const struct tracing_map_sort_entry **);
	unsigned i, start = 0, n_sub = 1;

	if (is_key(map, primary_key->field_idx))
		primary_fn = cmp_entries_key;
	else
		primary_fn = cmp_entries_sum;

	if (is_key(map, secondary_key->field_idx))
		secondary_fn = cmp_entries_key;
	else
		secondary_fn = cmp_entries_sum;

	for (i = 0; i < n_entries - 1; i++) {
		const struct tracing_map_sort_entry **a = &entries[i];
		const struct tracing_map_sort_entry **b = &entries[i + 1];

		/* extend the current run of primary-equal entries */
		if (primary_fn(a, b) == 0) {
			n_sub++;
			if (i < n_entries - 2)
				continue;
		}

		/* runs of one entry need no secondary sort */
		if (n_sub < 2) {
			start = i + 1;
			n_sub = 1;
			continue;
		}

		set_sort_key(map, secondary_key);
		sort(&entries[start], n_sub,
		     sizeof(struct tracing_map_sort_entry *),
		     (int (*)(const void *, const void *))secondary_fn, NULL);
		set_sort_key(map, primary_key);

		start = i + 1;
		n_sub = 1;
	}
}

/**
 * tracing_map_sort_entries - Sort the current set of tracing_map_elts in a map
 * @map: The tracing_map
 * @sort_keys: The sort keys to use for sorting (primary key first)
 * @n_sort_keys: The number of sort keys in the sort_keys array (1 or 2)
 * @sort_entries: outval: pointer to allocated and sorted array of entries
 *
 * tracing_map_sort_entries() sorts the current set of entries in the
 * map and returns the list of tracing_map_sort_entries containing
 * them to the client in the sort_entries param.  The client can
 * access the struct tracing_map_elt element of interest directly as
 * the 'elt' field of a returned struct tracing_map_sort_entry object.
 *
 * Each sort key has only two fields: idx and descending.  'idx'
 * refers to the index of the field added via
 * tracing_map_add_sum_field() or tracing_map_add_key_field() when the
 * tracing_map was initialized.  'descending' is a flag that if set
 * reverses the sort order, which by default is ascending.
 *
 * The client should not hold on to the returned array but should use
 * it and call tracing_map_destroy_sort_entries() when done.
 *
 * Return: the number of sort_entries in the struct tracing_map_sort_entry
 * array, negative on error
 */
int tracing_map_sort_entries(struct tracing_map *map,
			     struct tracing_map_sort_key *sort_keys,
			     unsigned int n_sort_keys,
			     struct tracing_map_sort_entry ***sort_entries)
{
	int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
			      const struct tracing_map_sort_entry **);
	struct tracing_map_sort_entry *sort_entry, **entries;
	int i, n_entries, ret;

	entries = vmalloc(map->max_elts * sizeof(sort_entry));
	if (!entries)
		return -ENOMEM;

	/* snapshot every occupied slot into a sort entry */
	for (i = 0, n_entries = 0; i < map->map_size; i++) {
		struct tracing_map_entry *entry;

		entry = TRACING_MAP_ENTRY(map->map, i);

		/* skip empty slots and in-flight insertions (val not set) */
		if (!entry->key || !entry->val)
			continue;

		entries[n_entries] = create_sort_entry(entry->val->key,
						       entry->val);
		if (!entries[n_entries++]) {
			ret = -ENOMEM;
			goto free;
		}
	}

	if (n_entries == 0) {
		ret = 0;
		goto free;
	}

	if (n_entries == 1) {
		*sort_entries = entries;
		return 1;
	}

	/* collapse entries with identical keys before sorting */
	ret = merge_dups(entries, n_entries, map->key_size);
	if (ret < 0)
		goto free;
	n_entries -= ret;

	if (is_key(map, sort_keys[0].field_idx))
		cmp_entries_fn = cmp_entries_key;
	else
		cmp_entries_fn = cmp_entries_sum;

	set_sort_key(map, &sort_keys[0]);

	sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
	     (int (*)(const void *, const void *))cmp_entries_fn, NULL);

	if (n_sort_keys > 1)
		sort_secondary(map,
			       (const struct tracing_map_sort_entry **)entries,
			       n_entries,
			       &sort_keys[0],
			       &sort_keys[1]);

	*sort_entries = entries;

	return n_entries;
 free:
	/* destroy_sort_entry() tolerates the trailing NULL slot on error */
	tracing_map_destroy_sort_entries(entries, n_entries);

	return ret;
}