16954e415SSteven Rostedt (VMware) // SPDX-License-Identifier: GPL-2.0
26954e415SSteven Rostedt (VMware) /*
36954e415SSteven Rostedt (VMware) * Copyright (C) 2021 VMware Inc, Steven Rostedt <rostedt@goodmis.org>
46954e415SSteven Rostedt (VMware) */
58d6e9098SSteven Rostedt (VMware) #include <linux/spinlock.h>
68d6e9098SSteven Rostedt (VMware) #include <linux/irq_work.h>
76954e415SSteven Rostedt (VMware) #include <linux/slab.h>
86954e415SSteven Rostedt (VMware) #include "trace.h"
96954e415SSteven Rostedt (VMware)
108d6e9098SSteven Rostedt (VMware) /* See pid_list.h for details */
118d6e9098SSteven Rostedt (VMware)
get_lower_chunk(struct trace_pid_list * pid_list)128d6e9098SSteven Rostedt (VMware) static inline union lower_chunk *get_lower_chunk(struct trace_pid_list *pid_list)
138d6e9098SSteven Rostedt (VMware) {
148d6e9098SSteven Rostedt (VMware) union lower_chunk *chunk;
158d6e9098SSteven Rostedt (VMware)
168d6e9098SSteven Rostedt (VMware) lockdep_assert_held(&pid_list->lock);
178d6e9098SSteven Rostedt (VMware)
188d6e9098SSteven Rostedt (VMware) if (!pid_list->lower_list)
198d6e9098SSteven Rostedt (VMware) return NULL;
208d6e9098SSteven Rostedt (VMware)
218d6e9098SSteven Rostedt (VMware) chunk = pid_list->lower_list;
228d6e9098SSteven Rostedt (VMware) pid_list->lower_list = chunk->next;
238d6e9098SSteven Rostedt (VMware) pid_list->free_lower_chunks--;
248d6e9098SSteven Rostedt (VMware) WARN_ON_ONCE(pid_list->free_lower_chunks < 0);
258d6e9098SSteven Rostedt (VMware) chunk->next = NULL;
268d6e9098SSteven Rostedt (VMware) /*
278d6e9098SSteven Rostedt (VMware) * If a refill needs to happen, it can not happen here
288d6e9098SSteven Rostedt (VMware) * as the scheduler run queue locks are held.
298d6e9098SSteven Rostedt (VMware) */
308d6e9098SSteven Rostedt (VMware) if (pid_list->free_lower_chunks <= CHUNK_REALLOC)
318d6e9098SSteven Rostedt (VMware) irq_work_queue(&pid_list->refill_irqwork);
328d6e9098SSteven Rostedt (VMware)
338d6e9098SSteven Rostedt (VMware) return chunk;
348d6e9098SSteven Rostedt (VMware) }
358d6e9098SSteven Rostedt (VMware)
get_upper_chunk(struct trace_pid_list * pid_list)368d6e9098SSteven Rostedt (VMware) static inline union upper_chunk *get_upper_chunk(struct trace_pid_list *pid_list)
378d6e9098SSteven Rostedt (VMware) {
388d6e9098SSteven Rostedt (VMware) union upper_chunk *chunk;
398d6e9098SSteven Rostedt (VMware)
408d6e9098SSteven Rostedt (VMware) lockdep_assert_held(&pid_list->lock);
418d6e9098SSteven Rostedt (VMware)
428d6e9098SSteven Rostedt (VMware) if (!pid_list->upper_list)
438d6e9098SSteven Rostedt (VMware) return NULL;
448d6e9098SSteven Rostedt (VMware)
458d6e9098SSteven Rostedt (VMware) chunk = pid_list->upper_list;
468d6e9098SSteven Rostedt (VMware) pid_list->upper_list = chunk->next;
478d6e9098SSteven Rostedt (VMware) pid_list->free_upper_chunks--;
488d6e9098SSteven Rostedt (VMware) WARN_ON_ONCE(pid_list->free_upper_chunks < 0);
498d6e9098SSteven Rostedt (VMware) chunk->next = NULL;
508d6e9098SSteven Rostedt (VMware) /*
518d6e9098SSteven Rostedt (VMware) * If a refill needs to happen, it can not happen here
528d6e9098SSteven Rostedt (VMware) * as the scheduler run queue locks are held.
538d6e9098SSteven Rostedt (VMware) */
548d6e9098SSteven Rostedt (VMware) if (pid_list->free_upper_chunks <= CHUNK_REALLOC)
558d6e9098SSteven Rostedt (VMware) irq_work_queue(&pid_list->refill_irqwork);
568d6e9098SSteven Rostedt (VMware)
578d6e9098SSteven Rostedt (VMware) return chunk;
588d6e9098SSteven Rostedt (VMware) }
598d6e9098SSteven Rostedt (VMware)
put_lower_chunk(struct trace_pid_list * pid_list,union lower_chunk * chunk)608d6e9098SSteven Rostedt (VMware) static inline void put_lower_chunk(struct trace_pid_list *pid_list,
618d6e9098SSteven Rostedt (VMware) union lower_chunk *chunk)
628d6e9098SSteven Rostedt (VMware) {
638d6e9098SSteven Rostedt (VMware) lockdep_assert_held(&pid_list->lock);
648d6e9098SSteven Rostedt (VMware)
658d6e9098SSteven Rostedt (VMware) chunk->next = pid_list->lower_list;
668d6e9098SSteven Rostedt (VMware) pid_list->lower_list = chunk;
678d6e9098SSteven Rostedt (VMware) pid_list->free_lower_chunks++;
688d6e9098SSteven Rostedt (VMware) }
698d6e9098SSteven Rostedt (VMware)
put_upper_chunk(struct trace_pid_list * pid_list,union upper_chunk * chunk)708d6e9098SSteven Rostedt (VMware) static inline void put_upper_chunk(struct trace_pid_list *pid_list,
718d6e9098SSteven Rostedt (VMware) union upper_chunk *chunk)
728d6e9098SSteven Rostedt (VMware) {
738d6e9098SSteven Rostedt (VMware) lockdep_assert_held(&pid_list->lock);
748d6e9098SSteven Rostedt (VMware)
758d6e9098SSteven Rostedt (VMware) chunk->next = pid_list->upper_list;
768d6e9098SSteven Rostedt (VMware) pid_list->upper_list = chunk;
778d6e9098SSteven Rostedt (VMware) pid_list->free_upper_chunks++;
788d6e9098SSteven Rostedt (VMware) }
798d6e9098SSteven Rostedt (VMware)
/*
 * upper_empty - test whether an upper chunk references no lower chunks
 *
 * chunk->data is a union view: an array of lower_chunk pointers that
 * can equally be read as one big bitmask. A slot holding a non-NULL
 * pointer necessarily has some bit set, so an all-zero bitmask means
 * every slot is NULL and the chunk can be recycled.
 */
static inline bool upper_empty(union upper_chunk *chunk)
{
	/*
	 * If chunk->data has no lower chunks, it will be the same
	 * as a zeroed bitmask. Use find_first_bit() to test it
	 * and if it doesn't find any bits set, then the array
	 * is empty.
	 */
	int bit = find_first_bit((unsigned long *)chunk->data,
				 sizeof(chunk->data) * 8);
	/* find_first_bit() returns the full size when no bit is set */
	return bit >= sizeof(chunk->data) * 8;
}
928d6e9098SSteven Rostedt (VMware)
pid_split(unsigned int pid,unsigned int * upper1,unsigned int * upper2,unsigned int * lower)938d6e9098SSteven Rostedt (VMware) static inline int pid_split(unsigned int pid, unsigned int *upper1,
948d6e9098SSteven Rostedt (VMware) unsigned int *upper2, unsigned int *lower)
958d6e9098SSteven Rostedt (VMware) {
968d6e9098SSteven Rostedt (VMware) /* MAX_PID should cover all pids */
978d6e9098SSteven Rostedt (VMware) BUILD_BUG_ON(MAX_PID < PID_MAX_LIMIT);
988d6e9098SSteven Rostedt (VMware)
998d6e9098SSteven Rostedt (VMware) /* In case a bad pid is passed in, then fail */
1008d6e9098SSteven Rostedt (VMware) if (unlikely(pid >= MAX_PID))
1018d6e9098SSteven Rostedt (VMware) return -1;
1028d6e9098SSteven Rostedt (VMware)
1038d6e9098SSteven Rostedt (VMware) *upper1 = (pid >> UPPER1_SHIFT) & UPPER_MASK;
1048d6e9098SSteven Rostedt (VMware) *upper2 = (pid >> UPPER2_SHIFT) & UPPER_MASK;
1058d6e9098SSteven Rostedt (VMware) *lower = pid & LOWER_MASK;
1068d6e9098SSteven Rostedt (VMware)
1078d6e9098SSteven Rostedt (VMware) return 0;
1088d6e9098SSteven Rostedt (VMware) }
1098d6e9098SSteven Rostedt (VMware)
pid_join(unsigned int upper1,unsigned int upper2,unsigned int lower)1108d6e9098SSteven Rostedt (VMware) static inline unsigned int pid_join(unsigned int upper1,
1118d6e9098SSteven Rostedt (VMware) unsigned int upper2, unsigned int lower)
1128d6e9098SSteven Rostedt (VMware) {
1138d6e9098SSteven Rostedt (VMware) return ((upper1 & UPPER_MASK) << UPPER1_SHIFT) |
1148d6e9098SSteven Rostedt (VMware) ((upper2 & UPPER_MASK) << UPPER2_SHIFT) |
1158d6e9098SSteven Rostedt (VMware) (lower & LOWER_MASK);
1168d6e9098SSteven Rostedt (VMware) }
1178d6e9098SSteven Rostedt (VMware)
1186954e415SSteven Rostedt (VMware) /**
1196954e415SSteven Rostedt (VMware) * trace_pid_list_is_set - test if the pid is set in the list
1206954e415SSteven Rostedt (VMware) * @pid_list: The pid list to test
121*217d8c05STom Rix * @pid: The pid to see if set in the list.
1226954e415SSteven Rostedt (VMware) *
123*217d8c05STom Rix * Tests if @pid is set in the @pid_list. This is usually called
1246954e415SSteven Rostedt (VMware) * from the scheduler when a task is scheduled. Its pid is checked
1256954e415SSteven Rostedt (VMware) * if it should be traced or not.
1266954e415SSteven Rostedt (VMware) *
1276954e415SSteven Rostedt (VMware) * Return true if the pid is in the list, false otherwise.
1286954e415SSteven Rostedt (VMware) */
trace_pid_list_is_set(struct trace_pid_list * pid_list,unsigned int pid)1296954e415SSteven Rostedt (VMware) bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid)
1306954e415SSteven Rostedt (VMware) {
1318d6e9098SSteven Rostedt (VMware) union upper_chunk *upper_chunk;
1328d6e9098SSteven Rostedt (VMware) union lower_chunk *lower_chunk;
1338d6e9098SSteven Rostedt (VMware) unsigned long flags;
1348d6e9098SSteven Rostedt (VMware) unsigned int upper1;
1358d6e9098SSteven Rostedt (VMware) unsigned int upper2;
1368d6e9098SSteven Rostedt (VMware) unsigned int lower;
1378d6e9098SSteven Rostedt (VMware) bool ret = false;
1388d6e9098SSteven Rostedt (VMware)
1398d6e9098SSteven Rostedt (VMware) if (!pid_list)
1406954e415SSteven Rostedt (VMware) return false;
1416954e415SSteven Rostedt (VMware)
1428d6e9098SSteven Rostedt (VMware) if (pid_split(pid, &upper1, &upper2, &lower) < 0)
1438d6e9098SSteven Rostedt (VMware) return false;
1448d6e9098SSteven Rostedt (VMware)
1458d6e9098SSteven Rostedt (VMware) raw_spin_lock_irqsave(&pid_list->lock, flags);
1468d6e9098SSteven Rostedt (VMware) upper_chunk = pid_list->upper[upper1];
1478d6e9098SSteven Rostedt (VMware) if (upper_chunk) {
1488d6e9098SSteven Rostedt (VMware) lower_chunk = upper_chunk->data[upper2];
1498d6e9098SSteven Rostedt (VMware) if (lower_chunk)
1508d6e9098SSteven Rostedt (VMware) ret = test_bit(lower, lower_chunk->data);
1518d6e9098SSteven Rostedt (VMware) }
1528d6e9098SSteven Rostedt (VMware) raw_spin_unlock_irqrestore(&pid_list->lock, flags);
1538d6e9098SSteven Rostedt (VMware)
1548d6e9098SSteven Rostedt (VMware) return ret;
1556954e415SSteven Rostedt (VMware) }
1566954e415SSteven Rostedt (VMware)
1576954e415SSteven Rostedt (VMware) /**
1586954e415SSteven Rostedt (VMware) * trace_pid_list_set - add a pid to the list
1596954e415SSteven Rostedt (VMware) * @pid_list: The pid list to add the @pid to.
1606954e415SSteven Rostedt (VMware) * @pid: The pid to add.
1616954e415SSteven Rostedt (VMware) *
1626954e415SSteven Rostedt (VMware) * Adds @pid to @pid_list. This is usually done explicitly by a user
1636954e415SSteven Rostedt (VMware) * adding a task to be traced, or indirectly by the fork function
1646954e415SSteven Rostedt (VMware) * when children should be traced and a task's pid is in the list.
1656954e415SSteven Rostedt (VMware) *
1666954e415SSteven Rostedt (VMware) * Return 0 on success, negative otherwise.
1676954e415SSteven Rostedt (VMware) */
trace_pid_list_set(struct trace_pid_list * pid_list,unsigned int pid)1686954e415SSteven Rostedt (VMware) int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid)
1696954e415SSteven Rostedt (VMware) {
1708d6e9098SSteven Rostedt (VMware) union upper_chunk *upper_chunk;
1718d6e9098SSteven Rostedt (VMware) union lower_chunk *lower_chunk;
1728d6e9098SSteven Rostedt (VMware) unsigned long flags;
1738d6e9098SSteven Rostedt (VMware) unsigned int upper1;
1748d6e9098SSteven Rostedt (VMware) unsigned int upper2;
1758d6e9098SSteven Rostedt (VMware) unsigned int lower;
1768d6e9098SSteven Rostedt (VMware) int ret;
1778d6e9098SSteven Rostedt (VMware)
1788d6e9098SSteven Rostedt (VMware) if (!pid_list)
1798d6e9098SSteven Rostedt (VMware) return -ENODEV;
1808d6e9098SSteven Rostedt (VMware)
1818d6e9098SSteven Rostedt (VMware) if (pid_split(pid, &upper1, &upper2, &lower) < 0)
1826954e415SSteven Rostedt (VMware) return -EINVAL;
1836954e415SSteven Rostedt (VMware)
1848d6e9098SSteven Rostedt (VMware) raw_spin_lock_irqsave(&pid_list->lock, flags);
1858d6e9098SSteven Rostedt (VMware) upper_chunk = pid_list->upper[upper1];
1868d6e9098SSteven Rostedt (VMware) if (!upper_chunk) {
1878d6e9098SSteven Rostedt (VMware) upper_chunk = get_upper_chunk(pid_list);
1888d6e9098SSteven Rostedt (VMware) if (!upper_chunk) {
1898d6e9098SSteven Rostedt (VMware) ret = -ENOMEM;
1908d6e9098SSteven Rostedt (VMware) goto out;
1918d6e9098SSteven Rostedt (VMware) }
1928d6e9098SSteven Rostedt (VMware) pid_list->upper[upper1] = upper_chunk;
1938d6e9098SSteven Rostedt (VMware) }
1948d6e9098SSteven Rostedt (VMware) lower_chunk = upper_chunk->data[upper2];
1958d6e9098SSteven Rostedt (VMware) if (!lower_chunk) {
1968d6e9098SSteven Rostedt (VMware) lower_chunk = get_lower_chunk(pid_list);
1978d6e9098SSteven Rostedt (VMware) if (!lower_chunk) {
1988d6e9098SSteven Rostedt (VMware) ret = -ENOMEM;
1998d6e9098SSteven Rostedt (VMware) goto out;
2008d6e9098SSteven Rostedt (VMware) }
2018d6e9098SSteven Rostedt (VMware) upper_chunk->data[upper2] = lower_chunk;
2028d6e9098SSteven Rostedt (VMware) }
2038d6e9098SSteven Rostedt (VMware) set_bit(lower, lower_chunk->data);
2048d6e9098SSteven Rostedt (VMware) ret = 0;
2058d6e9098SSteven Rostedt (VMware) out:
2068d6e9098SSteven Rostedt (VMware) raw_spin_unlock_irqrestore(&pid_list->lock, flags);
2078d6e9098SSteven Rostedt (VMware) return ret;
2086954e415SSteven Rostedt (VMware) }
2096954e415SSteven Rostedt (VMware)
2106954e415SSteven Rostedt (VMware) /**
2116954e415SSteven Rostedt (VMware) * trace_pid_list_clear - remove a pid from the list
2126954e415SSteven Rostedt (VMware) * @pid_list: The pid list to remove the @pid from.
2136954e415SSteven Rostedt (VMware) * @pid: The pid to remove.
2146954e415SSteven Rostedt (VMware) *
2156954e415SSteven Rostedt (VMware) * Removes @pid from @pid_list. This is usually done explicitly by a user
2166954e415SSteven Rostedt (VMware) * removing tasks from tracing, or indirectly by the exit function
2176954e415SSteven Rostedt (VMware) * when a task that is set to be traced exits.
2186954e415SSteven Rostedt (VMware) *
2196954e415SSteven Rostedt (VMware) * Return 0 on success, negative otherwise.
2206954e415SSteven Rostedt (VMware) */
trace_pid_list_clear(struct trace_pid_list * pid_list,unsigned int pid)2216954e415SSteven Rostedt (VMware) int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid)
2226954e415SSteven Rostedt (VMware) {
2238d6e9098SSteven Rostedt (VMware) union upper_chunk *upper_chunk;
2248d6e9098SSteven Rostedt (VMware) union lower_chunk *lower_chunk;
2258d6e9098SSteven Rostedt (VMware) unsigned long flags;
2268d6e9098SSteven Rostedt (VMware) unsigned int upper1;
2278d6e9098SSteven Rostedt (VMware) unsigned int upper2;
2288d6e9098SSteven Rostedt (VMware) unsigned int lower;
2298d6e9098SSteven Rostedt (VMware)
2308d6e9098SSteven Rostedt (VMware) if (!pid_list)
2318d6e9098SSteven Rostedt (VMware) return -ENODEV;
2328d6e9098SSteven Rostedt (VMware)
2338d6e9098SSteven Rostedt (VMware) if (pid_split(pid, &upper1, &upper2, &lower) < 0)
2346954e415SSteven Rostedt (VMware) return -EINVAL;
2356954e415SSteven Rostedt (VMware)
2368d6e9098SSteven Rostedt (VMware) raw_spin_lock_irqsave(&pid_list->lock, flags);
2378d6e9098SSteven Rostedt (VMware) upper_chunk = pid_list->upper[upper1];
2388d6e9098SSteven Rostedt (VMware) if (!upper_chunk)
2398d6e9098SSteven Rostedt (VMware) goto out;
2406954e415SSteven Rostedt (VMware)
2418d6e9098SSteven Rostedt (VMware) lower_chunk = upper_chunk->data[upper2];
2428d6e9098SSteven Rostedt (VMware) if (!lower_chunk)
2438d6e9098SSteven Rostedt (VMware) goto out;
2448d6e9098SSteven Rostedt (VMware)
2458d6e9098SSteven Rostedt (VMware) clear_bit(lower, lower_chunk->data);
2468d6e9098SSteven Rostedt (VMware)
2478d6e9098SSteven Rostedt (VMware) /* if there's no more bits set, add it to the free list */
2488d6e9098SSteven Rostedt (VMware) if (find_first_bit(lower_chunk->data, LOWER_MAX) >= LOWER_MAX) {
2498d6e9098SSteven Rostedt (VMware) put_lower_chunk(pid_list, lower_chunk);
2508d6e9098SSteven Rostedt (VMware) upper_chunk->data[upper2] = NULL;
2518d6e9098SSteven Rostedt (VMware) if (upper_empty(upper_chunk)) {
2528d6e9098SSteven Rostedt (VMware) put_upper_chunk(pid_list, upper_chunk);
2538d6e9098SSteven Rostedt (VMware) pid_list->upper[upper1] = NULL;
2548d6e9098SSteven Rostedt (VMware) }
2558d6e9098SSteven Rostedt (VMware) }
2568d6e9098SSteven Rostedt (VMware) out:
2578d6e9098SSteven Rostedt (VMware) raw_spin_unlock_irqrestore(&pid_list->lock, flags);
2586954e415SSteven Rostedt (VMware) return 0;
2596954e415SSteven Rostedt (VMware) }
2606954e415SSteven Rostedt (VMware)
2616954e415SSteven Rostedt (VMware) /**
2626954e415SSteven Rostedt (VMware) * trace_pid_list_next - return the next pid in the list
2636954e415SSteven Rostedt (VMware) * @pid_list: The pid list to examine.
2646954e415SSteven Rostedt (VMware) * @pid: The pid to start from
2656954e415SSteven Rostedt (VMware) * @next: The pointer to place the pid that is set starting from @pid.
2666954e415SSteven Rostedt (VMware) *
2676954e415SSteven Rostedt (VMware) * Looks for the next consecutive pid that is in @pid_list starting
2686954e415SSteven Rostedt (VMware) * at the pid specified by @pid. If one is set (including @pid), then
2696954e415SSteven Rostedt (VMware) * that pid is placed into @next.
2706954e415SSteven Rostedt (VMware) *
2716954e415SSteven Rostedt (VMware) * Return 0 when a pid is found, -1 if there are no more pids included.
2726954e415SSteven Rostedt (VMware) */
trace_pid_list_next(struct trace_pid_list * pid_list,unsigned int pid,unsigned int * next)2736954e415SSteven Rostedt (VMware) int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
2746954e415SSteven Rostedt (VMware) unsigned int *next)
2756954e415SSteven Rostedt (VMware) {
2768d6e9098SSteven Rostedt (VMware) union upper_chunk *upper_chunk;
2778d6e9098SSteven Rostedt (VMware) union lower_chunk *lower_chunk;
2788d6e9098SSteven Rostedt (VMware) unsigned long flags;
2798d6e9098SSteven Rostedt (VMware) unsigned int upper1;
2808d6e9098SSteven Rostedt (VMware) unsigned int upper2;
2818d6e9098SSteven Rostedt (VMware) unsigned int lower;
2826954e415SSteven Rostedt (VMware)
2838d6e9098SSteven Rostedt (VMware) if (!pid_list)
2848d6e9098SSteven Rostedt (VMware) return -ENODEV;
2858d6e9098SSteven Rostedt (VMware)
2868d6e9098SSteven Rostedt (VMware) if (pid_split(pid, &upper1, &upper2, &lower) < 0)
2878d6e9098SSteven Rostedt (VMware) return -EINVAL;
2888d6e9098SSteven Rostedt (VMware)
2898d6e9098SSteven Rostedt (VMware) raw_spin_lock_irqsave(&pid_list->lock, flags);
2908d6e9098SSteven Rostedt (VMware) for (; upper1 <= UPPER_MASK; upper1++, upper2 = 0) {
2918d6e9098SSteven Rostedt (VMware) upper_chunk = pid_list->upper[upper1];
2928d6e9098SSteven Rostedt (VMware)
2938d6e9098SSteven Rostedt (VMware) if (!upper_chunk)
2948d6e9098SSteven Rostedt (VMware) continue;
2958d6e9098SSteven Rostedt (VMware)
2968d6e9098SSteven Rostedt (VMware) for (; upper2 <= UPPER_MASK; upper2++, lower = 0) {
2978d6e9098SSteven Rostedt (VMware) lower_chunk = upper_chunk->data[upper2];
2988d6e9098SSteven Rostedt (VMware) if (!lower_chunk)
2998d6e9098SSteven Rostedt (VMware) continue;
3008d6e9098SSteven Rostedt (VMware)
3018d6e9098SSteven Rostedt (VMware) lower = find_next_bit(lower_chunk->data, LOWER_MAX,
3028d6e9098SSteven Rostedt (VMware) lower);
3038d6e9098SSteven Rostedt (VMware) if (lower < LOWER_MAX)
3048d6e9098SSteven Rostedt (VMware) goto found;
3056954e415SSteven Rostedt (VMware) }
3068d6e9098SSteven Rostedt (VMware) }
3078d6e9098SSteven Rostedt (VMware)
3088d6e9098SSteven Rostedt (VMware) found:
3098d6e9098SSteven Rostedt (VMware) raw_spin_unlock_irqrestore(&pid_list->lock, flags);
3108d6e9098SSteven Rostedt (VMware) if (upper1 > UPPER_MASK)
3116954e415SSteven Rostedt (VMware) return -1;
3128d6e9098SSteven Rostedt (VMware)
3138d6e9098SSteven Rostedt (VMware) *next = pid_join(upper1, upper2, lower);
3148d6e9098SSteven Rostedt (VMware) return 0;
3156954e415SSteven Rostedt (VMware) }
3166954e415SSteven Rostedt (VMware)
/**
 * trace_pid_list_first - return the first pid in the list
 * @pid_list: The pid list to examine.
 * @pid: The pointer to place the first found pid that is set.
 *
 * Looks for the first pid that is set in @pid_list, and places it
 * into @pid if found.
 *
 * Return 0 when a pid is found, -1 if there are no pids set.
 */
int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid)
{
	/* Simply find the next set pid starting from pid 0 */
	return trace_pid_list_next(pid_list, 0, pid);
}
3318d6e9098SSteven Rostedt (VMware)
/*
 * pid_list_refill_irq - replenish the chunk free lists
 * @iwork: the irq_work embedded in the trace_pid_list
 *
 * Deferred work queued by get_upper_chunk()/get_lower_chunk() when the
 * free lists drop to CHUNK_REALLOC. Those getters run with the pid_list
 * lock (and possibly scheduler run queue locks) held, so allocation has
 * to happen here instead: chunks are built up on local lists without any
 * lock held, then spliced onto the free lists under the pid_list lock.
 */
static void pid_list_refill_irq(struct irq_work *iwork)
{
	struct trace_pid_list *pid_list = container_of(iwork, struct trace_pid_list,
						       refill_irqwork);
	union upper_chunk *upper = NULL;
	union lower_chunk *lower = NULL;
	/* Tail pointers for appending to the local lists in order */
	union upper_chunk **upper_next = &upper;
	union lower_chunk **lower_next = &lower;
	int upper_count;
	int lower_count;
	int ucnt = 0;
	int lcnt = 0;

 again:
	/* Snapshot how many chunks are needed to get back to CHUNK_ALLOC */
	raw_spin_lock(&pid_list->lock);
	upper_count = CHUNK_ALLOC - pid_list->free_upper_chunks;
	lower_count = CHUNK_ALLOC - pid_list->free_lower_chunks;
	raw_spin_unlock(&pid_list->lock);

	if (upper_count <= 0 && lower_count <= 0)
		return;

	while (upper_count-- > 0) {
		union upper_chunk *chunk;

		/*
		 * NOTE(review): GFP_KERNEL may sleep; assumes this irq_work
		 * callback runs in a context where sleeping is allowed —
		 * confirm against how refill_irqwork is initialized/queued.
		 */
		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
		if (!chunk)
			break;
		*upper_next = chunk;
		upper_next = &chunk->next;
		ucnt++;
	}

	while (lower_count-- > 0) {
		union lower_chunk *chunk;

		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
		if (!chunk)
			break;
		*lower_next = chunk;
		lower_next = &chunk->next;
		lcnt++;
	}

	/* Splice the local lists onto the front of the free lists */
	raw_spin_lock(&pid_list->lock);
	if (upper) {
		*upper_next = pid_list->upper_list;
		pid_list->upper_list = upper;
		pid_list->free_upper_chunks += ucnt;
	}
	if (lower) {
		*lower_next = pid_list->lower_list;
		pid_list->lower_list = lower;
		pid_list->free_lower_chunks += lcnt;
	}
	raw_spin_unlock(&pid_list->lock);

	/*
	 * On success of allocating all the chunks, both counters
	 * will be less than zero. If they are not, then an allocation
	 * failed, and we should not try again.
	 * (The post-decrement in `while (count-- > 0)` leaves a fully
	 * drained counter at -1, while a break on allocation failure
	 * leaves it at >= 0.)
	 */
	if (upper_count >= 0 || lower_count >= 0)
		return;
	/*
	 * When the locks were released, free chunks could have
	 * been used and allocation needs to be done again. Might as
	 * well allocate it now.
	 */
	goto again;
}
4036954e415SSteven Rostedt (VMware)
4046954e415SSteven Rostedt (VMware) /**
4056954e415SSteven Rostedt (VMware) * trace_pid_list_alloc - create a new pid_list
4066954e415SSteven Rostedt (VMware) *
4076954e415SSteven Rostedt (VMware) * Allocates a new pid_list to store pids into.
4086954e415SSteven Rostedt (VMware) *
4096954e415SSteven Rostedt (VMware) * Returns the pid_list on success, NULL otherwise.
4106954e415SSteven Rostedt (VMware) */
trace_pid_list_alloc(void)4116954e415SSteven Rostedt (VMware) struct trace_pid_list *trace_pid_list_alloc(void)
4126954e415SSteven Rostedt (VMware) {
4136954e415SSteven Rostedt (VMware) struct trace_pid_list *pid_list;
4148d6e9098SSteven Rostedt (VMware) int i;
4156954e415SSteven Rostedt (VMware)
4168d6e9098SSteven Rostedt (VMware) /* According to linux/thread.h, pids can be no bigger that 30 bits */
4178d6e9098SSteven Rostedt (VMware) WARN_ON_ONCE(pid_max > (1 << 30));
4188d6e9098SSteven Rostedt (VMware)
4198d6e9098SSteven Rostedt (VMware) pid_list = kzalloc(sizeof(*pid_list), GFP_KERNEL);
4206954e415SSteven Rostedt (VMware) if (!pid_list)
4216954e415SSteven Rostedt (VMware) return NULL;
4226954e415SSteven Rostedt (VMware)
4238d6e9098SSteven Rostedt (VMware) init_irq_work(&pid_list->refill_irqwork, pid_list_refill_irq);
4246954e415SSteven Rostedt (VMware)
4258d6e9098SSteven Rostedt (VMware) raw_spin_lock_init(&pid_list->lock);
4268d6e9098SSteven Rostedt (VMware)
4278d6e9098SSteven Rostedt (VMware) for (i = 0; i < CHUNK_ALLOC; i++) {
4288d6e9098SSteven Rostedt (VMware) union upper_chunk *chunk;
4298d6e9098SSteven Rostedt (VMware)
4308d6e9098SSteven Rostedt (VMware) chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
4318d6e9098SSteven Rostedt (VMware) if (!chunk)
4328d6e9098SSteven Rostedt (VMware) break;
4338d6e9098SSteven Rostedt (VMware) chunk->next = pid_list->upper_list;
4348d6e9098SSteven Rostedt (VMware) pid_list->upper_list = chunk;
4358d6e9098SSteven Rostedt (VMware) pid_list->free_upper_chunks++;
4366954e415SSteven Rostedt (VMware) }
4378d6e9098SSteven Rostedt (VMware)
4388d6e9098SSteven Rostedt (VMware) for (i = 0; i < CHUNK_ALLOC; i++) {
4398d6e9098SSteven Rostedt (VMware) union lower_chunk *chunk;
4408d6e9098SSteven Rostedt (VMware)
4418d6e9098SSteven Rostedt (VMware) chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
4428d6e9098SSteven Rostedt (VMware) if (!chunk)
4438d6e9098SSteven Rostedt (VMware) break;
4448d6e9098SSteven Rostedt (VMware) chunk->next = pid_list->lower_list;
4458d6e9098SSteven Rostedt (VMware) pid_list->lower_list = chunk;
4468d6e9098SSteven Rostedt (VMware) pid_list->free_lower_chunks++;
4478d6e9098SSteven Rostedt (VMware) }
4488d6e9098SSteven Rostedt (VMware)
4496954e415SSteven Rostedt (VMware) return pid_list;
4506954e415SSteven Rostedt (VMware) }
4516954e415SSteven Rostedt (VMware)
4526954e415SSteven Rostedt (VMware) /**
4536954e415SSteven Rostedt (VMware) * trace_pid_list_free - Frees an allocated pid_list.
4546954e415SSteven Rostedt (VMware) *
4556954e415SSteven Rostedt (VMware) * Frees the memory for a pid_list that was allocated.
4566954e415SSteven Rostedt (VMware) */
trace_pid_list_free(struct trace_pid_list * pid_list)4576954e415SSteven Rostedt (VMware) void trace_pid_list_free(struct trace_pid_list *pid_list)
4586954e415SSteven Rostedt (VMware) {
4598d6e9098SSteven Rostedt (VMware) union upper_chunk *upper;
4608d6e9098SSteven Rostedt (VMware) union lower_chunk *lower;
4618d6e9098SSteven Rostedt (VMware) int i, j;
4628d6e9098SSteven Rostedt (VMware)
4636954e415SSteven Rostedt (VMware) if (!pid_list)
4646954e415SSteven Rostedt (VMware) return;
4656954e415SSteven Rostedt (VMware)
4668d6e9098SSteven Rostedt (VMware) irq_work_sync(&pid_list->refill_irqwork);
4678d6e9098SSteven Rostedt (VMware)
4688d6e9098SSteven Rostedt (VMware) while (pid_list->lower_list) {
4698d6e9098SSteven Rostedt (VMware) union lower_chunk *chunk;
4708d6e9098SSteven Rostedt (VMware)
4718d6e9098SSteven Rostedt (VMware) chunk = pid_list->lower_list;
4728d6e9098SSteven Rostedt (VMware) pid_list->lower_list = pid_list->lower_list->next;
4738d6e9098SSteven Rostedt (VMware) kfree(chunk);
4748d6e9098SSteven Rostedt (VMware) }
4758d6e9098SSteven Rostedt (VMware)
4768d6e9098SSteven Rostedt (VMware) while (pid_list->upper_list) {
4778d6e9098SSteven Rostedt (VMware) union upper_chunk *chunk;
4788d6e9098SSteven Rostedt (VMware)
4798d6e9098SSteven Rostedt (VMware) chunk = pid_list->upper_list;
4808d6e9098SSteven Rostedt (VMware) pid_list->upper_list = pid_list->upper_list->next;
4818d6e9098SSteven Rostedt (VMware) kfree(chunk);
4828d6e9098SSteven Rostedt (VMware) }
4838d6e9098SSteven Rostedt (VMware)
4848d6e9098SSteven Rostedt (VMware) for (i = 0; i < UPPER1_SIZE; i++) {
4858d6e9098SSteven Rostedt (VMware) upper = pid_list->upper[i];
4868d6e9098SSteven Rostedt (VMware) if (upper) {
4878d6e9098SSteven Rostedt (VMware) for (j = 0; j < UPPER2_SIZE; j++) {
4888d6e9098SSteven Rostedt (VMware) lower = upper->data[j];
4898d6e9098SSteven Rostedt (VMware) kfree(lower);
4908d6e9098SSteven Rostedt (VMware) }
4918d6e9098SSteven Rostedt (VMware) kfree(upper);
4928d6e9098SSteven Rostedt (VMware) }
4938d6e9098SSteven Rostedt (VMware) }
4946954e415SSteven Rostedt (VMware) kfree(pid_list);
4956954e415SSteven Rostedt (VMware) }
496