/openbmc/linux/tools/perf/util/thread_map.c
    36  struct perf_thread_map *threads;    in thread_map__new_by_pid() local
    47  threads = thread_map__alloc(items);    in thread_map__new_by_pid()
    48  if (threads != NULL) {    in thread_map__new_by_pid()
    50  perf_thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));    in thread_map__new_by_pid()
    51  threads->nr = items;    in thread_map__new_by_pid()
    52  refcount_set(&threads->refcnt, 1);    in thread_map__new_by_pid()
    59  return threads;    in thread_map__new_by_pid()
    64  struct perf_thread_map *threads = thread_map__alloc(1);    in thread_map__new_by_tid() local
    66  if (threads != NULL) {    in thread_map__new_by_tid()
    67  perf_thread_map__set_pid(threads, 0, tid);    in thread_map__new_by_tid()
    [all …]
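
thread_map__new_by_pid() builds its map by listing the process's task directory. A minimal, standalone sketch of that enumeration step, using only POSIX scandir(); the helper names (filter_tid, list_tids) are illustrative, not perf's own:

    #include <ctype.h>
    #include <dirent.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    /* Accept only numeric directory names, i.e. thread IDs. */
    static int filter_tid(const struct dirent *e)
    {
        return isdigit((unsigned char)e->d_name[0]);
    }

    /* Enumerate the TIDs of a process, the same way thread_map__new_by_pid()
     * scans /proc/<pid>/task before filling a perf_thread_map. */
    static int list_tids(pid_t pid)
    {
        char path[64];
        struct dirent **namelist;
        int i, items;

        snprintf(path, sizeof(path), "/proc/%d/task", (int)pid);
        items = scandir(path, &namelist, filter_tid, alphasort);
        if (items < 0)
            return -1;

        for (i = 0; i < items; i++) {
            printf("tid %d\n", atoi(namelist[i]->d_name));
            free(namelist[i]);
        }
        free(namelist);
        return items;
    }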

/openbmc/linux/tools/perf/tests/thread-map.c
    69  struct perf_thread_map *threads;    in process_event() local
    75  threads = thread_map__new_event(&event->thread_map);    in process_event()
    76  TEST_ASSERT_VAL("failed to alloc map", threads);    in process_event()
    78  TEST_ASSERT_VAL("wrong nr", threads->nr == 1);    in process_event()
    80  perf_thread_map__pid(threads, 0) == getpid());    in process_event()
    82  perf_thread_map__comm(threads, 0) &&    in process_event()
    83  !strcmp(perf_thread_map__comm(threads, 0), NAME));    in process_event()
    85  refcount_read(&threads->refcnt) == 1);    in process_event()
    86  perf_thread_map__put(threads);    in process_event()
    92  struct perf_thread_map *threads;    in test__thread_map_synthesize() local
    [all …]

/openbmc/linux/tools/perf/tests/mmap-thread-lookup.c
    21  #define THREADS 4    macro
    32  static struct thread_data threads[THREADS];    variable
    81  struct thread_data *td = &threads[i];    in thread_create()
    101  struct thread_data *td0 = &threads[0];    in threads_create()
    110  for (i = 1; !err && i < THREADS; i++)    in threads_create()
    118  struct thread_data *td0 = &threads[0];    in threads_destroy()
    126  for (i = 1; !err && i < THREADS; i++)    in threads_destroy()
    127  err = pthread_join(threads[i].pt, NULL);    in threads_destroy()
    162  * The threads_create will not return before all threads    in mmap_events()
    168  TEST_ASSERT_VAL("failed to create threads", !threads_create());    in mmap_events()
    [all …]
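
mmap-thread-lookup.c keeps its worker state in a fixed-size threads[THREADS] array and creates and joins the workers in loops. A generic, self-contained version of that create/join pattern (worker and its output are illustrative, not the test's helpers):

    #include <pthread.h>
    #include <stdio.h>

    #define THREADS 4

    static void *worker(void *arg)
    {
        /* Each worker just reports its slot index. */
        printf("thread %ld running\n", (long)arg);
        return NULL;
    }

    int main(void)
    {
        pthread_t threads[THREADS];
        int created = 0, i, err = 0;

        /* Create all workers; stop early if any creation fails. */
        for (i = 0; i < THREADS; i++) {
            err = pthread_create(&threads[i], NULL, worker, (void *)(long)i);
            if (err)
                break;
            created++;
        }

        /* Join only the threads that were actually created. */
        for (i = 0; i < created; i++)
            pthread_join(threads[i], NULL);

        return err;
    }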

/openbmc/linux/tools/lib/perf/tests/test-evsel.c
    56  struct perf_thread_map *threads;    in test_stat_thread() local
    64  threads = perf_thread_map__new_dummy();    in test_stat_thread()
    65  __T("failed to create threads", threads);    in test_stat_thread()
    67  perf_thread_map__set_pid(threads, 0, 0);    in test_stat_thread()
    72  err = perf_evsel__open(evsel, NULL, threads);    in test_stat_thread()
    81  perf_thread_map__put(threads);    in test_stat_thread()
    88  struct perf_thread_map *threads;    in test_stat_thread_enable() local
    97  threads = perf_thread_map__new_dummy();    in test_stat_thread_enable()
    98  __T("failed to create threads", threads);    in test_stat_thread_enable()
    100  perf_thread_map__set_pid(threads, 0, 0);    in test_stat_thread_enable()
    [all …]
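
The test-evsel.c hits follow libperf's basic per-thread counting flow: a one-entry dummy thread map pointing at the calling process, then perf_evsel__open() against it. A minimal sketch of that flow, assuming libperf's public headers (perf/evsel.h, perf/threadmap.h) as exercised by the tests above; error handling is trimmed and the busy loop is only there to give the counter something to measure:

    #include <linux/perf_event.h>
    #include <perf/evsel.h>
    #include <perf/threadmap.h>
    #include <stdio.h>

    int main(void)
    {
        struct perf_event_attr attr = {
            .type   = PERF_TYPE_SOFTWARE,
            .config = PERF_COUNT_SW_CPU_CLOCK,
        };
        struct perf_counts_values counts = { 0 };
        struct perf_thread_map *threads;
        struct perf_evsel *evsel;

        /* One-entry map; pid 0 means "the calling thread". */
        threads = perf_thread_map__new_dummy();
        perf_thread_map__set_pid(threads, 0, 0);

        evsel = perf_evsel__new(&attr);
        perf_evsel__open(evsel, NULL, threads);   /* no CPU map: per-thread counting */

        /* Burn a little CPU time. */
        for (volatile int i = 0; i < 1000000; i++)
            ;

        perf_evsel__read(evsel, 0, 0, &counts);
        printf("cpu-clock: %llu ns\n", (unsigned long long)counts.val);

        perf_evsel__close(evsel);
        perf_evsel__delete(evsel);
        perf_thread_map__put(threads);
        return 0;
    }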

/openbmc/linux/tools/lib/perf/tests/test-threadmap.c
    16  struct perf_thread_map *threads;    in test_threadmap_array() local
    19  threads = perf_thread_map__new_array(nr, array);    in test_threadmap_array()
    20  __T("Failed to allocate new thread map", threads);    in test_threadmap_array()
    22  __T("Unexpected number of threads", perf_thread_map__nr(threads) == nr);    in test_threadmap_array()
    26  perf_thread_map__pid(threads, i) == (array ? array[i] : -1));    in test_threadmap_array()
    30  perf_thread_map__set_pid(threads, i, i * 100);    in test_threadmap_array()
    33  perf_thread_map__pid(threads, 0) == (array ? array[0] : -1));    in test_threadmap_array()
    37  perf_thread_map__pid(threads, i) == i * 100);    in test_threadmap_array()
    40  perf_thread_map__put(threads);    in test_threadmap_array()
    48  struct perf_thread_map *threads;    in test_threadmap() local
    [all …]
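
test-threadmap.c exercises perf_thread_map__new_array(), which fills one map entry per element of the pid array (or -1 entries when no array is given). A short usage sketch under the same assumed libperf API; the pid values are placeholders:

    #include <perf/threadmap.h>
    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
        pid_t pids[] = { 1, 2, 3 };
        struct perf_thread_map *threads;

        /* Build a map with one entry per pid in the array. */
        threads = perf_thread_map__new_array(sizeof(pids) / sizeof(pids[0]), pids);
        if (!threads)
            return 1;

        for (int i = 0; i < perf_thread_map__nr(threads); i++)
            printf("entry %d -> pid %d\n", i, perf_thread_map__pid(threads, i));

        perf_thread_map__put(threads);
        return 0;
    }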

/openbmc/linux/tools/lib/perf/tests/test-evlist.c
    95  struct perf_thread_map *threads;    in test_stat_thread() local
    108  threads = perf_thread_map__new_dummy();    in test_stat_thread()
    109  __T("failed to create threads", threads);    in test_stat_thread()
    111  perf_thread_map__set_pid(threads, 0, 0);    in test_stat_thread()
    130  perf_evlist__set_maps(evlist, NULL, threads);    in test_stat_thread()
    143  perf_thread_map__put(threads);    in test_stat_thread()
    150  struct perf_thread_map *threads;    in test_stat_thread_enable() local
    165  threads = perf_thread_map__new_dummy();    in test_stat_thread_enable()
    166  __T("failed to create threads", threads);    in test_stat_thread_enable()
    168  perf_thread_map__set_pid(threads, 0, 0);    in test_stat_thread_enable()
    [all …]

/openbmc/linux/tools/lib/perf/threadmap.c
    47  struct perf_thread_map *threads = thread_map__alloc(nr_threads);    in perf_thread_map__new_array() local
    50  if (!threads)    in perf_thread_map__new_array()
    54  perf_thread_map__set_pid(threads, i, array ? array[i] : -1);    in perf_thread_map__new_array()
    56  threads->nr = nr_threads;    in perf_thread_map__new_array()
    57  refcount_set(&threads->refcnt, 1);    in perf_thread_map__new_array()
    59  return threads;    in perf_thread_map__new_array()
    67  static void perf_thread_map__delete(struct perf_thread_map *threads)    in perf_thread_map__delete() argument
    69  if (threads) {    in perf_thread_map__delete()
    72  WARN_ONCE(refcount_read(&threads->refcnt) != 0,    in perf_thread_map__delete()
    74  for (i = 0; i < threads->nr; i++)    in perf_thread_map__delete()
    [all …]

/openbmc/openbmc/meta-openembedded/meta-oe/recipes-benchmark/tiobench/tiobench-0.3.3/avoid-glibc-clashes.patch
    38  pthread_attr_setscope(&(d->threads[i].thread_attr),
    41  - d->threads[i].buffer = aligned_alloc( d->threads[i].blockSize );
    42  + d->threads[i].buffer = _aligned_alloc( d->threads[i].blockSize );
    43  if( d->threads[i].buffer == NULL )
    49  unlink(d->threads[i].fileName);
    50  - aligned_free( d->threads[i].buffer, d->threads[i].blockSize );
    51  + _aligned_free( d->threads[i].buffer, d->threads[i].blockSize );
    52  d->threads[i].buffer = 0;
    54  pthread_attr_destroy( &(d->threads[i].thread_attr) );
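
This patch renames tiobench's local aligned_alloc()/aligned_free() helpers to _aligned_alloc()/_aligned_free() so they no longer collide with the aligned_alloc() that newer glibc exports. Purely as an illustration of such a helper (this is not tiobench's actual implementation), the renamed wrapper could be built on posix_memalign():

    #include <stdlib.h>
    #include <unistd.h>

    /* Hypothetical replacement helper: page-aligned allocation under a name
     * that does not clash with glibc's aligned_alloc(). */
    static void *_aligned_alloc(size_t size)
    {
        void *p = NULL;
        size_t align = (size_t)sysconf(_SC_PAGESIZE);

        if (posix_memalign(&p, align, size))
            return NULL;
        return p;
    }

    static void _aligned_free(void *p, size_t size)
    {
        (void)size;   /* kept only to mirror the original two-argument signature */
        free(p);
    }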

/openbmc/qemu/hw/core/machine-smp.c
    59  g_string_append_printf(s, " * threads (%u)", ms->smp.threads);    in cpu_hierarchy_to_string()
    68  * Any missing parameter in "cpus/maxcpus/sockets/cores/threads" will be
    71  * In the calculation of omitted sockets/cores/threads: we prefer sockets
    72  * over cores over threads before 6.2, while preferring cores over sockets
    73  * over threads since 6.2.
    97  unsigned threads = config->has_threads ? config->threads : 0;    in machine_parse_smp_config() local
    113  (config->has_threads && config->threads == 0) ||    in machine_parse_smp_config()
    168  threads = threads > 0 ? threads : 1;    in machine_parse_smp_config()
    176  threads = threads > 0 ? threads : 1;    in machine_parse_smp_config()
    179  modules * cores * threads);    in machine_parse_smp_config()
    [all …]
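
machine_parse_smp_config() defaults any topology level still unknown to 1 and then checks the product against cpus/maxcpus. A deliberately simplified sketch of that arithmetic, ignoring the dies/clusters/modules/books/drawers levels and the pre-6.2 preference order described above; it is not QEMU's exact algorithm:

    #include <stdio.h>

    /* 0 means the option was omitted on the command line. */
    struct smp_config { unsigned cpus, sockets, cores, threads, maxcpus; };

    static int parse_smp(struct smp_config *c)
    {
        /* Default the levels that are still unknown to 1 ... */
        c->sockets = c->sockets > 0 ? c->sockets : 1;
        c->cores   = c->cores   > 0 ? c->cores   : 1;
        c->threads = c->threads > 0 ? c->threads : 1;

        unsigned total = c->sockets * c->cores * c->threads;

        /* ... and let the product drive cpus/maxcpus when those were omitted. */
        c->maxcpus = c->maxcpus > 0 ? c->maxcpus : total;
        c->cpus    = c->cpus    > 0 ? c->cpus    : c->maxcpus;

        /* The topology product must match maxcpus, and cpus may not exceed it. */
        if (total != c->maxcpus || c->cpus > c->maxcpus) {
            fprintf(stderr, "invalid SMP configuration\n");
            return -1;
        }
        return 0;
    }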

/openbmc/linux/tools/perf/bench/breakpoint.c
    33  OPT_UINTEGER('t', "threads", &thread_params.nthreads, "Specify amount of threads"),
    89  pthread_t *threads;    in breakpoint_thread() local
    91  threads = calloc(thread_params.nthreads, sizeof(threads[0]));    in breakpoint_thread()
    92  if (!threads)    in breakpoint_thread()
    98  if (pthread_create(&threads[i], NULL, passive_thread, &done))    in breakpoint_thread()
    104  pthread_join(threads[i], NULL);    in breakpoint_thread()
    106  free(threads);    in breakpoint_thread()
    111  // then starts nparallel threads which create and join bench_repeat batches of nthreads threads.
    155  printf("# Created/joined %d threads with %d breakpoints and %d parallelism\n",    in bench_breakpoint_thread()
    185  OPT_UINTEGER('p', "passive", &enable_params.npassive, "Specify amount of passive threads"),
    [all …]

/openbmc/linux/tools/perf/bench/synthesize.c
    34  OPT_UINTEGER('m', "min-threads", &min_threads,
    35  "Minimum number of threads in multithreaded bench"),
    36  OPT_UINTEGER('M', "max-threads", &max_threads,
    37  "Maximum number of threads in multithreaded bench"),
    62  struct perf_thread_map *threads,    in do_run_single_threaded() argument
    81  target, threads,    in do_run_single_threaded()
    116  struct perf_thread_map *threads;    in run_single_threaded() local
    125  threads = thread_map__new_by_pid(getpid());    in run_single_threaded()
    126  if (!threads) {    in run_single_threaded()
    136  err = do_run_single_threaded(session, threads, &target, false);    in run_single_threaded()
    [all …]

/openbmc/qemu/tests/unit/test-smp-parse.c
    30  * -sockets/cores/threads
    37  .has_threads = hd, .threads = d, \
    46  .threads = d, \
    52  * -sockets/dies/modules/cores/threads
    62  .has_threads = hf, .threads = f, \
    68  * -sockets/clusters/cores/threads
    76  .has_threads = he, .threads = e, \
    82  * -drawers/books/sockets/cores/threads
    92  .has_threads = hf, .threads = f, \
    99  * -drawers/books/sockets/dies/clusters/modules/cores/threads
    [all …]

/openbmc/qemu/tests/unit/test-aio-multithread.c
    24  static IOThread *threads[NUM_CONTEXTS];    variable
    71  threads[i] = iothread_new();    in create_aio_contexts()
    72  ctx[i] = iothread_get_aio_context(threads[i]);    in create_aio_contexts()
    91  iothread_join(threads[i]);    in join_aio_contexts()
    226  static void test_multi_co_mutex(int threads, int seconds)    in test_multi_co_mutex() argument
    236  assert(threads <= NUM_CONTEXTS);    in test_multi_co_mutex()
    237  running = threads;    in test_multi_co_mutex()
    238  for (i = 0; i < threads; i++) {    in test_multi_co_mutex()
    255  /* Testing with NUM_CONTEXTS threads focuses on the queue. The mutex however
    256  * is too contended (and the threads spend too much time in aio_poll)
    [all …]

/openbmc/linux/Documentation/driver-api/dmaengine/dmatest.rst
    16  test multiple channels at the same time, and it can start multiple threads
    73  (shared) parameters used for all threads will use the new values.
    74  After the channels are specified, each thread is set as pending. All threads
    82  Once started a message like " dmatest: Added 1 threads using dma0chan0" is
    171  dmatest: Added 1 threads using dma0chan2
    179  dmatest: Added 1 threads using dma0chan1
    181  dmatest: Added 1 threads using dma0chan2
    191  dmatest: Added 1 threads using dma0chan0
    192  dmatest: Added 1 threads using dma0chan3
    193  dmatest: Added 1 threads using dma0chan4
    [all …]

/openbmc/linux/Documentation/arch/x86/topology.rst
    24  threads, cores, packages, etc.
    37  - threads
    95  A core consists of 1 or more threads. It does not matter whether the threads
    96  are SMT- or CMT-type threads.
    105  The number of threads in a core. The number of threads in a package can be
    111  Threads    chapter
    116  AMDs nomenclature for CMT threads is "Compute Unit Core". The kernel always
    123  The cpumask contains all online threads in the package to which a thread
    126  The number of online threads is also printed in /proc/cpuinfo "siblings."
    130  The cpumask contains all online threads in the core to which a thread
    [all …]
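
The cpumask/siblings relationships that topology.rst describes are exported per CPU through sysfs. A small reader for the thread-sibling list of one CPU; the path follows the standard Linux topology layout (newer kernels also expose it as core_cpus_list):

    #include <stdio.h>

    /* Print which CPUs share a core with the given CPU, e.g. "0,4". */
    static int print_thread_siblings(int cpu)
    {
        char path[128], buf[256];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fgets(buf, sizeof(buf), f))
            printf("cpu%d thread siblings: %s", cpu, buf);
        fclose(f);
        return 0;
    }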

/openbmc/linux/Documentation/power/freezing-of-tasks.rst
    11  kernel threads are controlled during hibernation or system-wide suspend (on some
    19  PF_NOFREEZE unset (all user space processes and some kernel threads) are
    29  fake signal to all user space processes, and wakes up all the kernel threads.
    37  frozen before kernel threads.
    44  signal-handling code, but the freezable kernel threads need to call it
    64  threads must call try_to_freeze() somewhere or use one of the
    80  - freezes all tasks (including kernel threads) because we can't freeze
    81  kernel threads without freezing userspace tasks
    84  - thaws only kernel threads; this is particularly useful if we need to do
    85  anything special in between thawing of kernel threads and thawing of
    [all …]
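
The document's requirement that freezable kernel threads call try_to_freeze() usually takes the shape of the loop below. A minimal sketch of such a kthread main function, assuming the usual <linux/freezer.h>/<linux/kthread.h> helpers; the thread name and work step are placeholders:

    #include <linux/delay.h>
    #include <linux/freezer.h>
    #include <linux/kthread.h>

    static int my_worker(void *data)
    {
        /* Opt in to the freezer: kthreads have PF_NOFREEZE set by default. */
        set_freezable();

        while (!kthread_should_stop()) {
            /* Park here while the system is suspending or hibernating. */
            try_to_freeze();

            /* ... do one unit of work ... */
            msleep_interruptible(1000);
        }
        return 0;
    }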

/openbmc/linux/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
    6  * it's sysfs interface and then verifies that all threads
    121  struct random_thread_args threads[THREADS];    in dscr_default_random_test() local
    132  FAIL_IF(pthread_barrier_init(&barrier, NULL, THREADS));    in dscr_default_random_test()
    136  for (int i = 0; i < THREADS; i++) {    in dscr_default_random_test()
    137  threads[i].expected_system_dscr = &expected_system_dscr;    in dscr_default_random_test()
    138  threads[i].rw_lock = &rw_lock;    in dscr_default_random_test()
    139  threads[i].barrier = &barrier;    in dscr_default_random_test()
    141  FAIL_IF(pthread_create(&threads[i].thread_id, NULL,    in dscr_default_random_test()
    142  dscr_default_random_thread, (void *)&threads[i]));    in dscr_default_random_test()
    145  for (int i = 0; i < THREADS; i++)    in dscr_default_random_test()
    [all …]
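
dscr_default_random_test() releases all of its workers through a single pthread_barrier so they start racing at the same instant. The barrier pattern on its own, in generic POSIX form (worker is illustrative, not the selftest's thread function):

    #include <pthread.h>

    #define THREADS 8

    static pthread_barrier_t barrier;

    static void *worker(void *arg)
    {
        (void)arg;
        /* Block until all THREADS workers have reached this point. */
        pthread_barrier_wait(&barrier);
        /* ... the racing section starts here for everyone at once ... */
        return NULL;
    }

    int main(void)
    {
        pthread_t tids[THREADS];

        pthread_barrier_init(&barrier, NULL, THREADS);
        for (int i = 0; i < THREADS; i++)
            pthread_create(&tids[i], NULL, worker, NULL);
        for (int i = 0; i < THREADS; i++)
            pthread_join(tids[i], NULL);
        pthread_barrier_destroy(&barrier);
        return 0;
    }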

/openbmc/linux/include/uapi/linux/membarrier.h
    34  * @MEMBARRIER_CMD_GLOBAL: Execute a memory barrier on all running threads.
    36  * is ensured that all running threads have passed
    40  * (non-running threads are de facto in such a
    41  * state). This covers threads from all processes
    44  * Execute a memory barrier on all running threads
    48  * is ensured that all running threads have passed
    52  * (non-running threads are de facto in such a
    53  * state). This only covers threads from processes
    70  * threads siblings have passed through a state
    74  * (non-running threads are de facto in such a
    [all …]
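
The command descriptions in membarrier.h belong to the membarrier(2) system call; glibc provides no wrapper, so user space normally goes through syscall(2). A small sketch that queries support and then issues a global barrier (flags left 0, per the UAPI header); the local membarrier() wrapper is this example's own:

    #include <linux/membarrier.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int membarrier(int cmd, unsigned int flags)
    {
        return syscall(__NR_membarrier, cmd, flags);
    }

    int main(void)
    {
        /* QUERY returns a bitmask of the commands this kernel supports. */
        int supported = membarrier(MEMBARRIER_CMD_QUERY, 0);

        if (supported < 0 || !(supported & MEMBARRIER_CMD_GLOBAL)) {
            fprintf(stderr, "MEMBARRIER_CMD_GLOBAL not supported\n");
            return 1;
        }

        /* Memory barrier observed by all running threads in the system. */
        membarrier(MEMBARRIER_CMD_GLOBAL, 0);
        return 0;
    }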

/openbmc/qemu/tests/qtest/cpu-plug-test.c
    22  unsigned threads;    member
    38  "-smp 1,sockets=%u,cores=%u,threads=%u,maxcpus=%u",    in test_plug_with_device_add()
    40  td->sockets, td->cores, td->threads, td->maxcpus);    in test_plug_with_device_add()
    96  data->threads = 2;    in add_pc_test_case()
    97  data->maxcpus = data->sockets * data->cores * data->threads;    in add_pc_test_case()
    101  data->threads, data->maxcpus);    in add_pc_test_case()
    122  data->threads = 1;    in add_pseries_test_case()
    123  data->maxcpus = data->sockets * data->cores * data->threads;    in add_pseries_test_case()
    127  data->threads, data->maxcpus);    in add_pseries_test_case()
    148  data->threads = 1;    in add_s390x_test_case()
    [all …]

/openbmc/linux/tools/testing/selftests/mm/migration.c
    25  pthread_t *threads;    in FIXTURE() local
    51  self->threads = malloc(self->nthreads * sizeof(*self->threads));    in FIXTURE_SETUP()
    52  ASSERT_NE(self->threads, NULL);    in FIXTURE_SETUP()
    59  free(self->threads);    in FIXTURE_TEARDOWN()
    115  * between nodes whilst other threads try and access them triggering the
    124  SKIP(return, "Not enough threads or NUMA nodes available");
    132  if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
    137  ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
    150  SKIP(return, "Not enough threads or NUMA nodes available");
    184  SKIP(return, "Not enough threads or NUMA nodes available");
    [all …]

/openbmc/qemu/tests/migration/guestperf/plot.py
    151  threads = {}
    153  if record._tid in threads:
    155  threads[record._tid] = {
    172  threads[record._tid]["xaxis"].append(record._timestamp - starttime)
    173  threads[record._tid]["yaxis"].append(record._value)
    174  threads[record._tid]["labels"].append(self._get_progress_label(progress))
    179  for tid in threads.keys():
    181  go.Scatter(x=threads[tid]["xaxis"],
    182  y=threads[tid]["yaxis"],
    191  text=threads[tid]["labels"]))
    [all …]

/openbmc/linux/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
    3  # Multiqueue: Using pktgen threads for sending on multiple CPUs
    4  # * adding devices to kernel threads which are in the same NUMA node
    5  # * bound devices queue's irq affinity to the threads, 1:1 mapping
    32  [ $THREADS -gt ${#irq_array[*]} -o $THREADS -gt ${#cpu_array[*]} ] && \
    33  err 1 "Thread number $THREADS exceeds: min (${#irq_array[*]},${#cpu_array[*]})"
    52  # Threads are specified with parameter -t value in $THREADS
    53  for ((i = 0; i < $THREADS; i++)); do
    109  for ((i = 0; i < $THREADS; i++)); do

/openbmc/qemu/tests/tcg/multiarch/vma-pthread.c
    10  * Reader, writer and executor threads perform the respective operations on
    12  * Two mutator threads change the non-fixed protection bits randomly.
    159  pthread_t threads[5];    in main() local
    182  /* Start threads. */    in main()
    183  ret = pthread_create(&threads[0], NULL, thread_read, &ctx);    in main()
    185  ret = pthread_create(&threads[1], NULL, thread_write, &ctx);    in main()
    187  ret = pthread_create(&threads[2], NULL, thread_execute, &ctx);    in main()
    190  ret = pthread_create(&threads[i], NULL, thread_mutate, &ctx);    in main()
    194  /* Wait for threads to stop. */    in main()
    195  for (i = 0; i < sizeof(threads) / sizeof(threads[0]); i++) {    in main()
    [all …]

/openbmc/linux/arch/powerpc/kvm/book3s_hv_ras.c
    258  * - On TB error, HMI interrupt is reported on all the threads of the core
    267  * All threads need to co-ordinate before making opal hmi handler.
    268  * All threads will use sibling_subcore_state->in_guest[] (shared by all
    269  * threads in the core) in paca which holds information about whether
    272  * subcore status. Only primary threads from each subcore is responsible
    279  * primary threads to decide who takes up the responsibility.
    286  * - All other threads which are in host will call
    291  * - Once all primary threads clear in_guest[0-3], all of them will invoke
    293  * - Now all threads will wait for TB resync to complete by invoking
    297  * - All other threads will now come out of resync wait loop and proceed
    [all …]

/openbmc/linux/tools/testing/selftests/dma/dma_map_benchmark.c
    29  int threads = 1, seconds = 20, node = -1;    in main() local
    41  threads = atoi(optarg);    in main()
    66  if (threads <= 0 || threads > DMA_MAP_MAX_THREADS) {    in main()
    67  fprintf(stderr, "invalid number of threads, must be in 1-%d\n",    in main()
    109  map.threads = threads;    in main()
    121  printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s granule: %d\n",    in main()
    122  threads, seconds, node, dir[directions], granule);    in main()