Lines matching refs: tp

All hits below are in tools/perf/util/cputopo.c: the constructors cpu_topology__new(), numa_topology__new() and hybrid_topology__new(), their matching __delete() destructors, and the build_cpu_topology() helper.

35 static int build_cpu_topology(struct cpu_topology *tp, int cpu)  in build_cpu_topology()  argument
64 for (i = 0; i < tp->package_cpus_lists; i++) { in build_cpu_topology()
65 if (!strcmp(buf, tp->package_cpus_list[i])) in build_cpu_topology()
68 if (i == tp->package_cpus_lists) { in build_cpu_topology()
69 tp->package_cpus_list[i] = buf; in build_cpu_topology()
70 tp->package_cpus_lists++; in build_cpu_topology()
77 if (!tp->die_cpus_list) in build_cpu_topology()
95 for (i = 0; i < tp->die_cpus_lists; i++) { in build_cpu_topology()
96 if (!strcmp(buf, tp->die_cpus_list[i])) in build_cpu_topology()
99 if (i == tp->die_cpus_lists) { in build_cpu_topology()
100 tp->die_cpus_list[i] = buf; in build_cpu_topology()
101 tp->die_cpus_lists++; in build_cpu_topology()
125 for (i = 0; i < tp->core_cpus_lists; i++) { in build_cpu_topology()
126 if (!strcmp(buf, tp->core_cpus_list[i])) in build_cpu_topology()
129 if (i == tp->core_cpus_lists) { in build_cpu_topology()
130 tp->core_cpus_list[i] = buf; in build_cpu_topology()
131 tp->core_cpus_lists++; in build_cpu_topology()
142 void cpu_topology__delete(struct cpu_topology *tp) in cpu_topology__delete() argument
146 if (!tp) in cpu_topology__delete()
149 for (i = 0 ; i < tp->package_cpus_lists; i++) in cpu_topology__delete()
150 zfree(&tp->package_cpus_list[i]); in cpu_topology__delete()
152 for (i = 0 ; i < tp->die_cpus_lists; i++) in cpu_topology__delete()
153 zfree(&tp->die_cpus_list[i]); in cpu_topology__delete()
155 for (i = 0 ; i < tp->core_cpus_lists; i++) in cpu_topology__delete()
156 zfree(&tp->core_cpus_list[i]); in cpu_topology__delete()
158 free(tp); in cpu_topology__delete()
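
cpu_topology__delete() is NULL-tolerant (the early return on line 146) and releases every owned string with zfree() before freeing the single backing block. In the perf tools, zfree() frees the pointee and nulls the pointer; the macro below is a sketch of that idiom, not the exact tools/lib definition.

#include <stdlib.h>

/* Free *pp and poison the pointer, so a second zfree() degrades to
 * the harmless free(NULL). */
#define zfree(pp)		\
	do {			\
		free(*(pp));	\
		*(pp) = NULL;	\
	} while (0)
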
258 struct cpu_topology *tp = NULL; in cpu_topology__new() local
283 addr = calloc(1, sizeof(*tp) + nr_addr * sz); in cpu_topology__new()
287 tp = addr; in cpu_topology__new()
288 addr += sizeof(*tp); in cpu_topology__new()
289 tp->package_cpus_list = addr; in cpu_topology__new()
292 tp->die_cpus_list = addr; in cpu_topology__new()
295 tp->core_cpus_list = addr; in cpu_topology__new()
301 ret = build_cpu_topology(tp, i); in cpu_topology__new()
309 cpu_topology__delete(tp); in cpu_topology__new()
310 tp = NULL; in cpu_topology__new()
312 return tp; in cpu_topology__new()
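
cpu_topology__new() makes one calloc() serve four objects: the struct itself plus three arrays of char pointers (nr_addr counts the arrays, sz is the size of one array). The assignments on lines 289-295 just carve up that block, which is why the error path on lines 309-312 can reclaim everything built so far with a single cpu_topology__delete(tp). A hedged sketch of the layout, using a simplified struct that is not the real cpu_topology definition:

#include <stdlib.h>

struct topo_sketch {
	size_t package_cpus_lists, die_cpus_lists, core_cpus_lists;
	char **package_cpus_list;
	char **die_cpus_list;
	char **core_cpus_list;
};

static struct topo_sketch *topo_alloc(size_t nr_cpus)
{
	size_t sz = nr_cpus * sizeof(char *);
	char *addr = calloc(1, sizeof(struct topo_sketch) + 3 * sz);
	struct topo_sketch *tp = (struct topo_sketch *)addr;

	if (!tp)
		return NULL;
	addr += sizeof(*tp);		/* arrays live right after the struct */
	tp->package_cpus_list = (char **)addr;
	addr += sz;
	tp->die_cpus_list = (char **)addr;
	addr += sz;
	tp->core_cpus_list = (char **)addr;
	return tp;
}
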
378 struct numa_topology *tp = NULL; in numa_topology__new() local
406 tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr); in numa_topology__new()
407 if (!tp) in numa_topology__new()
410 tp->nr = nr; in numa_topology__new()
413 if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) { in numa_topology__new()
414 numa_topology__delete(tp); in numa_topology__new()
415 tp = NULL; in numa_topology__new()
424 return tp; in numa_topology__new()
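
numa_topology__new() uses the flexible-array-member idiom instead: nodes[] trails the struct, so zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr) on line 406 yields one zeroed block sized for exactly nr nodes, and a failed load_numa_node() can hand the partially filled object straight to numa_topology__delete(). A sketch under the assumption that the node type carries a heap-allocated cpus string (the real struct has more fields):

#include <stdlib.h>

struct numa_node_sketch {
	char *cpus;		/* heap string, NULL until loaded */
};

struct numa_sketch {
	unsigned int nr;
	struct numa_node_sketch nodes[];	/* flexible array member */
};

static struct numa_sketch *numa_alloc(unsigned int nr)
{
	/* calloc stands in for the tools' zalloc(): one zeroed block */
	struct numa_sketch *tp =
		calloc(1, sizeof(*tp) + nr * sizeof(tp->nodes[0]));

	if (tp)
		tp->nr = nr;
	return tp;
}
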
427 void numa_topology__delete(struct numa_topology *tp) in numa_topology__delete() argument
431 for (i = 0; i < tp->nr; i++) in numa_topology__delete()
432 zfree(&tp->nodes[i].cpus); in numa_topology__delete()
434 free(tp); in numa_topology__delete()
474 struct hybrid_topology *tp = NULL; in hybrid_topology__new() local
480 tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr); in hybrid_topology__new()
481 if (!tp) in hybrid_topology__new()
484 tp->nr = nr; in hybrid_topology__new()
486 if (load_hybrid_node(&tp->nodes[i], pmu)) { in hybrid_topology__new()
487 hybrid_topology__delete(tp); in hybrid_topology__new()
493 return tp; in hybrid_topology__new()
496 void hybrid_topology__delete(struct hybrid_topology *tp) in hybrid_topology__delete() argument
500 for (i = 0; i < tp->nr; i++) { in hybrid_topology__delete()
501 zfree(&tp->nodes[i].pmu_name); in hybrid_topology__delete()
502 zfree(&tp->nodes[i].cpus); in hybrid_topology__delete()
505 free(tp); in hybrid_topology__delete()
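
hybrid_topology__new() and hybrid_topology__delete() repeat the same shape, except each node owns two strings (pmu_name and cpus), both dropped with zfree() before the container is freed; because the block came from zalloc(), the destructor is also safe on nodes that load_hybrid_node() never reached. A hypothetical caller, assuming the declarations in cputopo.h and a perf-internal include path:

#include <stdio.h>
#include "util/cputopo.h"	/* assumed include path within perf */

static void dump_hybrid(void)
{
	struct hybrid_topology *tp = hybrid_topology__new();

	if (!tp)
		return;		/* NULL on non-hybrid systems or error */
	for (unsigned int i = 0; i < tp->nr; i++)
		printf("%s: %s\n", tp->nodes[i].pmu_name, tp->nodes[i].cpus);
	hybrid_topology__delete(tp);
}
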