trace.c (f01387d2693813eb5271a3448e6a082322c7d75d) -> trace.c (12306276fabcb746a14979e96f43a13c724dec49)
1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>

--- 261 unchanged lines hidden ---

270 ring_buffer_discard_commit(buffer, event);
271 return 1;
272 }
273
274 return 0;
275}
276EXPORT_SYMBOL_GPL(call_filter_check_discard);
277
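The top of call_filter_check_discard() is hidden above; it applies the event's filter predicates and discards the just-reserved ring-buffer event on a miss, which is what the visible tail commits to. A reconstruction of the whole helper as it read in this period, built from the visible tail plus the 3.15-era trace API (the flag and field names are recalled, not shown in this hunk):

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	/* Only filtered events can be discarded here. */
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}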
278cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
278static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
279{
280 u64 ts;
281
282 /* Early boot up does not have a buffer yet */
283 if (!buf->buffer)
284 return trace_clock_local();
285
286 ts = ring_buffer_time_stamp(buf->buffer, cpu);

--- 307 unchanged lines hidden ---

594 return ret;
595
596 tr->allocated_snapshot = true;
597 }
598
599 return 0;
600}
601
602void free_snapshot(struct trace_array *tr)
602static void free_snapshot(struct trace_array *tr)
603{
604 /*
605 * We don't free the ring buffer. Instead, we resize it because
606 * the max_tr ring buffer has some state (e.g. ring->clock) and
607 * we want to preserve it.
608 */
609 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
610 set_buffer_entries(&tr->max_buffer, 1);

--- 307 unchanged lines hidden ---

918
919 *ppos += read;
920 ret = read;
921
922out:
923 return ret;
924}
925
926ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
927{
928 int len;
929 int ret;
930
931 if (!cnt)
932 return 0;
933
934 if (s->len <= s->readpos)
935 return -EBUSY;
936
937 len = s->len - s->readpos;
938 if (cnt > len)
939 cnt = len;
940 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
941 if (ret == cnt)
942 return -EFAULT;
943
944 cnt -= ret;
945
946 s->readpos += cnt;
947 return cnt;
948}
949
950static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
951{
952 int len;
953
954 if (s->len <= s->readpos)
955 return -EBUSY;
956
957 len = s->len - s->readpos;
958 if (cnt > len)
959 cnt = len;
960 memcpy(buf, s->buffer + s->readpos, cnt);
961
962 s->readpos += cnt;
963 return cnt;
964}
965
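Both helpers above implement one contract: return -EBUSY when readpos has caught up with len, otherwise copy at most cnt unread bytes and advance readpos by what was actually delivered (trace_seq_to_user subtracts whatever copy_to_user() could not write). trace_seq_to_user is absent from the newer revision of this file; this cycle appears to have moved the trace_seq helpers into their own file. A user-space model of the readpos contract, with made-up names, for illustration only:

#include <stdio.h>
#include <string.h>

/* Models struct trace_seq's read-position bookkeeping. */
struct seq_model {
	const char *buffer;
	size_t len;	/* bytes produced so far */
	size_t readpos;	/* bytes already consumed */
};

/* Same shape as trace_seq_to_buffer(); -1 stands in for -EBUSY. */
static int seq_model_to_buffer(struct seq_model *s, char *buf, size_t cnt)
{
	size_t avail;

	if (s->len <= s->readpos)
		return -1;		/* nothing unread */

	avail = s->len - s->readpos;
	if (cnt > avail)
		cnt = avail;		/* clamp to the unread region */
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;		/* consume exactly what was copied */
	return (int)cnt;
}

int main(void)
{
	struct seq_model s = { "hello, tracer\n", 14, 0 };
	char chunk[5];
	int n;

	/* Drain in short reads; readpos advances until nothing is left. */
	while ((n = seq_model_to_buffer(&s, chunk, sizeof(chunk) - 1)) > 0) {
		chunk[n] = '\0';
		printf("read %d: \"%s\"\n", n, chunk);
	}
	return 0;
}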
966/*
967 * ftrace_max_lock is used to protect the swapping of buffers
968 * when taking a max snapshot. The buffers themselves are
969 * protected by per_cpu spinlocks. But the action of the swap
970 * needs its own lock.
971 *
972 * This is defined as a arch_spinlock_t in order to help
973 * with performance when lockdep debugging is enabled.
974 *
975 * It is also used in other places outside the update_max_tr
976 * so it needs to be defined outside of the
977 * CONFIG_TRACER_MAX_TRACE.
978 */
979static arch_spinlock_t ftrace_max_lock =
980 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
981
982unsigned long __read_mostly tracing_thresh;
983
984#ifdef CONFIG_TRACER_MAX_TRACE
985unsigned long __read_mostly tracing_max_latency;
986
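From here down the newer revision replaces both globals above: every ftrace_max_lock use becomes tr->max_lock and every tracing_max_latency use becomes tr->max_latency, so each trace instance carries its own max-latency state. A rough sketch of what that implies in struct trace_array (kernel/trace/trace.h; exact placement and #ifdef coverage are assumptions inferred from the usage below):

struct trace_array {
	/* ... existing fields ... */
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
	unsigned long		max_latency;	/* was the global tracing_max_latency */
#endif
	/*
	 * Serializes the buffer swap taken for a max snapshot; also
	 * taken when flipping tracing_cpumask bits or starting and
	 * stopping the buffers, hence outside the #ifdef (was the
	 * global ftrace_max_lock).
	 */
	arch_spinlock_t		max_lock;
	/* ... */
};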
987/*
988 * Copy the new maximum trace into the separate maximum-trace
989 * structure. (this way the maximum trace is permanently saved,
990 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
991 */
992static void
993__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
994{
995 struct trace_buffer *trace_buf = &tr->trace_buffer;
996 struct trace_buffer *max_buf = &tr->max_buffer;
997 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
998 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
999
1000 max_buf->cpu = cpu;
1001 max_buf->time_start = data->preempt_timestamp;
1002
1003 max_data->saved_latency = tracing_max_latency;
961 max_data->saved_latency = tr->max_latency;
1004 max_data->critical_start = data->critical_start;
1005 max_data->critical_end = data->critical_end;
1006
1007 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1008 max_data->pid = tsk->pid;
1009 /*
1010 * If tsk == current, then use current_uid(), as that does not use
1011 * RCU. The irq tracer can be called out of RCU scope.

--- 31 unchanged lines hidden ---

1043 WARN_ON_ONCE(!irqs_disabled());
1044
1045 if (!tr->allocated_snapshot) {
1046 /* Only the nop tracer should hit this when disabling */
1047 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1048 return;
1049 }
1050
1051 arch_spin_lock(&ftrace_max_lock);
1009 arch_spin_lock(&tr->max_lock);
1052
1053 buf = tr->trace_buffer.buffer;
1054 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1055 tr->max_buffer.buffer = buf;
1056
1057 __update_max_tr(tr, tsk, cpu);
1058 arch_spin_unlock(&ftrace_max_lock);
1016 arch_spin_unlock(&tr->max_lock);
1059}
1060
1061/**
1062 * update_max_tr_single - only copy one trace over, and reset the rest
1063 * @tr - tracer
1064 * @tsk - task with the latency
1065 * @cpu - the cpu of the buffer to copy.
1066 *

--- 9 unchanged lines hidden ---

1076
1077 WARN_ON_ONCE(!irqs_disabled());
1078 if (!tr->allocated_snapshot) {
1079 /* Only the nop tracer should hit this when disabling */
1080 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1081 return;
1082 }
1083
1084 arch_spin_lock(&ftrace_max_lock);
1042 arch_spin_lock(&tr->max_lock);
1085
1086 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1087
1088 if (ret == -EBUSY) {
1089 /*
1090 * We failed to swap the buffer due to a commit taking
1091 * place on this CPU. We fail to record, but we reset
1092 * the max trace buffer (no one writes directly to it)
1093 * and flag that it failed.
1094 */
1095 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1096 "Failed to swap buffers due to commit in progress\n");
1097 }
1098
1099 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1100
1101 __update_max_tr(tr, tsk, cpu);
1102 arch_spin_unlock(&ftrace_max_lock);
1060 arch_spin_unlock(&tr->max_lock);
1103}
1104#endif /* CONFIG_TRACER_MAX_TRACE */
1105
1106static void default_wait_pipe(struct trace_iterator *iter)
1064static int wait_on_pipe(struct trace_iterator *iter)
1107{
1108 /* Iterators are static, they should be filled or empty */
1109 if (trace_buffer_iter(iter, iter->cpu_file))
1110 return;
1068 return 0;
1111
1112 ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
1070 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
1113}
1114
1115#ifdef CONFIG_FTRACE_STARTUP_TEST
1116static int run_tracer_selftest(struct tracer *type)
1117{
1118 struct trace_array *tr = &global_trace;
1119 struct tracer *saved_tracer = tr->current_trace;
1120 int ret;

--- 94 unchanged lines hidden ---

1215
1216 if (!type->set_flag)
1217 type->set_flag = &dummy_set_flag;
1218 if (!type->flags)
1219 type->flags = &dummy_tracer_flags;
1220 else
1221 if (!type->flags->opts)
1222 type->flags->opts = dummy_tracer_opt;
1223 if (!type->wait_pipe)
1224 type->wait_pipe = default_wait_pipe;
1225
1226 ret = run_tracer_selftest(type);
1227 if (ret < 0)
1228 goto out;
1229
1230 type->next = trace_types;
1231 trace_types = type;
1232

--- 67 unchanged lines hidden ---

1300 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1301 tracing_reset_online_cpus(&tr->trace_buffer);
1302#ifdef CONFIG_TRACER_MAX_TRACE
1303 tracing_reset_online_cpus(&tr->max_buffer);
1304#endif
1305 }
1306}
1307
1308#define SAVED_CMDLINES 128
1264#define SAVED_CMDLINES_DEFAULT 128
1309#define NO_CMDLINE_MAP UINT_MAX
1310static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1311static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1312static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1313static int cmdline_idx;
1314static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1267struct saved_cmdlines_buffer {
1268 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1269 unsigned *map_cmdline_to_pid;
1270 unsigned cmdline_num;
1271 int cmdline_idx;
1272 char *saved_cmdlines;
1273};
1274static struct saved_cmdlines_buffer *savedcmd;
1315
1316/* temporarily disable recording */
1317static atomic_t trace_record_cmdline_disabled __read_mostly;
1318
1319static void trace_init_cmdlines(void)
1279static inline char *get_saved_cmdlines(int idx)
1320{
1321 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1322 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
1323 cmdline_idx = 0;
1281 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1324}
1325
1284static inline void set_cmdline(int idx, const char *cmdline)
1285{
1286 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1287}
1288
1289static int allocate_cmdlines_buffer(unsigned int val,
1290 struct saved_cmdlines_buffer *s)
1291{
1292 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1293 GFP_KERNEL);
1294 if (!s->map_cmdline_to_pid)
1295 return -ENOMEM;
1296
1297 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1298 if (!s->saved_cmdlines) {
1299 kfree(s->map_cmdline_to_pid);
1300 return -ENOMEM;
1301 }
1302
1303 s->cmdline_idx = 0;
1304 s->cmdline_num = val;
1305 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1306 sizeof(s->map_pid_to_cmdline));
1307 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1308 val * sizeof(*s->map_cmdline_to_pid));
1309
1310 return 0;
1311}
1312
1313static int trace_create_savedcmd(void)
1314{
1315 int ret;
1316
1317 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1318 if (!savedcmd)
1319 return -ENOMEM;
1320
1321 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1322 if (ret < 0) {
1323 kfree(savedcmd);
1324 savedcmd = NULL;
1325 return -ENOMEM;
1326 }
1327
1328 return 0;
1329}
1330
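savedcmd keeps two maps, map_pid_to_cmdline (pid to slot) and map_cmdline_to_pid (slot to pid), so that trace_save_cmdline() below can recycle a slot for a new pid after first invalidating the evicted pid's forward entry. A user-space model of that recycling logic, with made-up sizes and no locking:

#include <stdio.h>
#include <string.h>

#define NCMDS	4		/* models savedcmd->cmdline_num */
#define NPIDS	32		/* models PID_MAX_DEFAULT + 1 */
#define NO_MAP	0xffffffffu	/* models NO_CMDLINE_MAP */

static unsigned pid_to_slot[NPIDS];	/* pid -> comm slot */
static unsigned slot_to_pid[NCMDS];	/* comm slot -> owning pid */
static char comms[NCMDS][16];
static int slot_idx;

static void save_comm(unsigned pid, const char *comm)
{
	unsigned idx = pid_to_slot[pid];

	if (idx == NO_MAP) {
		idx = (slot_idx + 1) % NCMDS;
		/* Evict the slot's old owner, as trace_save_cmdline() does. */
		if (slot_to_pid[idx] != NO_MAP)
			pid_to_slot[slot_to_pid[idx]] = NO_MAP;
		slot_to_pid[idx] = pid;
		pid_to_slot[pid] = idx;
		slot_idx = idx;
	}
	snprintf(comms[idx], sizeof(comms[idx]), "%s", comm);
}

static const char *find_comm(unsigned pid)
{
	unsigned idx = pid_to_slot[pid];
	return idx == NO_MAP ? "<...>" : comms[idx];
}

int main(void)
{
	/* 0xff bytes make every unsigned entry equal NO_MAP. */
	memset(pid_to_slot, 0xff, sizeof(pid_to_slot));
	memset(slot_to_pid, 0xff, sizeof(slot_to_pid));

	for (unsigned pid = 1; pid <= 6; pid++)
		save_comm(pid, pid % 2 ? "bash" : "cc1");

	/* The oldest pids were evicted once the four slots wrapped. */
	for (unsigned pid = 1; pid <= 6; pid++)
		printf("pid %u -> %s\n", pid, find_comm(pid));
	return 0;
}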
1326int is_tracing_stopped(void)
1327{
1328 return global_trace.stop_count;
1329}
1330
1331/**
1332 * tracing_start - quick start of the tracer
1333 *

--- 14 unchanged lines hidden ---

1348 /* Someone screwed up their debugging */
1349 WARN_ON_ONCE(1);
1350 global_trace.stop_count = 0;
1351 }
1352 goto out;
1353 }
1354
1355 /* Prevent the buffers from switching */
1356 arch_spin_lock(&ftrace_max_lock);
1361 arch_spin_lock(&global_trace.max_lock);
1357
1358 buffer = global_trace.trace_buffer.buffer;
1359 if (buffer)
1360 ring_buffer_record_enable(buffer);
1361
1362#ifdef CONFIG_TRACER_MAX_TRACE
1363 buffer = global_trace.max_buffer.buffer;
1364 if (buffer)
1365 ring_buffer_record_enable(buffer);
1366#endif
1367
1368 arch_spin_unlock(&ftrace_max_lock);
1373 arch_spin_unlock(&global_trace.max_lock);
1369
1370 ftrace_start();
1371 out:
1372 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1373}
1374
1375static void tracing_start_tr(struct trace_array *tr)
1376{

--- 38 unchanged lines hidden ---

1415 unsigned long flags;
1416
1417 ftrace_stop();
1418 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1419 if (global_trace.stop_count++)
1420 goto out;
1421
1422 /* Prevent the buffers from switching */
1423 arch_spin_lock(&ftrace_max_lock);
1428 arch_spin_lock(&global_trace.max_lock);
1424
1425 buffer = global_trace.trace_buffer.buffer;
1426 if (buffer)
1427 ring_buffer_record_disable(buffer);
1428
1429#ifdef CONFIG_TRACER_MAX_TRACE
1430 buffer = global_trace.max_buffer.buffer;
1431 if (buffer)
1432 ring_buffer_record_disable(buffer);
1433#endif
1434
1435 arch_spin_unlock(&ftrace_max_lock);
1440 arch_spin_unlock(&global_trace.max_lock);
1436
1437 out:
1438 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1439}
1440
1441static void tracing_stop_tr(struct trace_array *tr)
1442{
1443 struct ring_buffer *buffer;

--- 12 unchanged lines hidden ---

1456 ring_buffer_record_disable(buffer);
1457
1458 out:
1459 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1460}
1461
1462void trace_stop_cmdline_recording(void);
1463
1464static void trace_save_cmdline(struct task_struct *tsk)
1469static int trace_save_cmdline(struct task_struct *tsk)
1465{
1466 unsigned pid, idx;
1467
1468 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1469 return;
1474 return 0;
1470
1471 /*
1472 * It's not the end of the world if we don't get
1473 * the lock, but we also don't want to spin
1474 * nor do we want to disable interrupts,
1475 * so if we miss here, then better luck next time.
1476 */
1477 if (!arch_spin_trylock(&trace_cmdline_lock))
1478 return;
1483 return 0;
1479
1480 idx = map_pid_to_cmdline[tsk->pid];
1485 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1481 if (idx == NO_CMDLINE_MAP) {
1482 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1487 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1483
1484 /*
1485 * Check whether the cmdline buffer at idx has a pid
1486 * mapped. We are going to overwrite that entry so we
1487 * need to clear the map_pid_to_cmdline. Otherwise we
1488 * would read the new comm for the old pid.
1489 */
1490 pid = map_cmdline_to_pid[idx];
1495 pid = savedcmd->map_cmdline_to_pid[idx];
1491 if (pid != NO_CMDLINE_MAP)
1492 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1497 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1493
1494 map_cmdline_to_pid[idx] = tsk->pid;
1495 map_pid_to_cmdline[tsk->pid] = idx;
1499 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1500 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1496
1497 cmdline_idx = idx;
1502 savedcmd->cmdline_idx = idx;
1498 }
1499
1500 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1505 set_cmdline(idx, tsk->comm);
1501
1502 arch_spin_unlock(&trace_cmdline_lock);
1509 return 1;
1503}
1504
1505void trace_find_cmdline(int pid, char comm[])
1512static void __trace_find_cmdline(int pid, char comm[])
1506{
1507 unsigned map;
1508
1509 if (!pid) {
1510 strcpy(comm, "<idle>");
1511 return;
1512 }
1513
1514 if (WARN_ON_ONCE(pid < 0)) {
1515 strcpy(comm, "<XXX>");
1516 return;
1517 }
1518
1519 if (pid > PID_MAX_DEFAULT) {
1520 strcpy(comm, "<...>");
1521 return;
1522 }
1523
1524 preempt_disable();
1525 arch_spin_lock(&trace_cmdline_lock);
1526 map = map_pid_to_cmdline[pid];
1531 map = savedcmd->map_pid_to_cmdline[pid];
1527 if (map != NO_CMDLINE_MAP)
1528 strcpy(comm, saved_cmdlines[map]);
1533 strcpy(comm, get_saved_cmdlines(map));
1529 else
1530 strcpy(comm, "<...>");
1536}
1531
1538void trace_find_cmdline(int pid, char comm[])
1539{
1540 preempt_disable();
1541 arch_spin_lock(&trace_cmdline_lock);
1542
1543 __trace_find_cmdline(pid, comm);
1544
1532 arch_spin_unlock(&trace_cmdline_lock);
1533 preempt_enable();
1534}
1535
1536void tracing_record_cmdline(struct task_struct *tsk)
1537{
1538 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1539 return;
1540
1541 if (!__this_cpu_read(trace_cmdline_save))
1542 return;
1543
1544 __this_cpu_write(trace_cmdline_save, false);
1545
1546 trace_save_cmdline(tsk);
1557 if (trace_save_cmdline(tsk))
1558 __this_cpu_write(trace_cmdline_save, false);
1547}
1548
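The caller-side reordering above is the substantive fix in this hunk: the old code cleared the per-cpu trace_cmdline_save flag before calling trace_save_cmdline(), so a contended trylock inside the save dropped the task's comm for good; the new code clears the flag only when the save succeeds, letting a contended attempt retry on a later event. A compact model of the new ordering (the contended flag stands in for losing arch_spin_trylock()):

#include <stdbool.h>
#include <stdio.h>

static bool need_save = true;	/* models per-cpu trace_cmdline_save */

static bool save_cmdline(bool contended)
{
	if (contended)
		return false;	/* trylock missed: nothing was recorded */
	/* ... record tsk->comm into the slot maps ... */
	return true;
}

static void record_cmdline(bool contended)
{
	if (!need_save)
		return;
	if (save_cmdline(contended))
		need_save = false;	/* clear only after a successful save */
}

int main(void)
{
	record_cmdline(true);	/* contended: the flag stays set */
	printf("after contended attempt: need_save=%d\n", need_save);
	record_cmdline(false);	/* the retry succeeds and clears it */
	printf("after retry: need_save=%d\n", need_save);
	return 0;
}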
1549void
1550tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1551 int pc)
1552{
1553 struct task_struct *tsk = current;
1554

--- 186 unchanged lines hidden ---

1741 * We don't need any atomic variables, just a barrier.
1742 * If an interrupt comes in, we don't care, because it would
1743 * have exited and put the counter back to what we want.
1744 * We just need a barrier to keep gcc from moving things
1745 * around.
1746 */
1747 barrier();
1748 if (use_stack == 1) {
1749 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1761 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1750 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1751
1752 if (regs)
1753 save_stack_trace_regs(regs, &trace);
1754 else
1755 save_stack_trace(&trace);
1756
1757 if (trace.nr_entries > size)

--- 232 unchanged lines hidden ---

1990void trace_printk_init_buffers(void)
1991{
1992 if (buffers_allocated)
1993 return;
1994
1995 if (alloc_percpu_trace_buffer())
1996 return;
1997
1998 pr_info("ftrace: Allocated trace_printk buffers\n");
2010 /* trace_printk() is for debug use only. Don't use it in production. */
1999
2012 pr_warning("\n**********************************************************\n");
2013 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2014 pr_warning("** **\n");
2015 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2016 pr_warning("** **\n");
2017 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2018 pr_warning("** unsafe for produciton use. **\n");
2019 pr_warning("** **\n");
2020 pr_warning("** If you see this message and you are not debugging **\n");
2021 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2022 pr_warning("** **\n");
2023 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2024 pr_warning("**********************************************************\n");
2025
2000 /* Expand the buffers to set size */
2001 tracing_update_buffers();
2002
2003 buffers_allocated = 1;
2004
2005 /*
2006 * trace_printk_init_buffers() can be called by modules.
2007 * If that happens, then we need to start cmdline recording

--- 1320 unchanged lines hidden ---

3328
3329 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3330 if (err)
3331 goto err_unlock;
3332
3333 mutex_lock(&tracing_cpumask_update_lock);
3334
3335 local_irq_disable();
3336 arch_spin_lock(&ftrace_max_lock);
3362 arch_spin_lock(&tr->max_lock);
3337 for_each_tracing_cpu(cpu) {
3338 /*
3339 * Increase/decrease the disabled counter if we are
3340 * about to flip a bit in the cpumask:
3341 */
3342 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3343 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3344 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3345 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3346 }
3347 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3348 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3349 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3350 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3351 }
3352 }
3353 arch_spin_unlock(&ftrace_max_lock);
3379 arch_spin_unlock(&tr->max_lock);
3354 local_irq_enable();
3355
3356 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3357
3358 mutex_unlock(&tracing_cpumask_update_lock);
3359 free_cpumask_var(tracing_cpumask_new);
3360
3361 return count;

--- 225 unchanged lines hidden ---

3587#endif
3588 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3589 " tracing_cpumask\t- Limit which CPUs to trace\n"
3590 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3591 "\t\t\t Remove sub-buffer with rmdir\n"
3592 " trace_options\t\t- Set format or modify how tracing happens\n"
3593 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3594 "\t\t\t option name\n"
3621 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3595#ifdef CONFIG_DYNAMIC_FTRACE
3596 "\n available_filter_functions - list of functions that can be filtered on\n"
3597 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3598 "\t\t\t functions\n"
3599 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3600 "\t modules: Can select a group via module\n"
3601 "\t Format: :mod:<module-name>\n"
3602 "\t example: echo :mod:ext3 > set_ftrace_filter\n"

--- 97 unchanged lines hidden ---

3700}
3701
3702static const struct file_operations tracing_readme_fops = {
3703 .open = tracing_open_generic,
3704 .read = tracing_readme_read,
3705 .llseek = generic_file_llseek,
3706};
3707
3735static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3736{
3737 unsigned int *ptr = v;
3738
3739 if (*pos || m->count)
3740 ptr++;
3741
3742 (*pos)++;
3743
3744 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3745 ptr++) {
3746 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3747 continue;
3748
3749 return ptr;
3750 }
3751
3752 return NULL;
3753}
3754
3755static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3756{
3757 void *v;
3758 loff_t l = 0;
3759
3760 preempt_disable();
3761 arch_spin_lock(&trace_cmdline_lock);
3762
3763 v = &savedcmd->map_cmdline_to_pid[0];
3764 while (l <= *pos) {
3765 v = saved_cmdlines_next(m, v, &l);
3766 if (!v)
3767 return NULL;
3768 }
3769
3770 return v;
3771}
3772
3773static void saved_cmdlines_stop(struct seq_file *m, void *v)
3774{
3775 arch_spin_unlock(&trace_cmdline_lock);
3776 preempt_enable();
3777}
3778
3779static int saved_cmdlines_show(struct seq_file *m, void *v)
3780{
3781 char buf[TASK_COMM_LEN];
3782 unsigned int *pid = v;
3783
3784 __trace_find_cmdline(*pid, buf);
3785 seq_printf(m, "%d %s\n", *pid, buf);
3786 return 0;
3787}
3788
3789static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3790 .start = saved_cmdlines_start,
3791 .next = saved_cmdlines_next,
3792 .stop = saved_cmdlines_stop,
3793 .show = saved_cmdlines_show,
3794};
3795
3796static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3797{
3798 if (tracing_disabled)
3799 return -ENODEV;
3800
3801 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3802}
3803
3804static const struct file_operations tracing_saved_cmdlines_fops = {
3805 .open = tracing_saved_cmdlines_open,
3806 .read = seq_read,
3807 .llseek = seq_lseek,
3808 .release = seq_release,
3809};
3810
3708static ssize_t
3709tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3710 size_t cnt, loff_t *ppos)
3812tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3813 size_t cnt, loff_t *ppos)
3711{
3712 char *buf_comm;
3713 char *file_buf;
3714 char *buf;
3715 int len = 0;
3716 int pid;
3717 int i;
3815 char buf[64];
3816 int r;
3718
3719 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3720 if (!file_buf)
3818 arch_spin_lock(&trace_cmdline_lock);
3819 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3820 arch_spin_unlock(&trace_cmdline_lock);
3821
3822 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3823}
3824
3825static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3826{
3827 kfree(s->saved_cmdlines);
3828 kfree(s->map_cmdline_to_pid);
3829 kfree(s);
3830}
3831
3832static int tracing_resize_saved_cmdlines(unsigned int val)
3833{
3834 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3835
3836 s = kmalloc(sizeof(*s), GFP_KERNEL);
3837 if (!s)
3721 return -ENOMEM;
3722
3723 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3724 if (!buf_comm) {
3725 kfree(file_buf);
3840 if (allocate_cmdlines_buffer(val, s) < 0) {
3841 kfree(s);
3726 return -ENOMEM;
3727 }
3728
3729 buf = file_buf;
3845 arch_spin_lock(&trace_cmdline_lock);
3846 savedcmd_temp = savedcmd;
3847 savedcmd = s;
3848 arch_spin_unlock(&trace_cmdline_lock);
3849 free_saved_cmdlines_buffer(savedcmd_temp);
3730
3731 for (i = 0; i < SAVED_CMDLINES; i++) {
3732 int r;
3851 return 0;
3852}
3733
3734 pid = map_cmdline_to_pid[i];
3735 if (pid == -1 || pid == NO_CMDLINE_MAP)
3736 continue;
3854static ssize_t
3855tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3856 size_t cnt, loff_t *ppos)
3857{
3858 unsigned long val;
3859 int ret;
3737
3738 trace_find_cmdline(pid, buf_comm);
3739 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3740 buf += r;
3741 len += r;
3742 }
3861 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3862 if (ret)
3863 return ret;
3743
3744 len = simple_read_from_buffer(ubuf, cnt, ppos,
3745 file_buf, len);
3865 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3866 if (!val || val > PID_MAX_DEFAULT)
3867 return -EINVAL;
3746
3747 kfree(file_buf);
3748 kfree(buf_comm);
3869 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3870 if (ret < 0)
3871 return ret;
3749
3750 return len;
3873 *ppos += cnt;
3874
3875 return cnt;
3751}
3752
3753static const struct file_operations tracing_saved_cmdlines_fops = {
3754 .open = tracing_open_generic,
3755 .read = tracing_saved_cmdlines_read,
3756 .llseek = generic_file_llseek,
3878static const struct file_operations tracing_saved_cmdlines_size_fops = {
3879 .open = tracing_open_generic,
3880 .read = tracing_saved_cmdlines_size_read,
3881 .write = tracing_saved_cmdlines_size_write,
3757};
3758
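tracing_resize_saved_cmdlines() above never reallocates in place: it builds a complete replacement, publishes it with a bare pointer swap inside the trace_cmdline_lock section, and frees the displaced buffer only after dropping the lock, so the arch-spinlock hold time stays a few stores long. A user-space model of that publish-then-free pattern, with a pthread spinlock standing in for arch_spinlock_t (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct table {
	size_t n;
	char *names;	/* n entries of 16 bytes each */
};

static struct table *current_tbl;
static pthread_spinlock_t tbl_lock;

/* Models tracing_resize_saved_cmdlines(): build, swap, then free. */
static int table_resize(size_t n)
{
	struct table *s, *old;

	s = malloc(sizeof(*s));		/* all allocation outside the lock */
	if (!s)
		return -1;
	s->names = calloc(n, 16);
	if (!s->names) {
		free(s);
		return -1;
	}
	s->n = n;

	pthread_spin_lock(&tbl_lock);	/* publish: just a pointer swap */
	old = current_tbl;
	current_tbl = s;
	pthread_spin_unlock(&tbl_lock);

	if (old) {			/* free the old copy outside the lock */
		free(old->names);
		free(old);
	}
	return 0;
}

int main(void)
{
	pthread_spin_init(&tbl_lock, PTHREAD_PROCESS_PRIVATE);
	table_resize(128);
	table_resize(1024);
	printf("entries: %zu\n", current_tbl->n);
	return 0;
}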
3759static ssize_t
3760tracing_set_trace_read(struct file *filp, char __user *ubuf,
3761 size_t cnt, loff_t *ppos)
3762{
3763 struct trace_array *tr = filp->private_data;
3764 char buf[MAX_TRACER_SIZE+2];

--- 455 unchanged lines hidden ---

4220static unsigned int
4221tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4222{
4223 struct trace_iterator *iter = filp->private_data;
4224
4225 return trace_poll(iter, filp, poll_table);
4226}
4227
4228/*
4229 * This is a make-shift waitqueue.
4230 * A tracer might use this callback on some rare cases:
4231 *
4232 * 1) the current tracer might hold the runqueue lock when it wakes up
4233 * a reader, hence a deadlock (sched, function, and function graph tracers)
4234 * 2) the function tracers, trace all functions, we don't want
4235 * the overhead of calling wake_up and friends
4236 * (and tracing them too)
4237 *
4238 * Anyway, this is really very primitive wakeup.
4239 */
4240void poll_wait_pipe(struct trace_iterator *iter)
4241{
4242 set_current_state(TASK_INTERRUPTIBLE);
4243 /* sleep for 100 msecs, and try again. */
4244 schedule_timeout(HZ / 10);
4245}
4246
4247/* Must be called with trace_types_lock mutex held. */
4248static int tracing_wait_pipe(struct file *filp)
4249{
4250 struct trace_iterator *iter = filp->private_data;
4357 int ret;
4251
4252 while (trace_empty(iter)) {
4253
4254 if ((filp->f_flags & O_NONBLOCK)) {
4255 return -EAGAIN;
4256 }
4257
4258 mutex_unlock(&iter->mutex);
4259
4260 iter->trace->wait_pipe(iter);
4261
4262 mutex_lock(&iter->mutex);
4263
4264 if (signal_pending(current))
4265 return -EINTR;
4266
4267 /*
4268 * We block until we read something and tracing is disabled.
4269 * We still block if tracing is disabled, but we have never
4270 * read anything. This allows a user to cat this file, and
4271 * then enable tracing. But after we have read something,
4272 * we give an EOF when tracing is again disabled.
4273 *
4274 * iter->pos will be 0 if we haven't read anything.
4275 */
4276 if (!tracing_is_on() && iter->pos)
4277 break;
4376
4377 mutex_unlock(&iter->mutex);
4378
4379 ret = wait_on_pipe(iter);
4380
4381 mutex_lock(&iter->mutex);
4382
4383 if (ret)
4384 return ret;
4385
4386 if (signal_pending(current))
4387 return -EINTR;
4278 }
4279
4280 return 1;
4281}
4282
4283/*
4284 * Consumer reader.
4285 */

--- 906 unchanged lines hidden ---

5192
5193 if (ret < 0) {
5194 if (trace_empty(iter)) {
5195 if ((filp->f_flags & O_NONBLOCK)) {
5196 size = -EAGAIN;
5197 goto out_unlock;
5198 }
5199 mutex_unlock(&trace_types_lock);
5200 iter->trace->wait_pipe(iter);
5310 ret = wait_on_pipe(iter);
5201 mutex_lock(&trace_types_lock);
5312 if (ret) {
5313 size = ret;
5314 goto out_unlock;
5315 }
5202 if (signal_pending(current)) {
5203 size = -EINTR;
5204 goto out_unlock;
5205 }
5206 goto again;
5207 }
5208 size = 0;
5209 goto out_unlock;

--- 193 unchanged lines hidden ---

5403
5404 /* did we read anything? */
5405 if (!spd.nr_pages) {
5406 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5407 ret = -EAGAIN;
5408 goto out;
5409 }
5410 mutex_unlock(&trace_types_lock);
5411 iter->trace->wait_pipe(iter);
5525 ret = wait_on_pipe(iter);
5412 mutex_lock(&trace_types_lock);
5527 if (ret)
5528 goto out;
5413 if (signal_pending(current)) {
5414 ret = -EINTR;
5415 goto out;
5416 }
5417 goto again;
5418 }
5419
5420 ret = splice_to_pipe(pipe, &spd);

--- 676 unchanged lines hidden ---

6097 * Only the top level trace array gets its snapshot allocated
6098 * from the kernel command line.
6099 */
6100 allocate_snapshot = false;
6101#endif
6102 return 0;
6103}
6104
6221static void free_trace_buffer(struct trace_buffer *buf)
6222{
6223 if (buf->buffer) {
6224 ring_buffer_free(buf->buffer);
6225 buf->buffer = NULL;
6226 free_percpu(buf->data);
6227 buf->data = NULL;
6228 }
6229}
6230
6231static void free_trace_buffers(struct trace_array *tr)
6232{
6233 if (!tr)
6234 return;
6235
6236 free_trace_buffer(&tr->trace_buffer);
6237
6238#ifdef CONFIG_TRACER_MAX_TRACE
6239 free_trace_buffer(&tr->max_buffer);
6240#endif
6241}
6242
6105static int new_instance_create(const char *name)
6106{
6107 struct trace_array *tr;
6108 int ret;
6109
6110 mutex_lock(&trace_types_lock);
6111
6112 ret = -EEXIST;

--- 13 unchanged lines hidden ---

6126
6127 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6128 goto out_free_tr;
6129
6130 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6131
6132 raw_spin_lock_init(&tr->start_lock);
6133
6272 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6273
6134 tr->current_trace = &nop_trace;
6135
6136 INIT_LIST_HEAD(&tr->systems);
6137 INIT_LIST_HEAD(&tr->events);
6138
6139 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6140 goto out_free_tr;
6141

--- 11 unchanged lines hidden (view full) ---

6153
6154 list_add(&tr->list, &ftrace_trace_arrays);
6155
6156 mutex_unlock(&trace_types_lock);
6157
6158 return 0;
6159
6160 out_free_tr:
6161 if (tr->trace_buffer.buffer)
6162 ring_buffer_free(tr->trace_buffer.buffer);
6301 free_trace_buffers(tr);
6163 free_cpumask_var(tr->tracing_cpumask);
6164 kfree(tr->name);
6165 kfree(tr);
6166
6167 out_unlock:
6168 mutex_unlock(&trace_types_lock);
6169
6170 return ret;

--- 23 unchanged lines hidden ---

6194 goto out_unlock;
6195
6196 list_del(&tr->list);
6197
6198 tracing_set_nop(tr);
6199 event_trace_del_tracer(tr);
6200 ftrace_destroy_function_files(tr);
6201 debugfs_remove_recursive(tr->dir);
6202 free_percpu(tr->trace_buffer.data);
6203 ring_buffer_free(tr->trace_buffer.buffer);
6341 free_trace_buffers(tr);
6204
6205 kfree(tr->name);
6206 kfree(tr);
6207
6208 ret = 0;
6209
6210 out_unlock:
6211 mutex_unlock(&trace_types_lock);

--- 111 unchanged lines hidden ---

6323 tr, &tracing_mark_fops);
6324
6325 trace_create_file("trace_clock", 0644, d_tracer, tr,
6326 &trace_clock_fops);
6327
6328 trace_create_file("tracing_on", 0644, d_tracer,
6329 tr, &rb_simple_fops);
6330
6469#ifdef CONFIG_TRACER_MAX_TRACE
6470 trace_create_file("tracing_max_latency", 0644, d_tracer,
6471 &tr->max_latency, &tracing_max_lat_fops);
6472#endif
6473
6331 if (ftrace_create_function_files(tr, d_tracer))
6332 WARN(1, "Could not allocate function filter files");
6333
6334#ifdef CONFIG_TRACER_SNAPSHOT
6335 trace_create_file("snapshot", 0644, d_tracer,
6336 tr, &snapshot_fops);
6337#endif
6338

--- 9 unchanged lines hidden ---

6348 trace_access_lock_init();
6349
6350 d_tracer = tracing_init_dentry();
6351 if (!d_tracer)
6352 return 0;
6353
6354 init_tracer_debugfs(&global_trace, d_tracer);
6355
6356#ifdef CONFIG_TRACER_MAX_TRACE
6357 trace_create_file("tracing_max_latency", 0644, d_tracer,
6358 &tracing_max_latency, &tracing_max_lat_fops);
6359#endif
6360
6361 trace_create_file("tracing_thresh", 0644, d_tracer,
6362 &tracing_thresh, &tracing_max_lat_fops);
6363
6364 trace_create_file("README", 0444, d_tracer,
6365 NULL, &tracing_readme_fops);
6366
6367 trace_create_file("saved_cmdlines", 0444, d_tracer,
6368 NULL, &tracing_saved_cmdlines_fops);
6369
6508 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6509 NULL, &tracing_saved_cmdlines_size_fops);
6510
6370#ifdef CONFIG_DYNAMIC_FTRACE
6371 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6372 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6373#endif
6374
6375 create_trace_instances(d_tracer);
6376
6377 create_trace_options_dir(&global_trace);

--- 220 unchanged lines hidden ---

6598
6599 raw_spin_lock_init(&global_trace.start_lock);
6600
6601 /* Used for event triggers */
6602 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6603 if (!temp_buffer)
6604 goto out_free_cpumask;
6605
6747 if (trace_create_savedcmd() < 0)
6748 goto out_free_temp_buffer;
6749
6606 /* TODO: make the number of buffers hot pluggable with CPUS */
6607 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6608 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6609 WARN_ON(1);
6610 goto out_free_temp_buffer;
6754 goto out_free_savedcmd;
6611 }
6612
6613 if (global_trace.buffer_disabled)
6614 tracing_off();
6615
6616 trace_init_cmdlines();
6617
6618 if (trace_boot_clock) {
6619 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6620 if (ret < 0)
6621 pr_warning("Trace clock %s not defined, going back to default\n",
6622 trace_boot_clock);
6623 }
6624
6625 /*
6626 * register_tracer() might reference current_trace, so it
6627 * needs to be set before we register anything. This is
6628 * just a bootstrap of current_trace anyway.
6629 */
6630 global_trace.current_trace = &nop_trace;
6631
6774 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6775
6776 ftrace_init_global_array_ops(&global_trace);
6777
6632 register_tracer(&nop_trace);
6633
6634 /* All seems OK, enable tracing */
6635 tracing_disabled = 0;
6636
6637 atomic_notifier_chain_register(&panic_notifier_list,
6638 &trace_panic_notifier);
6639

--- 11 unchanged lines hidden ---

6651 option = strsep(&trace_boot_options, ",");
6652 trace_set_options(&global_trace, option);
6653 }
6654
6655 register_snapshot_cmd();
6656
6657 return 0;
6658
6805out_free_savedcmd:
6806 free_saved_cmdlines_buffer(savedcmd);
6659out_free_temp_buffer:
6660 ring_buffer_free(temp_buffer);
6661out_free_cpumask:
6662 free_percpu(global_trace.trace_buffer.data);
6663#ifdef CONFIG_TRACER_MAX_TRACE
6664 free_percpu(global_trace.max_buffer.data);
6665#endif
6666 free_cpumask_var(global_trace.tracing_cpumask);
6667out_free_buffer_mask:
6668 free_cpumask_var(tracing_buffer_mask);
6669out:
6670 return ret;
6671}
6672
6673__init static int clear_boot_tracer(void)

--- 21 unchanged lines hidden ---