Blame view

kernel/linux-imx6_3.14.28/tools/perf/tests/task-exit.c 2.7 KB
6b13f685e   김민수   Initial BSP addition
  #include "evlist.h"
  #include "evsel.h"
  #include "thread_map.h"
  #include "cpumap.h"
  #include "tests.h"
  
  #include <signal.h>
  
  static int exited;
  static int nr_exit;
  
  static void sig_handler(int sig __maybe_unused)
  {
  	exited = 1;
  }
  
  /*
   * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails,
   * since we asked for it by setting its exec_error to this handler.
   */
  static void workload_exec_failed_signal(int signo __maybe_unused,
  					siginfo_t *info __maybe_unused,
  					void *ucontext __maybe_unused)
  {
  	exited	= 1;
  	nr_exit = -1;
  }
  
  /*
   * This test starts a workload that does nothing, then checks whether
   * the number of exit events reported by the kernel is exactly 1, in
   * order to verify that the kernel returns the correct number of events.
   */
  int test__task_exit(void)
  {
  	int err = -1;
  	union perf_event *event;
  	struct perf_evsel *evsel;
  	struct perf_evlist *evlist;
  	struct target target = {
  		.uid		= UINT_MAX,
  		.uses_mmap	= true,
  	};
  	const char *argv[] = { "true", NULL };
  
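  	/*
  	 * Catch SIGCHLD so the read loop below can tell when the forked
  	 * workload has terminated.
  	 */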
  	signal(SIGCHLD, sig_handler);
  
  	evlist = perf_evlist__new_default();
  	if (evlist == NULL) {
  		pr_debug("perf_evlist__new_default\n");
  		return -1;
  	}
  
  	/*
  	 * Create maps of threads and cpus to monitor. In this case
  	 * we start with all threads and cpus (-1, -1) but then in
  	 * perf_evlist__prepare_workload we'll fill in the only thread
  	 * we're monitoring, the one forked there.
  	 */
  	evlist->cpus = cpu_map__dummy_new();
  	evlist->threads = thread_map__new_by_tid(-1);
  	if (!evlist->cpus || !evlist->threads) {
  		err = -ENOMEM;
  		pr_debug("Not enough memory to create thread/cpu maps\n");
  		goto out_delete_evlist;
  	}
  
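  	/*
  	 * Fork the workload ("true"); it is not started yet, it waits
  	 * for perf_evlist__start_workload() below.
  	 */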
  	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
  					    workload_exec_failed_signal);
  	if (err < 0) {
  		pr_debug("Couldn't run the workload!\n");
  		goto out_delete_evlist;
  	}
  
  	evsel = perf_evlist__first(evlist);
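  	/*
  	 * Only task (fork/exit) records are of interest here: disable
  	 * sampling, inheritance and kernel events, and wake up the
  	 * poll below after every single event.
  	 */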
  	evsel->attr.task = 1;
  	evsel->attr.sample_freq = 0;
  	evsel->attr.inherit = 0;
  	evsel->attr.watermark = 0;
  	evsel->attr.wakeup_events = 1;
  	evsel->attr.exclude_kernel = 1;
  
  	err = perf_evlist__open(evlist);
  	if (err < 0) {
  		pr_debug("Couldn't open the evlist: %s\n", strerror(-err));
  		goto out_delete_evlist;
  	}
  
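  	/* Map the ring buffer so the records can be read back below. */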
  	if (perf_evlist__mmap(evlist, 128, true) < 0) {
  		pr_debug("failed to mmap events: %d (%s)\n", errno,
  			 strerror(errno));
  		goto out_delete_evlist;
  	}
  
  	perf_evlist__start_workload(evlist);
  
  retry:
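  	/*
  	 * Drain whatever is currently in the ring buffer, counting EXIT
  	 * records; if the child has not exited yet or no EXIT record has
  	 * been seen, block in poll() and try again.
  	 */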
  	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
  		if (event->header.type == PERF_RECORD_EXIT)
  			nr_exit++;
  
  		perf_evlist__mmap_consume(evlist, 0);
  	}
  
  	if (!exited || !nr_exit) {
  		poll(evlist->pollfd, evlist->nr_fds, -1);
  		goto retry;
  	}
  
  	if (nr_exit != 1) {
  		pr_debug("received %d EXIT records\n", nr_exit);
  		err = -1;
  	}
  
  out_delete_evlist:
  	perf_evlist__delete(evlist);
  	return err;
  }