Blame view

kernel/linux-imx6_3.14.28/drivers/oprofile/cpu_buffer.h 2.84 KB
6b13f685e   김민수   BSP 최초 추가
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
  /**
   * @file cpu_buffer.h
   *
   * @remark Copyright 2002-2009 OProfile authors
   * @remark Read the file COPYING
   *
   * @author John Levon <levon@movementarian.org>
   * @author Robert Richter <robert.richter@amd.com>
   */
  
  #ifndef OPROFILE_CPU_BUFFER_H
  #define OPROFILE_CPU_BUFFER_H
  
  #include <linux/types.h>
  #include <linux/spinlock.h>
  #include <linux/workqueue.h>
  #include <linux/cache.h>
  #include <linux/sched.h>
  #include <linux/ring_buffer.h>
  
  struct task_struct;
  
  int alloc_cpu_buffers(void);
  void free_cpu_buffers(void);
  
  void start_cpu_work(void);
  void end_cpu_work(void);
  void flush_cpu_work(void);
  
  /* CPU buffer is composed of such entries (which are
   * also used for context switch notes)
   */
  struct op_sample {
  	unsigned long eip;	/* sampled instruction pointer */
  	unsigned long event;	/* event value, or a flag word (see flags below) */
  	/*
  	 * Variable-length trailing payload. Use a C99 flexible array
  	 * member rather than the GNU zero-length-array extension
  	 * (data[0]); layout and sizeof are identical, but [] is
  	 * standard C and lets the compiler diagnose misuse.
  	 */
  	unsigned long data[];
  };
  
  struct op_entry;
  
  /* Per-CPU sample buffer state; one instance per CPU (see op_cpu_buffer). */
  struct oprofile_cpu_buffer {
  	unsigned long buffer_size;	/* capacity of this CPU's buffer */
  	/* Cached context of the previous sample; reset to NULL / -1 by
  	 * op_cpu_buffer_reset() so the next sample re-records context. */
  	struct task_struct *last_task;
  	int last_is_kernel;		/* -1 = invalid/unknown after reset */
  	int tracing;			/* nonzero while a backtrace is being recorded — TODO confirm against cpu_buffer.c */
  	/* Statistics counters. */
  	unsigned long sample_received;
  	unsigned long sample_lost_overflow;
  	unsigned long backtrace_aborted;
  	unsigned long sample_invalid_eip;
  	int cpu;			/* owning CPU number */
  	struct delayed_work work;	/* deferred flush work (start/end/flush_cpu_work) */
  };
  
  DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
  
  /*
   * Resets the cpu buffer to a sane state.
   *
   * reset these to invalid values; the next sample collected will
   * populate the buffer with proper values to initialize the buffer
   */
  static inline void op_cpu_buffer_reset(int cpu)
  {
  	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
  
  	cpu_buf->last_is_kernel = -1;
  	cpu_buf->last_task = NULL;
  }
  
  /*
   * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
   * called only if op_cpu_buffer_write_reserve() did not return NULL or
   * entry->event != NULL, otherwise entry->size or entry->event will be
   * used uninitialized.
   */
  
  struct op_sample
  *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
  int op_cpu_buffer_write_commit(struct op_entry *entry);
  struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
  unsigned long op_cpu_buffer_entries(int cpu);
  
  /* Append @val to the entry's payload; returns the space still free
   * afterwards (0 when the entry was already full and nothing was stored). */
  static inline
  int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
  {
  	if (entry->size == 0)
  		return 0;
  	*entry->data++ = val;
  	return --entry->size;
  }
  
  /* Number of data words still readable in the entry. */
  static inline
  int op_cpu_buffer_get_size(struct op_entry *entry)
  {
  	int remaining = entry->size;
  
  	return remaining;
  }
  
  /* Pop one value from the entry into @val. Returns 0 when the entry is
   * empty, otherwise the size of data including the value just read. */
  static inline
  int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
  {
  	int remaining = entry->size;
  
  	if (remaining == 0)
  		return 0;
  	*val = *entry->data++;
  	entry->size = remaining - 1;
  	return remaining;
  }
  
  /* extra data flags */
  #define KERNEL_CTX_SWITCH	(1UL << 0)
  #define IS_KERNEL		(1UL << 1)
  #define TRACE_BEGIN		(1UL << 2)
  #define USER_CTX_SWITCH		(1UL << 3)
  
  #endif /* OPROFILE_CPU_BUFFER_H */