Blame view

kernel/linux-imx6_3.14.28/include/linux/kernel_stat.h 2.64 KB
6b13f685e   김민수   Initial BSP addition
  #ifndef _LINUX_KERNEL_STAT_H
  #define _LINUX_KERNEL_STAT_H
  
  #include <linux/smp.h>
  #include <linux/threads.h>
  #include <linux/percpu.h>
  #include <linux/cpumask.h>
  #include <linux/interrupt.h>
  #include <linux/sched.h>
  #include <linux/vtime.h>
  #include <asm/irq.h>
  #include <asm/cputime.h>
  
  /*
   * 'kernel_stat.h' contains the definitions needed for doing
   * some kernel statistics (CPU usage, context switches ...),
   * used by rstatd/perfmeter
   */
  
  enum cpu_usage_stat {
  	CPUTIME_USER,
  	CPUTIME_NICE,
  	CPUTIME_SYSTEM,
  	CPUTIME_SOFTIRQ,
  	CPUTIME_IRQ,
  	CPUTIME_IDLE,
  	CPUTIME_IOWAIT,
  	CPUTIME_STEAL,
  	CPUTIME_GUEST,
  	CPUTIME_GUEST_NICE,
  	NR_STATS,
  };
  
  struct kernel_cpustat {
  	u64 cpustat[NR_STATS];
  };
  
  struct kernel_stat {
  	unsigned long irqs_sum;
  	unsigned int softirqs[NR_SOFTIRQS];
  };
  
  DECLARE_PER_CPU(struct kernel_stat, kstat);
  DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
  
  /* Must have preemption disabled for this to be meaningful. */
  #define kstat_this_cpu (&__get_cpu_var(kstat))
  #define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
  #define kstat_cpu(cpu) per_cpu(kstat, cpu)
  #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
  
  extern unsigned long long nr_context_switches(void);
  
  #include <linux/irq.h>
  extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
  
  #define kstat_incr_irqs_this_cpu(irqno, DESC)		\
  do {							\
  	__this_cpu_inc(*(DESC)->kstat_irqs);		\
  	__this_cpu_inc(kstat.irqs_sum);			\
  } while (0)
  
  static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
  {
  	__this_cpu_inc(kstat.softirqs[irq]);
  }
  
  static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
  {
   	return kstat_cpu(cpu).softirqs[irq];
  }
  
  /*
   * Number of interrupts per specific IRQ source, since bootup
   */
  extern unsigned int kstat_irqs(unsigned int irq);
  
  /*
   * Number of interrupts per cpu, since bootup
   */
  static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
  {
  	return kstat_cpu(cpu).irqs_sum;
  }
  
  /*
   * Lock/unlock the current runqueue - to extract task statistics:
   */
  extern unsigned long long task_delta_exec(struct task_struct *);
  
  extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
  extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
  extern void account_steal_time(cputime_t);
  extern void account_idle_time(cputime_t);
  
  #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
  static inline void account_process_tick(struct task_struct *tsk, int user)
  {
  	vtime_account_user(tsk);
  }
  #else
  extern void account_process_tick(struct task_struct *, int user);
  #endif
  
  extern void account_steal_ticks(unsigned long ticks);
  extern void account_idle_ticks(unsigned long ticks);
  
  #endif /* _LINUX_KERNEL_STAT_H */
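
Usage note (not part of the header above): a minimal sketch of how a consumer such as fs/proc/stat.c typically reads these per-CPU accessors. The helper names total_user_time() and total_timer_softirqs() are hypothetical and only illustrate the pattern of summing kcpustat_cpu() and kstat_softirqs_cpu() across CPUs; only the accessors declared in this header are assumed.

   /* Illustrative sketch; helper names are hypothetical. */
   #include <linux/kernel_stat.h>
   #include <linux/cpumask.h>
   #include <linux/interrupt.h>

   static u64 total_user_time(void)
   {
   	u64 user = 0;
   	int cpu;

   	/* Sum CPUTIME_USER over all CPUs, as /proc/stat does. */
   	for_each_possible_cpu(cpu)
   		user += kcpustat_cpu(cpu).cpustat[CPUTIME_USER];

   	return user;
   }

   static unsigned long total_timer_softirqs(void)
   {
   	unsigned long n = 0;
   	int cpu;

   	/* Per-CPU softirq counters, indexed by softirq number. */
   	for_each_possible_cpu(cpu)
   		n += kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu);

   	return n;
   }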