Blame view

kernel/linux-imx6_3.14.28/include/linux/proportions.h 3.17 KB
6b13f685e   김민수   Initial BSP addition
  /*
   * Floating proportions
   *
   *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *
   * This file contains the public data structure and API definitions.
   */
  
  #ifndef _LINUX_PROPORTIONS_H
  #define _LINUX_PROPORTIONS_H
  
  #include <linux/percpu_counter.h>
  #include <linux/spinlock.h>
  #include <linux/mutex.h>
  
  struct prop_global {
  	/*
  	 * The period over which we differentiate
  	 *
  	 *   period = 2^shift
  	 */
  	int shift;
  	/*
  	 * The total event counter aka 'time'.
  	 *
  	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
  	 * counter bits, the remaining upper bits the period counter.
  	 */
  	struct percpu_counter events;
  };
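  
  /*
   * Example of the layout above: with shift = 10 on a 32-bit long, the
   * low 9 bits (shift - 1) of 'events' hold the event counter and the
   * upper 23 bits count periods.
   */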
  
  /*
   * global proportion descriptor
   *
   * this is needed to consistently flip prop_global structures.
   */
  struct prop_descriptor {
  	int index;
  	struct prop_global pg[2];
  	struct mutex mutex;		/* serialize the prop_global switch */
  };
  
  int prop_descriptor_init(struct prop_descriptor *pd, int shift);
  void prop_change_shift(struct prop_descriptor *pd, int new_shift);
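  
  /*
   * Minimal usage sketch (hypothetical names, error handling elided):
   * initialize the descriptor with a 2^10 event period, then widen the
   * period to 2^12 later via prop_change_shift():
   *
   *	static struct prop_descriptor my_prop;
   *
   *	err = prop_descriptor_init(&my_prop, 10);
   *	...
   *	prop_change_shift(&my_prop, 12);
   */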
  
  /*
   * ----- PERCPU ------
   */
  
  struct prop_local_percpu {
  	/*
  	 * the local events counter
  	 */
  	struct percpu_counter events;
  
  	/*
  	 * snapshot of the last seen global state
  	 */
  	int shift;
  	unsigned long period;
  	raw_spinlock_t lock;		/* protect the snapshot state */
  };
  
  int prop_local_init_percpu(struct prop_local_percpu *pl);
  void prop_local_destroy_percpu(struct prop_local_percpu *pl);
  void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
  void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
  		long *numerator, long *denominator);
  
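  /*
   * IRQ-safe wrapper: disable local interrupts so the update cannot
   * race with an event accounted from interrupt context on this CPU.
   */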
  static inline
  void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	__prop_inc_percpu(pd, pl);
  	local_irq_restore(flags);
  }
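  
  /*
   * Example (hedged sketch; hypothetical names, error handling elided):
   * account one event to a local counter, then read back its share of
   * the global total as the fraction num/den:
   *
   *	struct prop_local_percpu my_local;
   *	long num, den;
   *
   *	prop_local_init_percpu(&my_local);
   *	prop_inc_percpu(&my_prop, &my_local);
   *	prop_fraction_percpu(&my_prop, &my_local, &num, &den);
   *	...
   *	prop_local_destroy_percpu(&my_local);
   */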
  
  /*
   * Limit the time part in order to ensure there are some bits left for the
   * cycle counter and fraction multiply.
   */
  #if BITS_PER_LONG == 32
  #define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
  #else
  #define PROP_MAX_SHIFT (BITS_PER_LONG/2)
  #endif
  
  #define PROP_FRAC_SHIFT		(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
  #define PROP_FRAC_BASE		(1UL << PROP_FRAC_SHIFT)
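  
  /*
   * Worked example: on 64-bit, PROP_MAX_SHIFT = 64/2 = 32, so
   * PROP_FRAC_SHIFT = 64 - 32 - 1 = 31 and PROP_FRAC_BASE = 2^31;
   * on 32-bit, PROP_MAX_SHIFT = 3*32/4 = 24, PROP_FRAC_SHIFT = 7 and
   * PROP_FRAC_BASE = 128.
   */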
  
  void __prop_inc_percpu_max(struct prop_descriptor *pd,
  			   struct prop_local_percpu *pl, long frac);
  
  
  /*
   * ----- SINGLE ------
   */
  
  struct prop_local_single {
  	/*
  	 * the local events counter
  	 */
  	unsigned long events;
  
  	/*
  	 * snapshot of the last seen global state
  	 * and a lock protecting this state
  	 */
  	unsigned long period;
  	int shift;
  	raw_spinlock_t lock;		/* protect the snapshot state */
  };
  
  #define INIT_PROP_LOCAL_SINGLE(name)			\
  {	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
  }
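  
  /*
   * Example (hypothetical name): static definition via the initializer
   * above; the remaining fields start out zeroed:
   *
   *	static struct prop_local_single my_single =
   *		INIT_PROP_LOCAL_SINGLE(my_single);
   */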
  
  int prop_local_init_single(struct prop_local_single *pl);
  void prop_local_destroy_single(struct prop_local_single *pl);
  void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
  void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
  		long *numerator, long *denominator);
  
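  /*
   * IRQ-safe wrapper around __prop_inc_single(); see prop_inc_percpu()
   * above.
   */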
  static inline
  void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	__prop_inc_single(pd, pl);
  	local_irq_restore(flags);
  }
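  
  /*
   * Example (hedged sketch, hypothetical names), mirroring the percpu
   * usage above but with a plain unsigned long as the local counter:
   *
   *	struct prop_local_single my_single;
   *	long num, den;
   *
   *	prop_local_init_single(&my_single);
   *	prop_inc_single(&my_prop, &my_single);
   *	prop_fraction_single(&my_prop, &my_single, &num, &den);
   *	prop_local_destroy_single(&my_single);
   */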
  
  #endif /* _LINUX_PROPORTIONS_H */