/*
 * Generic C implementation of atomic counter operations.  The
 * irq-disabling fallbacks below are atomic only with respect to the
 * local CPU, so they are usable on UP systems only; SMP configurations
 * must override the core operations with real atomic instructions.
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#include <asm/cmpxchg.h>

#ifdef CONFIG_SMP
/* Force the architecture to supply the core atomics. */
# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
#  error "SMP requires a little arch-specific magic"
# endif
#endif
#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/* Atomically read the counter; the volatile cast forces a real load. */
#ifndef atomic_read
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#endif

/* Atomically set the counter to @i. */
#define atomic_set(v, i) (((v)->counter) = (i))

#include <linux/irqflags.h>
/*
 * atomic_add_return - add @i to @v and return the result.  This generic
 * version masks interrupts around a plain read-modify-write, which is
 * atomic only with respect to code running on the same CPU.
 */
#ifndef atomic_add_return
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif
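
/*
 * Sketch: an SMP-capable architecture would typically provide the
 * operation above via a real atomic primitive before this fallback is
 * used, e.g. a cmpxchg()-based retry loop.  Illustrative only, not part
 * of this header; "my_atomic_add_return" is a hypothetical name:
 *
 *	static inline int my_atomic_add_return(int i, atomic_t *v)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = old + i;
 *		} while (cmpxchg(&v->counter, old, new) != old);
 *
 *		return new;
 *	}
 */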
/*
 * atomic_sub_return - subtract @i from @v and return the result, using
 * the same UP-only irq-masking scheme as atomic_add_return().
 */
#ifndef atomic_sub_return
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags);
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif
/* Add @i to @v and return true if the result is negative. */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

/* The void-returning operations simply discard the returned value. */
static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

/* The *_and_test() helpers return true if the new value is zero. */
#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
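
/*
 * Typical use of the *_and_test() helpers is reference counting.
 * Illustrative sketch; "obj", "refs" and "release_obj" are hypothetical:
 *
 *	atomic_inc(&obj->refs);
 *	...
 *	if (atomic_dec_and_test(&obj->refs))
 *		release_obj(obj);
 */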
/*
 * __atomic_add_unless - add @a to @v unless @v currently holds @u.
 * Returns the old value of @v.  Loops on atomic_cmpxchg() until either
 * the update succeeds or the counter is observed to equal @u.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;

	return c;
}
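
/*
 * For reference, the generic <linux/atomic.h> layer builds
 * atomic_add_unless() on top of this helper roughly as follows (sketch,
 * not part of this header): the add happened iff the old value returned
 * was not @u.
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */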
/*
 * atomic_clear_mask - clear the bits in @mask from @v, again by masking
 * interrupts around a plain read-modify-write (UP only).
 */
#ifndef atomic_clear_mask
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	unsigned long flags;

	mask = ~mask;
	raw_local_irq_save(flags);
	v->counter &= mask;
	raw_local_irq_restore(flags);
}
#endif
/*
 * atomic_set_mask - set the bits in @mask in @v.
 */
#ifndef atomic_set_mask
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	v->counter |= mask;
	raw_local_irq_restore(flags);
}
#endif
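
/*
 * Mask usage sketch (illustrative; STATUS_BUSY, STATUS_DONE and "st"
 * are hypothetical):
 *
 *	atomic_clear_mask(STATUS_BUSY, &st->flags);
 *	atomic_set_mask(STATUS_DONE, &st->flags);
 */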
/* Assume that the atomic operations above are already serializing. */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */