Blame view

kernel/linux-imx6_3.14.28/include/asm-generic/percpu.h 3.17 KB
6b13f685e   김민수   Initial BSP addition
  #ifndef _ASM_GENERIC_PERCPU_H_
  #define _ASM_GENERIC_PERCPU_H_
  
  #include <linux/compiler.h>
  #include <linux/threads.h>
  #include <linux/percpu-defs.h>
  
  #ifdef CONFIG_SMP
  
  /*
   * per_cpu_offset() is the offset that has to be added to a
   * percpu variable to get to the instance for a certain processor.
   *
   * Most arches use the __per_cpu_offset array for those offsets but
   * some arches have their own ways of determining the offset (x86_64, s390).
   */
  #ifndef __per_cpu_offset
  extern unsigned long __per_cpu_offset[NR_CPUS];
  
  #define per_cpu_offset(x) (__per_cpu_offset[x])
  #endif
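
   /*
    * Illustrative sketch (hypothetical helper, not in the original
    * header): the generic setup code fills in __per_cpu_offset[] at
    * boot, and a CPU's instance of a percpu variable is reached by
    * adding that offset to the variable's link-time address:
    */
   #if 0
   static void *instance_of(void *pcpu_addr, int cpu)
   {
   	/* the same arithmetic SHIFT_PERCPU_PTR() performs below */
   	return (char *)pcpu_addr + per_cpu_offset(cpu);
   }
   #endif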
  
  /*
   * Determine the offset for the currently active processor.
    * An arch may define __my_cpu_offset to provide a more efficient
   * means of obtaining the offset to the per cpu variables of the
   * current processor.
   */
  #ifndef __my_cpu_offset
  #define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
  #endif
  #ifdef CONFIG_DEBUG_PREEMPT
  #define my_cpu_offset per_cpu_offset(smp_processor_id())
  #else
  #define my_cpu_offset __my_cpu_offset
  #endif
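
   /*
    * Sketch (hypothetical helper): with CONFIG_DEBUG_PREEMPT,
    * my_cpu_offset goes through smp_processor_id(), which warns if the
    * caller could be migrated mid-access; __my_cpu_offset uses
    * raw_smp_processor_id() and skips that check.
    */
   #if 0
   static unsigned long checked_cpu_offset(void)
   {
   	unsigned long off;
   
   	preempt_disable();	/* pin to this CPU so the id stays valid */
   	off = my_cpu_offset;
   	preempt_enable();
   	return off;
   }
   #endif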
  
  /*
    * Add an offset to a pointer but keep the pointer as is.
   *
   * Only S390 provides its own means of moving the pointer.
   */
  #ifndef SHIFT_PERCPU_PTR
  /* Weird cast keeps both GCC and sparse happy. */
  #define SHIFT_PERCPU_PTR(__p, __offset)	({				\
  	__verify_pcpu_ptr((__p));					\
  	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
  })
  #endif
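
   /*
    * RELOC_HIDE() (linux/compiler*.h) is plain pointer arithmetic routed
    * through an empty asm so the optimizer cannot assume the result still
    * points into the original object. Stripped of the sparse annotations,
    * SHIFT_PERCPU_PTR() evaluates to roughly:
    */
   #if 0
   (typeof(__p))((unsigned long)(__p) + (__offset))
   #endif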
  
  /*
    * A percpu variable may point to a discarded region. The following are
   * established ways to produce a usable pointer from the percpu variable
   * offset.
   */
  #define per_cpu(var, cpu) \
  	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
  
  #ifndef __this_cpu_ptr
  #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
  #endif
  #ifdef CONFIG_DEBUG_PREEMPT
  #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
  #else
  #define this_cpu_ptr(ptr) __this_cpu_ptr(ptr)
  #endif
  
  #define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
  #define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))
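
   /*
    * Usage sketch (hypothetical 'hits' counter; DEFINE_PER_CPU comes from
    * linux/percpu-defs.h, for_each_possible_cpu from linux/cpumask.h):
    */
   #if 0
   DEFINE_PER_CPU(unsigned long, hits);
   
   static void count_hit(void)
   {
   	preempt_disable();		/* stay on one CPU for the update */
   	__get_cpu_var(hits)++;		/* this CPU's instance */
   	preempt_enable();
   }
   
   static unsigned long total_hits(void)
   {
   	unsigned long sum = 0;
   	int cpu;
   
   	for_each_possible_cpu(cpu)
   		sum += per_cpu(hits, cpu);	/* a given CPU's instance */
   	return sum;
   }
   #endif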
  
  #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
  extern void setup_per_cpu_areas(void);
  #endif
  
  #else /* ! SMP */
  
  #define VERIFY_PERCPU_PTR(__p) ({			\
  	__verify_pcpu_ptr((__p));			\
  	(typeof(*(__p)) __kernel __force *)(__p);	\
  })
  
  #define per_cpu(var, cpu)	(*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
  #define __get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
  #define __raw_get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
  #define this_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
  #define __this_cpu_ptr(ptr)	this_cpu_ptr(ptr)
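
   /*
    * On UP there is a single instance, so the accessors collapse to a
    * plain dereference; ((void)(cpu), ...) still evaluates the cpu
    * argument for its side effects but discards the value. Sketch,
    * reusing the hypothetical 'hits' counter from above:
    */
   #if 0
   static unsigned long read_hits_on(int cpu)
   {
   	return per_cpu(hits, cpu);	/* 'cpu' evaluated, then ignored */
   }
   #endif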
  
  #endif	/* SMP */
  
  #ifndef PER_CPU_BASE_SECTION
  #ifdef CONFIG_SMP
  #define PER_CPU_BASE_SECTION ".data..percpu"
  #else
  #define PER_CPU_BASE_SECTION ".data"
  #endif
  #endif
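
   /*
    * Sketch of how linux/percpu-defs.h consumes this: the final section
    * name is the base plus a per-flavour suffix, so on SMP a plain
    * DEFINE_PER_CPU(int, x) lands in ".data..percpu". Roughly:
    */
   #if 0
   #define __PCPU_ATTRS(sec)						\
   	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
   	PER_CPU_ATTRIBUTES
   #endif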
  
  #ifdef CONFIG_SMP
  
  #ifdef MODULE
  #define PER_CPU_SHARED_ALIGNED_SECTION ""
  #define PER_CPU_ALIGNED_SECTION ""
  #else
  #define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
  #define PER_CPU_ALIGNED_SECTION "..shared_aligned"
  #endif
  #define PER_CPU_FIRST_SECTION "..first"
  
  #else
  
  #define PER_CPU_SHARED_ALIGNED_SECTION ""
  #define PER_CPU_ALIGNED_SECTION "..shared_aligned"
  #define PER_CPU_FIRST_SECTION ""
  
  #endif
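
   /*
    * The "..shared_aligned" suffix backs DEFINE_PER_CPU_SHARED_ALIGNED(),
    * which gives write-hot percpu variables their own cache line so they
    * do not false-share with neighbouring percpu data, e.g. the
    * scheduler's runqueues:
    */
   #if 0
   DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
   #endif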
  
  #ifndef PER_CPU_ATTRIBUTES
  #define PER_CPU_ATTRIBUTES
  #endif
  
  #ifndef PER_CPU_DEF_ATTRIBUTES
  #define PER_CPU_DEF_ATTRIBUTES
  #endif
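
   /*
    * These two are override hooks and stay empty on most architectures.
    * ia64, for one, uses PER_CPU_ATTRIBUTES to force a small addressing
    * model on percpu variables, along the lines of:
    */
   #if 0
   #define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
   #endif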
  
  #endif /* _ASM_GENERIC_PERCPU_H_ */