Blame view

kernel/linux-imx6_3.14.28/include/linux/kmemcheck.h 4.24 KB
6b13f685e   김민수   BSP 최초 추가
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
  /*
   * kmemcheck - dynamic detector of reads from uninitialized kernel memory.
   * This header declares the hooks the allocators call when
   * CONFIG_KMEMCHECK=y, and compiles them all away otherwise.
   */
  #ifndef LINUX_KMEMCHECK_H
  #define LINUX_KMEMCHECK_H
  
  #include <linux/mm_types.h>
  #include <linux/types.h>
  
  #ifdef CONFIG_KMEMCHECK
  /* Runtime on/off switch (toggled via sysctl elsewhere in the kernel). */
  extern int kmemcheck_enabled;
  
  /* The slab-related functions. */
  void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
  void kmemcheck_free_shadow(struct page *page, int order);
  void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
  			  size_t size);
  void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
  
  /* Page-allocator hook. */
  void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
  			       gfp_t gfpflags);
  
  /* Make tracked pages visible/invisible to the rest of the kernel. */
  void kmemcheck_show_pages(struct page *p, unsigned int n);
  void kmemcheck_hide_pages(struct page *p, unsigned int n);
  
  bool kmemcheck_page_is_tracked(struct page *p);
  
  /* Set the shadow state of a byte range (n is a byte count). */
  void kmemcheck_mark_unallocated(void *address, unsigned int n);
  void kmemcheck_mark_uninitialized(void *address, unsigned int n);
  void kmemcheck_mark_initialized(void *address, unsigned int n);
  void kmemcheck_mark_freed(void *address, unsigned int n);
  
  /* Same as above, but for n whole pages starting at p. */
  void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
  void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
  void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
  
  int kmemcheck_show_addr(unsigned long address);
  int kmemcheck_hide_addr(unsigned long address);
  
  bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
  /*
   * Bitfield annotations
   *
   * How to use: If you have a struct using bitfields, for example
   *
   *     struct a {
   *             int x:8, y:8;
   *     };
   *
   * then this should be rewritten as
   *
   *     struct a {
   *             kmemcheck_bitfield_begin(flags);
   *             int x:8, y:8;
   *             kmemcheck_bitfield_end(flags);
   *     };
   *
   * Now the "flags_begin" and "flags_end" members may be used to refer to the
   * beginning and end, respectively, of the bitfield (and things like
   * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
   * fields should be annotated:
   *
   *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
   *     kmemcheck_annotate_bitfield(a, flags);
   */
  /*
   * Zero-length arrays (a GNU C extension) occupy no storage; they exist
   * only so that &name##_begin / &name##_end give the byte addresses that
   * bracket the bitfield region.
   */
  #define kmemcheck_bitfield_begin(name)	\
  	int name##_begin[0];
  
  #define kmemcheck_bitfield_end(name)	\
  	int name##_end[0];
  
  /*
   * Mark the bitfield region [name##_begin, name##_end) of *ptr as
   * initialized so kmemcheck does not warn on partial-word bitfield reads.
   * A NULL ptr is silently ignored.
   *
   * Note: (ptr) is parenthesized everywhere, including the NULL check --
   * "if (!ptr)" would misparse arguments such as "a + b" or "c ? p : q".
   */
  #define kmemcheck_annotate_bitfield(ptr, name)				\
  	do {								\
  		int _n;							\
  									\
  		if (!(ptr))						\
  			break;						\
  									\
  		_n = (long) &((ptr)->name##_end)			\
  			- (long) &((ptr)->name##_begin);		\
  		BUILD_BUG_ON(_n < 0);					\
  									\
  		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
  	} while (0)
  
  /*
   * Mark a whole (non-bitfield) variable as initialized.
   *
   * The original definition carried a stray trailing backslash after
   * "} while (0)", continuing the macro onto the next source line; it only
   * worked because that line happened to be blank. Removed here so the
   * macro ends where it appears to end.
   */
  #define kmemcheck_annotate_variable(var)				\
  	do {								\
  		kmemcheck_mark_initialized(&(var), sizeof(var));	\
  	} while (0)
  
  #else
  /*
   * CONFIG_KMEMCHECK=n: every hook collapses to an empty static inline (or
   * empty macro) so callers need no #ifdefs and the compiler emits no code.
   */
  #define kmemcheck_enabled 0
  
  static inline void
  kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
  {
  }
  
  static inline void
  kmemcheck_free_shadow(struct page *page, int order)
  {
  }
  
  static inline void
  kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
  		     size_t size)
  {
  }
  
  static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
  				       size_t size)
  {
  }
  
  static inline void kmemcheck_pagealloc_alloc(struct page *p,
  	unsigned int order, gfp_t gfpflags)
  {
  }
  
  /* No tracking is ever done when kmemcheck is compiled out. */
  static inline bool kmemcheck_page_is_tracked(struct page *p)
  {
  	return false;
  }
  
  static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
  {
  }
  
  static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
  {
  }
  
  static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
  {
  }
  
  static inline void kmemcheck_mark_freed(void *address, unsigned int n)
  {
  }
  
  static inline void kmemcheck_mark_unallocated_pages(struct page *p,
  						    unsigned int n)
  {
  }
  
  static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
  						      unsigned int n)
  {
  }
  
  static inline void kmemcheck_mark_initialized_pages(struct page *p,
  						    unsigned int n)
  {
  }
  
  /* Without shadow memory, every object must be assumed initialized. */
  static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
  {
  	return true;
  }
  
  /* The annotation macros expand to nothing (or an empty statement). */
  #define kmemcheck_bitfield_begin(name)
  #define kmemcheck_bitfield_end(name)
  #define kmemcheck_annotate_bitfield(ptr, name)	\
  	do {					\
  	} while (0)
  
  #define kmemcheck_annotate_variable(var)	\
  	do {					\
  	} while (0)
  
  #endif /* CONFIG_KMEMCHECK */
  
  #endif /* LINUX_KMEMCHECK_H */