Blame view

kernel/linux-rt-4.4.41/include/linux/kasan.h 3.01 KB
5113f6f70   김현기   kernel add
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
  #ifndef _LINUX_KASAN_H
  #define _LINUX_KASAN_H
  
  #include <linux/types.h>
  
  struct kmem_cache;
  struct page;
  struct vm_struct;
  
  #ifdef CONFIG_KASAN
  
  #define KASAN_SHADOW_SCALE_SHIFT 3
  
  #include <asm/kasan.h>
  #include <asm/pgtable.h>
  #include <linux/sched.h>
  
  extern unsigned char kasan_zero_page[PAGE_SIZE];
  extern pte_t kasan_zero_pte[PTRS_PER_PTE];
  extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
  extern pud_t kasan_zero_pud[PTRS_PER_PUD];
  
  void kasan_populate_zero_shadow(const void *shadow_start,
  				const void *shadow_end);
  
  static inline void *kasan_mem_to_shadow(const void *addr)
  {
  	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
  		+ KASAN_SHADOW_OFFSET;
  }
  
/*
 * Enable reporting bugs after kasan_disable_current().
 *
 * NOTE(review): this *increments* current->kasan_depth while
 * kasan_disable_current() decrements it. If the report path treats a
 * non-zero depth as "reporting disabled" (convention elsewhere in
 * mm/kasan), this pair only behaves correctly because the calls are
 * strictly nested and an unsigned depth wraps back to 0 — confirm
 * against kasan_report_enabled() before relying on the direction.
 */
static inline void kasan_enable_current(void)
{
	current->kasan_depth++;
}
  
/*
 * Disable reporting bugs for the current task.
 *
 * NOTE(review): this *decrements* current->kasan_depth (its counterpart
 * kasan_enable_current() increments it). Callers must keep
 * disable/enable strictly paired; an unpaired call leaves kasan_depth
 * permanently non-zero — verify the intended polarity against the
 * depth check in mm/kasan's report code.
 */
static inline void kasan_disable_current(void)
{
	current->kasan_depth--;
}
  
  void kasan_unpoison_shadow(const void *address, size_t size);
  
  void kasan_alloc_pages(struct page *page, unsigned int order);
  void kasan_free_pages(struct page *page, unsigned int order);
  
  void kasan_poison_slab(struct page *page);
  void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
  void kasan_poison_object_data(struct kmem_cache *cache, void *object);
  
  void kasan_kmalloc_large(const void *ptr, size_t size);
  void kasan_kfree_large(const void *ptr);
  void kasan_kfree(void *ptr);
  void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
  void kasan_krealloc(const void *object, size_t new_size);
  
  void kasan_slab_alloc(struct kmem_cache *s, void *object);
  void kasan_slab_free(struct kmem_cache *s, void *object);
  
  int kasan_module_alloc(void *addr, size_t size);
  void kasan_free_shadow(const struct vm_struct *vm);
  
  #else /* CONFIG_KASAN */
  
  static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
  
  static inline void kasan_enable_current(void) {}
  static inline void kasan_disable_current(void) {}
  
  static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
  static inline void kasan_free_pages(struct page *page, unsigned int order) {}
  
  static inline void kasan_poison_slab(struct page *page) {}
  static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
  					void *object) {}
  static inline void kasan_poison_object_data(struct kmem_cache *cache,
  					void *object) {}
  
  static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
  static inline void kasan_kfree_large(const void *ptr) {}
  static inline void kasan_kfree(void *ptr) {}
  static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
  				size_t size) {}
  static inline void kasan_krealloc(const void *object, size_t new_size) {}
  
  static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
  static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
  
  static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
  static inline void kasan_free_shadow(const struct vm_struct *vm) {}
  
  #endif /* CONFIG_KASAN */
  
#endif /* _LINUX_KASAN_H */