kernel/linux-rt-4.4.41/include/linux/nmi.h

/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#endif
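
/*
 * Illustrative usage sketch (not part of the original header): code
 * that spins with interrupts off for a long time pets the watchdog on
 * each iteration so the NMI watchdog does not declare a hard lockup.
 * poll_device() and done are placeholders; only touch_nmi_watchdog()
 * comes from this header.
 *
 *	local_irq_save(flags);
 *	while (!done) {
 *		done = poll_device();
 *		touch_nmi_watchdog();
 *	}
 *	local_irq_restore(flags);
 */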
  
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(true);

	return true;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(false);
	return true;
}

/* generic implementation */
void nmi_trigger_all_cpu_backtrace(bool include_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);
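
/*
 * Sketch of how an architecture wires this up (modelled on the x86
 * implementation; the raise-callback name is illustrative): the arch
 * hook delegates to the generic helper with a callback that sends an
 * NMI to every CPU in the mask, and the arch NMI handler then calls
 * nmi_cpu_backtrace(regs) to dump the interrupted CPU's stack.
 *
 *	static void nmi_raise_cpu_backtrace(cpumask_t *mask)
 *	{
 *		apic->send_IPI_mask(mask, NMI_VECTOR);
 *	}
 *
 *	void arch_trigger_all_cpu_backtrace(bool include_self)
 *	{
 *		nmi_trigger_all_cpu_backtrace(include_self,
 *					      nmi_raise_cpu_backtrace);
 *	}
 */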
  
#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
#endif
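
/*
 * Caller-side sketch (an assumption based on the comment above): the
 * bool return value lets callers fall back to another mechanism when
 * the architecture cannot raise NMIs, e.g. dumping at least the local
 * CPU's stack:
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();
 */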
  
#ifdef CONFIG_LOCKUP_DETECTOR
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long *watchdog_cpumask_bits;
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
	return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif
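
/*
 * Pairing sketch (assumption; do_slow_work() is a placeholder): a
 * caller that must keep the lockup detector quiet across a long
 * critical section suspends it, checks the error return, and resumes
 * it afterwards:
 *
 *	ret = lockup_detector_suspend();
 *	if (!ret) {
 *		do_slow_work();
 *		lockup_detector_resume();
 *	}
 */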
  
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif /* LINUX_NMI_H */