kernel/linux-rt-4.4.41/arch/x86/include/asm/vgtod.h

  #ifndef _ASM_X86_VGTOD_H
  #define _ASM_X86_VGTOD_H
  
  #include <linux/compiler.h>
  #include <linux/clocksource.h>
  
  #ifdef BUILD_VDSO32_64
  typedef u64 gtod_long_t;
  #else
  typedef unsigned long gtod_long_t;
  #endif
  /*
   * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
   * so be careful when modifying this structure.
   */
  struct vsyscall_gtod_data {
  	unsigned seq;
  
  	int vclock_mode;
  	cycle_t	cycle_last;
  	cycle_t	mask;
  	u32	mult;
  	u32	shift;
  
  	/* open coded 'struct timespec' */
  	u64		wall_time_snsec;
  	gtod_long_t	wall_time_sec;
  	gtod_long_t	monotonic_time_sec;
  	u64		monotonic_time_snsec;
  	gtod_long_t	wall_time_coarse_sec;
  	gtod_long_t	wall_time_coarse_nsec;
  	gtod_long_t	monotonic_time_coarse_sec;
  	gtod_long_t	monotonic_time_coarse_nsec;
  
  	int		tz_minuteswest;
  	int		tz_dsttime;
  };
  extern struct vsyscall_gtod_data vsyscall_gtod_data;
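  
  /*
   * Note on the *_snsec fields (illustrative, not part of the original
   * header): they hold "shifted" nanoseconds, i.e. nanoseconds scaled by
   * 2^shift, so a reader recovers plain nanoseconds with something like:
   *
   *	nsec = gtod->wall_time_snsec >> gtod->shift;
   *
   * where gtod points at vsyscall_gtod_data.
   */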
  
  static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
  {
  	unsigned ret;
  
  repeat:
  	ret = ACCESS_ONCE(s->seq);
  	if (unlikely(ret & 1)) {
  		cpu_relax();
  		goto repeat;
  	}
  	smp_rmb();
  	return ret;
  }
  
  static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
  					unsigned start)
  {
  	smp_rmb();
  	return unlikely(s->seq != start);
  }
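  
  /*
   * Reader usage (illustrative sketch, not part of this header): a vDSO
   * reader retries the lockless read until the sequence count is even
   * and unchanged across the critical section, e.g.:
   *
   *	unsigned seq;
   *	u64 sec, ns;
   *
   *	do {
   *		seq = gtod_read_begin(&vsyscall_gtod_data);
   *		sec = vsyscall_gtod_data.wall_time_sec;
   *		ns  = vsyscall_gtod_data.wall_time_snsec >> vsyscall_gtod_data.shift;
   *	} while (gtod_read_retry(&vsyscall_gtod_data, seq));
   */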
  
  static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
  {
  	++s->seq;
  	smp_wmb();
  }
  
  static inline void gtod_write_end(struct vsyscall_gtod_data *s)
  {
  	smp_wmb();
  	++s->seq;
  }
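  
  /*
   * Writer usage (illustrative, not part of this header): the timekeeping
   * update path brackets its changes so readers see an odd sequence count
   * while the fields are in flux:
   *
   *	gtod_write_begin(&vsyscall_gtod_data);
   *	...update the wall/monotonic time fields...
   *	gtod_write_end(&vsyscall_gtod_data);
   */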
  
  #ifdef CONFIG_X86_64
  
  #define VGETCPU_CPU_MASK 0xfff
  
  static inline unsigned int __getcpu(void)
  {
  	unsigned int p;
  
  	/*
  	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
  	 * works on all CPUs.  This is volatile so that it orders
  	 * correctly wrt barrier() and to keep gcc from cleverly
  	 * hoisting it out of the calling function.
  	 */
  	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
  
  	return p;
  }
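  
  /*
   * Decoding the result (illustrative, not part of this header): the
   * segment limit read via LSL packs the CPU number in the low 12 bits
   * and the NUMA node above them, so a caller does roughly:
   *
   *	unsigned int p    = __getcpu();
   *	unsigned int cpu  = p & VGETCPU_CPU_MASK;
   *	unsigned int node = p >> 12;
   */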
  
  #endif /* CONFIG_X86_64 */
  
  #endif /* _ASM_X86_VGTOD_H */