Blame view

kernel/linux-rt-4.4.41/arch/sparc/include/asm/switch_to_64.h 2.51 KB
5113f6f70   김현기   kernel add
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
  #ifndef __SPARC64_SWITCH_TO_64_H
  #define __SPARC64_SWITCH_TO_64_H
  
  #include <asm/visasm.h>
  
  /* Hook run by the scheduler just before switch_to(): flushw_all()
   * (see asm/visasm.h) spills the CPU's register windows so the old
   * task's state is safely in memory before we switch stacks.
   * 'next' is unused here but kept for the generic hook signature.
   */
  #define prepare_arch_switch(next)		\
  do {						\
  	flushw_all();				\
  } while (0)
  
  	/* See what happens when you design the chip correctly?
  	 *
  	 * We tell gcc we clobber all non-fixed-usage registers except
  	 * for l0/l1.  It will use one for 'next' and the other to hold
  	 * the output value of 'last'.  'next' is not referenced again
  	 * past the invocation of switch_to in the scheduler, so we need
  	 * not preserve it's value.  Hairy, but it lets us remove 2 loads
  	 * and 2 stores in this critical code path.  -DaveM
  	 */
  /*
   * switch_to(prev, next, last) — the low-level sparc64 context switch.
   *
   * Saves the outgoing task's stack/window state into its thread_info,
   * installs 'next' as the current thread (%g6) and restores its state.
   * If the new task is a fresh child (TI_NEW_CHILD set), control jumps
   * to ret_from_fork instead of falling through to switch_to_pc.
   *
   * Operand map for the asm below:
   *   %0 = last (output; input "0" ties task_thread_info(next) into it)
   *   %1-%3 = current, current_thread_info_reg, __local_per_cpu_offset
   *   %4 = task_thread_info(next)
   *   %5 = TI_WSTATE  %6 = TI_KSP  %7 = TI_NEW_CHILD
   *   %8 = TI_CWP     %9 = TI_TASK
   *
   * NOTE: each asm instruction must end in a single "...\n\t" string
   * literal on one source line — the string may not span lines.
   */
  #define switch_to(prev, next, last)					\
  do {	save_and_clear_fpu();						\
  	/* If you are tempted to conditionalize the following */	\
  	/* so that ASI is only written if it changes, think again. */	\
  	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
  	: : "r" (task_thread_info(next)->current_ds));\
  	trap_block[current_thread_info()->cpu].thread =			\
  		task_thread_info(next);					\
  	__asm__ __volatile__(						\
  	"mov	%%g4, %%g7\n\t"						\
  	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t"				\
  	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t"				\
  	"rdpr	%%wstate, %%o5\n\t"					\
  	"stx	%%o6, [%%g6 + %6]\n\t"					\
  	"stb	%%o5, [%%g6 + %5]\n\t"					\
  	"rdpr	%%cwp, %%o5\n\t"					\
  	"stb	%%o5, [%%g6 + %8]\n\t"					\
  	"wrpr	%%g0, 15, %%pil\n\t"					\
  	"mov	%4, %%g6\n\t"						\
  	"ldub	[%4 + %8], %%g1\n\t"					\
  	"wrpr	%%g1, %%cwp\n\t"					\
  	"ldx	[%%g6 + %6], %%o6\n\t"					\
  	"ldub	[%%g6 + %5], %%o5\n\t"					\
  	"ldub	[%%g6 + %7], %%o7\n\t"					\
  	"wrpr	%%o5, 0x0, %%wstate\n\t"				\
  	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t"				\
  	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t"				\
  	"ldx	[%%g6 + %9], %%g4\n\t"					\
  	"wrpr	%%g0, 14, %%pil\n\t"					\
  	"brz,pt %%o7, switch_to_pc\n\t"					\
  	" mov	%%g7, %0\n\t"						\
  	"sethi	%%hi(ret_from_fork), %%g1\n\t"				\
  	"jmpl	%%g1 + %%lo(ret_from_fork), %%g0\n\t"			\
  	" nop\n\t"							\
  	".globl switch_to_pc\n\t"					\
  	"switch_to_pc:\n\t"						\
  	: "=&r" (last), "=r" (current), "=r" (current_thread_info_reg),	\
  	  "=r" (__local_per_cpu_offset)					\
  	: "0" (task_thread_info(next)),					\
  	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),            \
  	  "i" (TI_CWP), "i" (TI_TASK)					\
  	: "cc",								\
  	        "g1", "g2", "g3",                   "g7",		\
  	        "l1", "l2", "l3", "l4", "l5", "l6", "l7",		\
  	  "i0", "i1", "i2", "i3", "i4", "i5",				\
  	  "o0", "o1", "o2", "o3", "o4", "o5",       "o7");		\
  } while(0)
  
  /* Defined elsewhere in arch/sparc: helpers for spilling/faulting-in the
   * user's register windows around signal delivery and context switches.
   * NOTE(review): semantics inferred from names — confirm against the
   * definitions before relying on them.
   */
  void synchronize_user_stack(void);
  void fault_in_user_windows(void);
  
  #endif /* __SPARC64_SWITCH_TO_64_H */