Blame view

kernel/linux-imx6_3.14.28/arch/x86/lib/rwsem.S 3.56 KB
6b13f685e   김민수   Initial BSP addition
  /*
   * x86 semaphore implementation.
   *
   * (C) Copyright 1999 Linus Torvalds
   *
   * Portions Copyright 1999 Red Hat, Inc.
   *
   *	This program is free software; you can redistribute it and/or
   *	modify it under the terms of the GNU General Public License
   *	as published by the Free Software Foundation; either version
   *	2 of the License, or (at your option) any later version.
   *
   * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
   */
  
  #include <linux/linkage.h>
  #include <asm/alternative-asm.h>
  #include <asm/dwarf2.h>
  
  #define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
  #define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)
  
  #ifdef CONFIG_X86_32
  
  /*
   * The semaphore operations have a special calling sequence that
   * allows us to do a simpler in-line version of them. These routines
   * need to convert that sequence back into the C sequence when
   * there is contention on the semaphore.
   *
   * %eax contains the semaphore pointer on entry. Save the C-clobbered
   * registers (%eax, %edx and %ecx) except %eax which is either a return
   * value or just clobbered.
   */
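  /*
   * For reference, the in-line fast path that reaches these stubs looks
   * roughly like the sketch below (based on the inline asm in
   * <asm/rwsem.h>; the exact operand forms are chosen by the compiler,
   * so treat this as illustrative only):
   *
   *	lock; incl (%eax)	# __down_read: add the read bias
   *	jns 1f			# sign clear: uncontended, done
   *	call call_rwsem_down_read_failed	# slow path, sem in %eax
   * 1:
   */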
  
  #define save_common_regs \
  	pushl_cfi %ecx; CFI_REL_OFFSET ecx, 0
  
  #define restore_common_regs \
  	popl_cfi %ecx; CFI_RESTORE ecx
  
  	/* Avoid uglifying the argument copying x86-64 needs to do. */
  	.macro movq src, dst
  	.endm
  
  #else
  
  /*
   * x86-64 rwsem wrappers
   *
   * This interfaces the inline asm code to the slow-path
   * C routines. We need to save the call-clobbered regs
   * that the asm does not mark as clobbered, and move the
   * argument from %rax to %rdi.
   *
   * NOTE! We don't need to save %rax, because the functions
   * will always return the semaphore pointer in %rax (which
   * is also the input argument to these helpers)
   *
   * The following can clobber %rdx because the asm clobbers it:
   *   call_rwsem_down_write_failed
   *   call_rwsem_wake
   * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
   */
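  /*
   * For reference, the slow-path helpers called below are C functions
   * declared along these lines in <linux/rwsem.h>:
   *
   *	struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
   *	struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
   *	struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
   *	struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
   *
   * Each returns the semaphore pointer it was passed, which is why %rax
   * does not need to be saved around the calls.
   */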
  
  #define save_common_regs \
  	pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \
  	pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \
  	pushq_cfi %rcx; CFI_REL_OFFSET rcx, 0; \
  	pushq_cfi %r8;  CFI_REL_OFFSET r8,  0; \
  	pushq_cfi %r9;  CFI_REL_OFFSET r9,  0; \
  	pushq_cfi %r10; CFI_REL_OFFSET r10, 0; \
  	pushq_cfi %r11; CFI_REL_OFFSET r11, 0
  
  #define restore_common_regs \
  	popq_cfi %r11; CFI_RESTORE r11; \
  	popq_cfi %r10; CFI_RESTORE r10; \
  	popq_cfi %r9;  CFI_RESTORE r9; \
  	popq_cfi %r8;  CFI_RESTORE r8; \
  	popq_cfi %rcx; CFI_RESTORE rcx; \
  	popq_cfi %rsi; CFI_RESTORE rsi; \
  	popq_cfi %rdi; CFI_RESTORE rdi
  
  #endif
  
  /* Fix up special calling conventions */
  ENTRY(call_rwsem_down_read_failed)
  	CFI_STARTPROC
  	save_common_regs
  	__ASM_SIZE(push,_cfi) %__ASM_REG(dx)
  	CFI_REL_OFFSET __ASM_REG(dx), 0
  	movq %rax,%rdi
  	call rwsem_down_read_failed
  	__ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
  	CFI_RESTORE __ASM_REG(dx)
  	restore_common_regs
  	ret
  	CFI_ENDPROC
  ENDPROC(call_rwsem_down_read_failed)
  
  ENTRY(call_rwsem_down_write_failed)
  	CFI_STARTPROC
  	save_common_regs
  	movq %rax,%rdi
  	call rwsem_down_write_failed
  	restore_common_regs
  	ret
  	CFI_ENDPROC
  ENDPROC(call_rwsem_down_write_failed)
  
  ENTRY(call_rwsem_wake)
  	CFI_STARTPROC
  	/* do nothing if still outstanding active readers */
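  	/*
  	 * The __up_read()/__up_write() fast paths in <asm/rwsem.h> pass the
  	 * old value of the count here in %edx/%rdx; the half-size decrement
  	 * below checks whether any active lockers remain after this release.
  	 */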
  	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
  	jnz 1f
  	save_common_regs
  	movq %rax,%rdi
  	call rwsem_wake
  	restore_common_regs
  1:	ret
  	CFI_ENDPROC
  ENDPROC(call_rwsem_wake)
  
  ENTRY(call_rwsem_downgrade_wake)
  	CFI_STARTPROC
  	save_common_regs
  	__ASM_SIZE(push,_cfi) %__ASM_REG(dx)
  	CFI_REL_OFFSET __ASM_REG(dx), 0
  	movq %rax,%rdi
  	call rwsem_downgrade_wake
  	__ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
  	CFI_RESTORE __ASM_REG(dx)
  	restore_common_regs
  	ret
  	CFI_ENDPROC
  ENDPROC(call_rwsem_downgrade_wake)