kernel/linux-rt-4.4.41/arch/powerpc/lib/memcpy_64.S

  /*
   * Copyright (C) 2002 Paul Mackerras, IBM Corp.
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
   * as published by the Free Software Foundation; either version
   * 2 of the License, or (at your option) any later version.
   */
  #include <asm/processor.h>
  #include <asm/ppc_asm.h>
  
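  /*
   * memcpy for 64-bit PowerPC: r3 = dest, r4 = src, r5 = length; the
   * original destination pointer is returned in r3.  On CPUs with
   * CPU_FTR_VMX_COPY the feature section below branches straight to
   * memcpy_power7; everything else falls through to the integer copy.
   */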
  	.align	7
  _GLOBAL_TOC(memcpy)
  BEGIN_FTR_SECTION
  #ifdef __LITTLE_ENDIAN__
  	cmpdi	cr7,r5,0
  #else
  	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* save destination pointer for return value */
  #endif
  FTR_SECTION_ELSE
  #ifndef SELFTEST
  	b	memcpy_power7
  #endif
  ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
  #ifdef __LITTLE_ENDIAN__
  	/* dumb little-endian memcpy that will get replaced at runtime */
  	addi r9,r3,-1
  	addi r4,r4,-1
  	beqlr cr7
  	mtctr r5
  1:	lbzu r10,1(r4)
  	stbu r10,1(r9)
  	bdnz 1b
  	blr
  #else
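  	/*
  	 * Copy the low four bits of the length into cr7 so the tail
  	 * code can test the 8-, 4-, 2- and 1-byte remainders with
  	 * single bf/bt branches.
  	 */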
  	PPC_MTOCRF(0x01,r5)
  	cmpldi	cr1,r5,16
  	neg	r6,r3		# LS 3 bits = # bytes to 8-byte dest bdry
  	andi.	r6,r6,7
  	dcbt	0,r4
  	blt	cr1,.Lshort_copy
  /*
   * Below we want to nop out the bne if we're on a CPU that has the
   * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
   * cleared.  At the time of writing the only CPU that has this
   * combination of bits set is Power6.
   */
  BEGIN_FTR_SECTION
  	nop
  FTR_SECTION_ELSE
  	bne	.Ldst_unaligned
  ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
                      CPU_FTR_UNALIGNED_LD_STD)
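  /*
   * Destination is 8-byte aligned here.  If the source is not (and
   * the CPU lacks CPU_FTR_UNALIGNED_LD_STD) we branch to
   * .Lsrc_unaligned; otherwise the software-pipelined loop below
   * copies 16 bytes (two doublewords) per iteration.
   */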
  .Ldst_aligned:
  	addi	r3,r3,-16
  BEGIN_FTR_SECTION
  	andi.	r0,r4,7
  	bne	.Lsrc_unaligned
  END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
  	srdi	r7,r5,4
  	ld	r9,0(r4)
  	addi	r4,r4,-8
  	mtctr	r7
  	andi.	r5,r5,7
  	bf	cr7*4+0,2f
  	addi	r3,r3,8
  	addi	r4,r4,8
  	mr	r8,r9
  	blt	cr1,3f
  1:	ld	r9,8(r4)
  	std	r8,8(r3)
  2:	ldu	r8,16(r4)
  	stdu	r9,16(r3)
  	bdnz	1b
  3:	std	r8,8(r3)
  	beq	3f
  	addi	r3,r3,16
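  /*
   * Store the final 0-7 bytes: a word, then a halfword, then a byte,
   * as selected by the low bits of the length in cr7.
   */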
  .Ldo_tail:
  	bf	cr7*4+1,1f
  	lwz	r9,8(r4)
  	addi	r4,r4,4
  	stw	r9,0(r3)
  	addi	r3,r3,4
  1:	bf	cr7*4+2,2f
  	lhz	r9,8(r4)
  	addi	r4,r4,2
  	sth	r9,0(r3)
  	addi	r3,r3,2
  2:	bf	cr7*4+3,3f
  	lbz	r9,8(r4)
  	stb	r9,0(r3)
  3:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
  	blr
  
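  /*
   * Destination is 8-byte aligned but the source is not.  Round the
   * source down to an 8-byte boundary and build each destination
   * doubleword from two adjacent source doublewords, shifting with
   * sld/srd by r10 = 8*(src & 7) bits and r11 = 64 - r10 bits.
   */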
  .Lsrc_unaligned:
  	srdi	r6,r5,3
  	addi	r5,r5,-16
  	subf	r4,r0,r4
  	srdi	r7,r5,4
  	sldi	r10,r0,3
  	cmpdi	cr6,r6,3
  	andi.	r5,r5,7
  	mtctr	r7
  	subfic	r11,r10,64
  	add	r5,r5,r0
  
  	bt	cr7*4+0,0f
  
  	ld	r9,0(r4)	# 3+2n loads, 2+2n stores
  	ld	r0,8(r4)
  	sld	r6,r9,r10
  	ldu	r9,16(r4)
  	srd	r7,r0,r11
  	sld	r8,r0,r10
  	or	r7,r7,r6
  	blt	cr6,4f
  	ld	r0,8(r4)
  	# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
  	b	2f
  
  0:	ld	r0,0(r4)	# 4+2n loads, 3+2n stores
  	ldu	r9,8(r4)
  	sld	r8,r0,r10
  	addi	r3,r3,-8
  	blt	cr6,5f
  	ld	r0,8(r4)
  	srd	r12,r9,r11
  	sld	r6,r9,r10
  	ldu	r9,16(r4)
  	or	r12,r8,r12
  	srd	r7,r0,r11
  	sld	r8,r0,r10
  	addi	r3,r3,16
  	beq	cr6,3f
  
  	# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
  1:	or	r7,r7,r6
  	ld	r0,8(r4)
  	std	r12,8(r3)
  2:	srd	r12,r9,r11
  	sld	r6,r9,r10
  	ldu	r9,16(r4)
  	or	r12,r8,r12
  	stdu	r7,16(r3)
  	srd	r7,r0,r11
  	sld	r8,r0,r10
  	bdnz	1b
  
  3:	std	r12,8(r3)
  	or	r7,r7,r6
  4:	std	r7,16(r3)
  5:	srd	r12,r9,r11
  	or	r12,r8,r12
  	std	r12,24(r3)
  	beq	4f
  	cmpwi	cr1,r5,8
  	addi	r3,r3,32
  	sld	r9,r9,r10
  	ble	cr1,6f
  	ld	r0,8(r4)
  	srd	r7,r0,r11
  	or	r9,r7,r9
  6:
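  	/*
  	 * The leftover bytes sit left-justified in r9; rotate them down
  	 * and store a word, a halfword and a byte as cr7 directs.
  	 */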
  	bf	cr7*4+1,1f
  	rotldi	r9,r9,32
  	stw	r9,0(r3)
  	addi	r3,r3,4
  1:	bf	cr7*4+2,2f
  	rotldi	r9,r9,16
  	sth	r9,0(r3)
  	addi	r3,r3,2
  2:	bf	cr7*4+3,3f
  	rotldi	r9,r9,8
  	stb	r9,0(r3)
  3:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
  	blr
  
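  /*
   * Destination is not 8-byte aligned: copy one, two and then four
   * bytes as needed (r6 = bytes to the 8-byte boundary, mirrored into
   * cr7) and rejoin the aligned path at .Ldst_aligned.
   */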
  .Ldst_unaligned:
  	PPC_MTOCRF(0x01,r6)		# put #bytes to 8B bdry into cr7
  	subf	r5,r6,r5
  	li	r7,0
  	cmpldi	cr1,r5,16
  	bf	cr7*4+3,1f
  	lbz	r0,0(r4)
  	stb	r0,0(r3)
  	addi	r7,r7,1
  1:	bf	cr7*4+2,2f
  	lhzx	r0,r7,r4
  	sthx	r0,r7,r3
  	addi	r7,r7,2
  2:	bf	cr7*4+1,3f
  	lwzx	r0,r7,r4
  	stwx	r0,r7,r3
  3:	PPC_MTOCRF(0x01,r5)
  	add	r4,r6,r4
  	add	r3,r6,r3
  	b	.Ldst_aligned
  
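  /*
   * Fewer than 16 bytes to copy: move 8, 4, 2 and finally 1 byte(s),
   * each step enabled by the corresponding bit of the length in cr7.
   */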
  .Lshort_copy:
  	bf	cr7*4+0,1f
  	lwz	r0,0(r4)
  	lwz	r9,4(r4)
  	addi	r4,r4,8
  	stw	r0,0(r3)
  	stw	r9,4(r3)
  	addi	r3,r3,8
  1:	bf	cr7*4+1,2f
  	lwz	r0,0(r4)
  	addi	r4,r4,4
  	stw	r0,0(r3)
  	addi	r3,r3,4
  2:	bf	cr7*4+2,3f
  	lhz	r0,0(r4)
  	addi	r4,r4,2
  	sth	r0,0(r3)
  	addi	r3,r3,2
  3:	bf	cr7*4+3,4f
  	lbz	r0,0(r4)
  	stb	r0,0(r3)
  4:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
  	blr
  #endif