kernel/linux-imx6_3.14.28/arch/arm/mach-ep93xx/crunch-bits.S (8.32 KB)
6b13f685e   김민수   Initial BSP addition
  /*
   * arch/arm/kernel/crunch-bits.S
   * Cirrus MaverickCrunch context switching and handling
   *
   * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
   *
   * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
   * Copyright (c) 2003-2004, MontaVista Software, Inc.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
  
  #include <linux/linkage.h>
  #include <asm/ptrace.h>
  #include <asm/thread_info.h>
  #include <asm/asm-offsets.h>
  #include <mach/ep93xx-regs.h>
  
  /*
   * We can't use hex constants here due to a bug in gas.
   */
  #define CRUNCH_MVDX0		0
  #define CRUNCH_MVDX1		8
  #define CRUNCH_MVDX2		16
  #define CRUNCH_MVDX3		24
  #define CRUNCH_MVDX4		32
  #define CRUNCH_MVDX5		40
  #define CRUNCH_MVDX6		48
  #define CRUNCH_MVDX7		56
  #define CRUNCH_MVDX8		64
  #define CRUNCH_MVDX9		72
  #define CRUNCH_MVDX10		80
  #define CRUNCH_MVDX11		88
  #define CRUNCH_MVDX12		96
  #define CRUNCH_MVDX13		104
  #define CRUNCH_MVDX14		112
  #define CRUNCH_MVDX15		120
  #define CRUNCH_MVAX0L		128
  #define CRUNCH_MVAX0M		132
  #define CRUNCH_MVAX0H		136
  #define CRUNCH_MVAX1L		140
  #define CRUNCH_MVAX1M		144
  #define CRUNCH_MVAX1H		148
  #define CRUNCH_MVAX2L		152
  #define CRUNCH_MVAX2M		156
  #define CRUNCH_MVAX2H		160
  #define CRUNCH_MVAX3L		164
  #define CRUNCH_MVAX3M		168
  #define CRUNCH_MVAX3H		172
  #define CRUNCH_DSPSC		176
  
  #define CRUNCH_SIZE		184
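
/*
 * Layout check (illustrative): 16 MVDX registers x 8 bytes = 128 bytes
 * (offsets 0..120), 4 accumulators x 3 x 4-byte words = 48 bytes
 * (offsets 128..172), and DSPSC stored as a 64-bit word at offset 176,
 * giving the 184-byte total above.
 */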
  
  	.text
  
  /*
   * Lazy switching of crunch coprocessor context
   *
   * r10 = struct thread_info pointer
   * r9  = ret_from_exception
   * lr  = undefined instr exit
   *
   * called from prefetch exception handler with interrupts disabled
   */
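/*
 * Descriptive note: access to the coprocessor is normally kept disabled
 * in the syscon, so the first crunch instruction a non-owning task
 * executes traps into this handler.  It enables access, saves the
 * previous owner's state (if any) to that owner's thread_info save
 * area, loads this task's saved state, and rewinds the pc by 4 so the
 * faulting instruction is retried with access enabled.
 */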
  ENTRY(crunch_task_enable)
  	ldr	r8, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr
  
  	ldr	r1, [r8, #0x80]
  	tst	r1, #0x00800000			@ access to crunch enabled?
  	movne	pc, lr				@ if so no business here
  	mov	r3, #0xaa			@ unlock syscon swlock
  	str	r3, [r8, #0xc0]
  	orr	r1, r1, #0x00800000		@ enable access to crunch
  	str	r1, [r8, #0x80]
  
  	ldr	r3, =crunch_owner
  	add	r0, r10, #TI_CRUNCH_STATE	@ get task crunch save area
  	ldr	r2, [sp, #60]			@ current task pc value
  	ldr	r1, [r3]			@ get current crunch owner
  	str	r0, [r3]			@ this task now owns crunch
  	sub	r2, r2, #4			@ adjust pc back
  	str	r2, [sp, #60]
  
  	ldr	r2, [r8, #0x80]
  	mov	r2, r2				@ flush out enable (@@@)
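	@ (the read-back and dependent mov above presumably serve to drain
	@ the write buffer, so the enable write has taken effect before the
	@ first coprocessor access below)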
  
  	teq	r1, #0				@ test for last ownership
  	mov	lr, r9				@ normal exit from exception
  	beq	crunch_load			@ no owner, skip save
  
  crunch_save:
  	cfstr64		mvdx0, [r1, #CRUNCH_MVDX0]	@ save 64b registers
  	cfstr64		mvdx1, [r1, #CRUNCH_MVDX1]
  	cfstr64		mvdx2, [r1, #CRUNCH_MVDX2]
  	cfstr64		mvdx3, [r1, #CRUNCH_MVDX3]
  	cfstr64		mvdx4, [r1, #CRUNCH_MVDX4]
  	cfstr64		mvdx5, [r1, #CRUNCH_MVDX5]
  	cfstr64		mvdx6, [r1, #CRUNCH_MVDX6]
  	cfstr64		mvdx7, [r1, #CRUNCH_MVDX7]
  	cfstr64		mvdx8, [r1, #CRUNCH_MVDX8]
  	cfstr64		mvdx9, [r1, #CRUNCH_MVDX9]
  	cfstr64		mvdx10, [r1, #CRUNCH_MVDX10]
  	cfstr64		mvdx11, [r1, #CRUNCH_MVDX11]
  	cfstr64		mvdx12, [r1, #CRUNCH_MVDX12]
  	cfstr64		mvdx13, [r1, #CRUNCH_MVDX13]
  	cfstr64		mvdx14, [r1, #CRUNCH_MVDX14]
  	cfstr64		mvdx15, [r1, #CRUNCH_MVDX15]
  
  #ifdef __ARMEB__
  #error fix me for ARMEB
  #endif
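
	@ the 72-bit accumulators are moved out through mvfx0 one 32-bit
	@ word at a time: al = bits 31..0, am = bits 63..32, ah = the top
	@ 8 bits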
  
  	cfmv32al	mvfx0, mvax0			@ save 72b accumulators
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX0L]
  	cfmv32am	mvfx0, mvax0
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX0M]
  	cfmv32ah	mvfx0, mvax0
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX0H]
  	cfmv32al	mvfx0, mvax1
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX1L]
  	cfmv32am	mvfx0, mvax1
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX1M]
  	cfmv32ah	mvfx0, mvax1
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX1H]
  	cfmv32al	mvfx0, mvax2
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX2L]
  	cfmv32am	mvfx0, mvax2
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX2M]
  	cfmv32ah	mvfx0, mvax2
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX2H]
  	cfmv32al	mvfx0, mvax3
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX3L]
  	cfmv32am	mvfx0, mvax3
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX3M]
  	cfmv32ah	mvfx0, mvax3
  	cfstr32		mvfx0, [r1, #CRUNCH_MVAX3H]
  
  	cfmv32sc	mvdx0, dspsc			@ save status word
  	cfstr64		mvdx0, [r1, #CRUNCH_DSPSC]
  
  	teq		r0, #0				@ anything to load?
  	cfldr64eq	mvdx0, [r1, #CRUNCH_MVDX0]	@ mvdx0 was clobbered
  	moveq		pc, lr
  
  crunch_load:
  	cfldr64		mvdx0, [r0, #CRUNCH_DSPSC]	@ load status word
  	cfmvsc32	dspsc, mvdx0
  
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX0L]	@ load 72b accumulators
  	cfmval32	mvax0, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX0M]
  	cfmvam32	mvax0, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX0H]
  	cfmvah32	mvax0, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX1L]
  	cfmval32	mvax1, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX1M]
  	cfmvam32	mvax1, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX1H]
  	cfmvah32	mvax1, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX2L]
  	cfmval32	mvax2, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX2M]
  	cfmvam32	mvax2, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX2H]
  	cfmvah32	mvax2, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX3L]
  	cfmval32	mvax3, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX3M]
  	cfmvam32	mvax3, mvfx0
  	cfldr32		mvfx0, [r0, #CRUNCH_MVAX3H]
  	cfmvah32	mvax3, mvfx0
  
  	cfldr64		mvdx0, [r0, #CRUNCH_MVDX0]	@ load 64b registers
  	cfldr64		mvdx1, [r0, #CRUNCH_MVDX1]
  	cfldr64		mvdx2, [r0, #CRUNCH_MVDX2]
  	cfldr64		mvdx3, [r0, #CRUNCH_MVDX3]
  	cfldr64		mvdx4, [r0, #CRUNCH_MVDX4]
  	cfldr64		mvdx5, [r0, #CRUNCH_MVDX5]
  	cfldr64		mvdx6, [r0, #CRUNCH_MVDX6]
  	cfldr64		mvdx7, [r0, #CRUNCH_MVDX7]
  	cfldr64		mvdx8, [r0, #CRUNCH_MVDX8]
  	cfldr64		mvdx9, [r0, #CRUNCH_MVDX9]
  	cfldr64		mvdx10, [r0, #CRUNCH_MVDX10]
  	cfldr64		mvdx11, [r0, #CRUNCH_MVDX11]
  	cfldr64		mvdx12, [r0, #CRUNCH_MVDX12]
  	cfldr64		mvdx13, [r0, #CRUNCH_MVDX13]
  	cfldr64		mvdx14, [r0, #CRUNCH_MVDX14]
  	cfldr64		mvdx15, [r0, #CRUNCH_MVDX15]
  
  	mov	pc, lr
  
  /*
   * Back up crunch regs to save area and disable access to them
   * (mainly for gdb or sleep mode usage)
   *
   * r0 = struct thread_info pointer of target task or NULL for any
   */
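/*
 * Sequence note: interrupts are masked for the duration, the syscon
 * software lock at +0xc0 is written with 0xaa to unlock each change to
 * the device-config register at +0x80, access is temporarily enabled so
 * crunch_save can read the registers out (r0 = 0 requests no reload),
 * and the enable bit is then cleared so the next use traps again.
 */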
  ENTRY(crunch_task_disable)
  	stmfd	sp!, {r4, r5, lr}
  
  	mrs	ip, cpsr
  	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
  	msr	cpsr_c, r2
  
  	ldr	r4, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr
  
  	ldr	r3, =crunch_owner
  	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
  	ldr	r1, [r3]			@ get current crunch owner
  	teq	r1, #0				@ any current owner?
  	beq	1f				@ no: quit
  	teq	r0, #0				@ any owner?
  	teqne	r1, r2				@ or specified one?
  	bne	1f				@ no: quit
  
  	ldr	r5, [r4, #0x80]			@ enable access to crunch
  	mov	r2, #0xaa
  	str	r2, [r4, #0xc0]
  	orr	r5, r5, #0x00800000
  	str	r5, [r4, #0x80]
  
  	mov	r0, #0				@ nothing to load
  	str	r0, [r3]			@ no more current owner
  	ldr	r2, [r4, #0x80]			@ flush out enable (@@@)
  	mov	r2, r2
  	bl	crunch_save
  
  	mov	r2, #0xaa			@ disable access to crunch
  	str	r2, [r4, #0xc0]
  	bic	r5, r5, #0x00800000
  	str	r5, [r4, #0x80]
  	ldr	r5, [r4, #0x80]			@ flush out enable (@@@)
  	mov	r5, r5
  
  1:	msr	cpsr_c, ip			@ restore interrupt mode
  	ldmfd	sp!, {r4, r5, pc}
  
  /*
   * Copy crunch state to given memory address
   *
   * r0 = struct thread_info pointer of target task
   * r1 = memory address where to store crunch state
   *
   * this is called mainly in the creation of signal stack frames
   */
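/*
 * Hedged C-side sketch (illustrative; modelled on the signal-frame code
 * in arch/arm/kernel/signal.c and not verified against this tree) --
 * the caller snapshots the state into a 64-bit-aligned kernel buffer
 * before copying it out to the user stack:
 *
 *	kframe->magic = CRUNCH_MAGIC;
 *	kframe->size  = CRUNCH_STORAGE_SIZE;
 *	crunch_task_copy(current_thread_info(), &kframe->storage);
 *	err = __copy_to_user(frame, kframe, sizeof(*kframe));
 */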
  ENTRY(crunch_task_copy)
  	mrs	ip, cpsr
  	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
  	msr	cpsr_c, r2
  
  	ldr	r3, =crunch_owner
  	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
  	ldr	r3, [r3]			@ get current crunch owner
  	teq	r2, r3				@ does this task own it...
  	beq	1f
  
  	@ current crunch values are in the task save area
  	msr	cpsr_c, ip			@ restore interrupt mode
  	mov	r0, r1
  	mov	r1, r2
  	mov	r2, #CRUNCH_SIZE
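	@ tail call: lr is untouched, so memcpy returns straight to our caller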
  	b	memcpy
  
  1:	@ this task owns crunch regs -- grab a copy from there
  	mov	r0, #0				@ nothing to load
  	mov	r3, lr				@ preserve return address
  	bl	crunch_save
  	msr	cpsr_c, ip			@ restore interrupt mode
  	mov	pc, r3
  
  /*
   * Restore crunch state from given memory address
   *
   * r0 = struct thread_info pointer of target task
   * r1 = memory address where to get crunch state from
   *
   * this is used to restore crunch state when unwinding a signal stack frame
   */
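/*
 * Hedged counterpart to the sketch above (illustrative only): when
 * unwinding a signal frame, the caller validates the saved magic and
 * size fields, copies the user frame into a kernel buffer, and then
 * calls crunch_task_restore(current_thread_info(), &kframe->storage),
 * which lands in either the memcpy or the crunch_load path below
 * depending on current ownership.
 */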
  ENTRY(crunch_task_restore)
  	mrs	ip, cpsr
  	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
  	msr	cpsr_c, r2
  
  	ldr	r3, =crunch_owner
  	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
  	ldr	r3, [r3]			@ get current crunch owner
  	teq	r2, r3				@ does this task own it...
  	beq	1f
  
  	@ this task doesn't own crunch regs -- use its save area
  	msr	cpsr_c, ip			@ restore interrupt mode
  	mov	r0, r2
  	mov	r2, #CRUNCH_SIZE
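	@ tail call into memcpy, as in crunch_task_copy above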
  	b	memcpy
  
  1:	@ this task owns crunch regs -- load them directly
  	mov	r0, r1
  	mov	r1, #0				@ nothing to save
  	mov	r3, lr				@ preserve return address
  	bl	crunch_load
  	msr	cpsr_c, ip			@ restore interrupt mode
  	mov	pc, r3