Blame view

kernel/linux-rt-4.4.41/arch/arm/mach-omap2/sleep33xx.S 5.6 KB
5113f6f70   김현기   kernel add
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
  /*
   * Low level suspend code for AM33XX SoCs
   *
   * Copyright (C) 2012-2015 Texas Instruments Incorporated - http://www.ti.com/
   *	Vaibhav Bedia, Dave Gerlach
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License as
   * published by the Free Software Foundation version 2.
   *
   * This program is distributed "as is" WITHOUT ANY WARRANTY of any
   * kind, whether express or implied; without even the implied warranty
   * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   */
  
  #include <linux/linkage.h>
  #include <asm/memory.h>
  #include <asm/assembler.h>
  #include <linux/platform_data/pm33xx.h>
  
  #include "iomap.h"
  #include "cm33xx.h"
  
  /* replicated define because linux/bitops.h cannot be included in assembly */
  #define BIT(nr)			(1 << (nr))
  
  #define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE			0x0003
  #define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE			0x0002
  
  	.text
  	.align 3
  
  ENTRY(am33xx_do_wfi)
  	/*
  	 * int am33xx_do_wfi(unsigned long wfi_flags)
  	 *
  	 * In:  r0 = wfi_flags, a WFI_FLAG_* bitmask (linux/platform_data/
  	 *      pm33xx.h) selecting which low-power steps to perform.
  	 * Out: r0 = 1 on the abort path below (WFI terminated by a late
  	 *      interrupt); on a successful deep sleep, MPU context is lost
  	 *      and execution resumes via am33xx_resume_from_deep_sleep
  	 *      instead of this function's epilogue.
  	 *
  	 * r4 holds wfi_flags for the whole function (callee-saved, so it
  	 * survives the blx calls into the EMIF helpers).  This code is
  	 * copied to and executed from SRAM (see am33xx_pm_sram), which is
  	 * why it may safely store to the wfi_flags word in its own text.
  	 */
  	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
  
  	/* Save wfi_flags argument and move to a preserved register */
  	str	r0, wfi_flags
  	mov	r4, r0
  
  	/* Only flush cache if we know we are losing MPU context */
  	tst	r4, #WFI_FLAG_FLUSH_CACHE
  	beq	cache_skip_flush
  
  	/*
  	 * Flush all data from the L1 and L2 data cache before disabling
  	 * SCTLR.C bit.
  	 */
  	ldr	r1, kernel_flush
  	blx	r1
  
  	/*
  	 * Clear the SCTLR.C bit to prevent further data cache
  	 * allocation. Clearing SCTLR.C would make all the data accesses
  	 * strongly ordered and would not hit the cache.
  	 */
  	mrc	p15, 0, r0, c1, c0, 0
  	bic	r0, r0, #(1 << 2)	@ Disable the C bit
  	mcr	p15, 0, r0, c1, c0, 0
  	isb
  
  	/*
  	 * Invalidate L1 and L2 data cache.
  	 * (Second pass through the flush routine, now with caching off,
  	 * so no dirty lines can be left behind.)
  	 */
  	ldr	r1, kernel_flush
  	blx	r1
  
  	/* v7_flush_dcache_all doesn't preserve registers so reload wfi_flags*/
  	ldr	r4, wfi_flags
  
  cache_skip_flush:
  	/* Only necessary if PER is losing context */
  	tst	r4, #WFI_FLAG_SAVE_EMIF
  	beq	emif_skip_save
  
  	ldr	r1, ti_emif_save_context
  	blx	r1
  
  emif_skip_save:
  	/* Check if we want self refresh, if so enter SR and disable EMIF */
  	tst	r4, #WFI_FLAG_SELF_REFRESH
  	beq	emif_skip_enter_sr
  
  	ldr	r1, ti_emif_enter_sr
  	blx	r1
  
  	/*
  	 * Disable EMIF: bic with the 0x3 mask clears the MODULEMODE
  	 * field, i.e. sets it to 0x0 = DISABLED.
  	 */
  	ldr     r1, virt_emif_clkctrl
  	ldr     r2, [r1]
  	bic     r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
  	str     r2, [r1]
  
  	/* Spin until IDLEST reports the module fully disabled (0x30000) */
  	ldr	r1, virt_emif_clkctrl
  wait_emif_disable:
  	ldr	r2, [r1]
  	ldr	r3, module_disabled_val
  	cmp	r2, r3
  	bne	wait_emif_disable
  
  emif_skip_enter_sr:
  	tst	r4, #WFI_FLAG_WAKE_M3
  	beq	wkup_m3_skip
  
  	/*
  	 * For the MPU WFI to be registered as an interrupt
  	 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
  	 * to DISABLED (bic of the 0x3 mask -> MODULEMODE = 0x0)
  	 */
  	ldr	r1, virt_mpu_clkctrl
  	ldr	r2, [r1]
  	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
  	str	r2, [r1]
  
  wkup_m3_skip:
  	/*
  	 * Execute an ISB instruction to ensure that all of the
  	 * CP15 register changes have been committed.
  	 */
  	isb
  
  	/*
  	 * Execute a barrier instruction to ensure that all cache,
  	 * TLB and branch predictor maintenance operations issued
  	 * have completed.
  	 */
  	dsb
  	dmb
  
  	/*
  	 * Execute a WFI instruction and wait until the
  	 * STANDBYWFI output is asserted to indicate that the
  	 * CPU is in idle and low power state. CPU can speculatively
  	 * prefetch the instructions so add NOPs after WFI. Thirteen
  	 * NOPs as per Cortex-A8 pipeline.
  	 */
  	wfi
  
  	nop
  	nop
  	nop
  	nop
  	nop
  	nop
  	nop
  	nop
  	nop
  	nop
  	nop
  	nop
  	nop
  
  	/* We come here in case of an abort due to a late interrupt */
  
  	/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
  	ldr	r1, virt_mpu_clkctrl
  	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
  	str	r2, [r1]
  
  	/* Only necessary if PER is losing context */
  	tst	r4, #WFI_FLAG_SELF_REFRESH
  	beq	emif_skip_exit_sr_abt
  
  	/* Re-enable EMIF and wait for MODULEMODE to read back as ENABLE */
  	ldr	r1, virt_emif_clkctrl
  	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
  	str	r2, [r1]
  wait_emif_enable:
  	ldr	r3, [r1]
  	cmp	r2, r3
  	bne	wait_emif_enable
  
  	ldr	r1, ti_emif_abort_sr
  	blx	r1
  
  emif_skip_exit_sr_abt:
  	tst	r4, #WFI_FLAG_FLUSH_CACHE
  	beq	cache_skip_restore
  
  	/*
  	 * Set SCTLR.C bit to allow data cache allocation
  	 */
  	mrc	p15, 0, r0, c1, c0, 0
  	orr	r0, r0, #(1 << 2)	@ Enable the C bit
  	mcr	p15, 0, r0, c1, c0, 0
  	isb
  
  cache_skip_restore:
  	/* Let the suspend code know about the abort */
  	mov	r0, #1
  	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
  ENDPROC(am33xx_do_wfi)
  
  	.align
  /*
   * Byte offset of this point (and thus of the resume code that follows)
   * from the start of am33xx_do_wfi.  Presumably read by the PM code to
   * locate the resume entry inside the SRAM copy of this blob — confirm
   * against the pm33xx consumer.
   */
  ENTRY(am33xx_resume_offset)
  	.word . - am33xx_do_wfi
  
  ENTRY(am33xx_resume_from_deep_sleep)
  	/*
  	 * Resume entry after deep sleep.  Runs before the MMU is on, so it
  	 * uses the *physical* EMIF CLKCTRL address and a small PC-relative
  	 * temp stack (adr below) rather than any kernel virtual mappings.
  	 */
  	/* Re-enable EMIF and spin until MODULEMODE reads back as ENABLE */
  	ldr	r0, phys_emif_clkctrl
  	mov	r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
  	str	r1, [r0]
  wait_emif_enable1:
  	ldr	r2, [r0]
  	cmp	r1, r2
  	bne	wait_emif_enable1
  
  	/* Local 64-byte scratch stack for the EMIF helper calls below */
  	adr	sp, temp_stack
  
  	ldr	r1, ti_emif_restore_context
  	blx	r1
  
  	ldr	r1, ti_emif_exit_sr
  	blx	r1
  
  resume_to_ddr:
  	/* We are back. Branch to the common CPU resume routine */
  	mov	r0, #0
  	ldr	pc, resume_addr
  ENDPROC(am33xx_resume_from_deep_sleep)
  
  /*
   * Local variables (PC-relative literal pool; this whole blob, data
   * included, is copied to SRAM, which is why the code above may store
   * into wfi_flags even though it sits in .text)
   */
  	.align
  kernel_flush:
  	.word   v7_flush_dcache_all	@ address of the kernel's L1/L2 D-cache flush
  virt_mpu_clkctrl:
  	.word	AM33XX_CM_MPU_MPU_CLKCTRL	@ virtual addr of CM_MPU MPU_CLKCTRL
  virt_emif_clkctrl:
  	.word	AM33XX_CM_PER_EMIF_CLKCTRL	@ virtual addr of CM_PER EMIF_CLKCTRL
  phys_emif_clkctrl:
  	.word	(AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
  		AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET)	@ physical addr, for pre-MMU resume
  module_disabled_val:
  	.word	0x30000		@ CLKCTRL value when IDLEST reports "disabled"
  wfi_flags:
  	.word	0x00000000	@ scratch slot: wfi_flags saved/reloaded across blx calls
  
  /*
   * DDR related defines.  The five words below are 0 placeholders;
   * presumably patched at runtime with the SRAM addresses of the
   * ti-emif helper routines — confirm against the pm33xx/emif setup code.
   */
  am33xx_emif_sram_table:
  ti_emif_save_context:
  	.word	0x00000000
  ti_emif_restore_context:
  	.word	0x00000000
  ti_emif_enter_sr:
  	.word	0x00000000
  ti_emif_exit_sr:
  	.word	0x00000000
  ti_emif_abort_sr:
  	.word	0x00000000
  	.align 3
  	.space 64	@ 64-byte scratch stack; temp_stack labels its top (full-descending)
  temp_stack:
  
  /*
   * Address table exported to the C PM code: where the WFI blob starts,
   * its size, the resume offset, and the EMIF function-pointer table.
   */
  ENTRY(am33xx_pm_sram)
  .word am33xx_do_wfi
  .word am33xx_do_wfi_sz
  .word am33xx_resume_offset
  .word am33xx_emif_sram_table
  rtc_base_virt:
  .word 0xdeadbeef	@ placeholder; presumably patched at runtime — confirm in pm33xx code
  resume_addr:
  	/*
  	 * Physical address of cpu_resume: virt-to-phys by replacing
  	 * PAGE_OFFSET with the 0x80000000 DDR base.
  	 */
  .word cpu_resume - PAGE_OFFSET + 0x80000000
  
  /* Total size in bytes of the blob to copy to SRAM (code + data above) */
  ENTRY(am33xx_do_wfi_sz)
  	.word	. - am33xx_do_wfi