Blame view

kernel/linux-imx6_3.14.28/arch/arm/common/vlock.S 2.6 KB
6b13f685e   김민수   BSP 최초 추가
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
  /*
   * vlock.S - simple voting lock implementation for ARM
   *
   * Created by:	Dave Martin, 2012-08-16
   * Copyright:	(C) 2012-2013  Linaro Limited
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   *
   * This algorithm is described in more detail in
   * Documentation/arm/vlocks.txt.
   */
  
  #include <linux/linkage.h>
  #include "vlock.h"
  
  /* Select different code if the voting flags can fit in a single word. */
  /*
   * Exactly one of FEW()/MANY() passes its argument through to the
   * assembler:
   *   FEW(x)  emits x when the voting flags fit in one 32-bit word;
   *   MANY(x) emits x when the voting area spans several words.
   */
  #if VLOCK_VOTING_SIZE > 4
  #define FEW(x...)
  #define MANY(x...) x
  #else
  #define FEW(x...) x
  #define MANY(x...)
  #endif
  
  @ voting lock for first-man coordination
  
  @ voting_begin: enter a voting round by setting this CPU's byte-wide
  @ voting flag to 1.
  @   rbase:    lock structure base address
  @   rcpu:     byte offset of this CPU's voting flag within the structure
  @   rscratch: scratch register (clobbered)
  .macro voting_begin rbase:req, rcpu:req, rscratch:req
	mov	\rscratch, #1
	strb	\rscratch, [\rbase, \rcpu]	@ voting[cpu] = 1
	dmb					@ flag visible before any subsequent access
  .endm
  
  @ voting_end: leave a voting round by clearing this CPU's voting flag,
  @ then signal an event to wake CPUs sleeping in WFE.
  @   rbase:    lock structure base address
  @   rcpu:     byte offset of this CPU's voting flag within the structure
  @   rscratch: scratch register (clobbered)
  .macro voting_end rbase:req, rcpu:req, rscratch:req
	dmb					@ prior accesses complete before the flag clears
	mov	\rscratch, #0
	strb	\rscratch, [\rbase, \rcpu]	@ voting[cpu] = 0
	dsb	st				@ make the store visible before SEV
	sev					@ wake any CPUs waiting in WFE
  .endm
  
  /*
   * The vlock structure must reside in Strongly-Ordered or Device memory.
   * This implementation deliberately eliminates most of the barriers which
   * would be required for other memory types, and assumes that independent
   * writes to neighbouring locations within a cacheline do not interfere
   * with one another.
   */
  
  @ int vlock_trylock(struct vlock *lock, unsigned int cpu)
  @
  @ Try to take the voting lock, using the algorithm described in
  @ Documentation/arm/vlocks.txt.
  @
  @ r0: lock structure base
  @ r1: CPU ID (0-based index within cluster)
  @
  @ Returns in r0: zero if this CPU won the lock, nonzero if it lost.
  @ Clobbers: r1, r2, r3, flags.
  ENTRY(vlock_trylock)
	add	r1, r1, #VLOCK_VOTING_OFFSET	@ r1 = offset of this CPU's voting byte

	voting_begin	r0, r1, r2		@ raise our voting flag (implies DMB)

	ldrb	r2, [r0, #VLOCK_OWNER_OFFSET]	@ check whether lock is held
	cmp	r2, #VLOCK_OWNER_NONE
	bne	trylock_fail			@ fail if so

	@ Control dependency implies strb not observable before previous ldrb.

	strb	r1, [r0, #VLOCK_OWNER_OFFSET]	@ submit my vote

	voting_end	r0, r1, r2		@ implies DMB

	@ Wait for the current round of voting to finish:
	@ spin (sleeping in WFE while flags are set) until every voting flag
	@ reads as zero.
	@ MANY: walk the voting area one 32-bit word at a time (r3 = offset).
	@ FEW:  the whole voting area is a single 32-bit word.

 MANY(	mov	r3, #VLOCK_VOTING_OFFSET			)
0:
 MANY(	ldr	r2, [r0, r3]					)
 FEW(	ldr	r2, [r0, #VLOCK_VOTING_OFFSET]			)
	cmp	r2, #0
	wfene					@ sleep until an event (SEV from voting_end)
	bne	0b
 MANY(	add	r3, r3, #4					)
 MANY(	cmp	r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE	)
 MANY(	bne	0b						)

	@ Check who won:

	dmb
	ldrb	r2, [r0, #VLOCK_OWNER_OFFSET]
	eor	r0, r1, r2			@ zero if I won, else nonzero
	bx	lr

trylock_fail:
	voting_end	r0, r1, r2		@ withdraw our vote (implies DMB + SEV)
	mov	r0, #1				@ nonzero indicates that I lost
	bx	lr
ENDPROC(vlock_trylock)
  
  @ void vlock_unlock(struct vlock *lock)
  @
  @ Release the voting lock: reset the owner field to VLOCK_OWNER_NONE,
  @ then signal an event so CPUs sleeping in WFE re-check the lock state.
  @
  @ r0: lock structure base
  @ Clobbers: r1.
  ENTRY(vlock_unlock)
	dmb					@ critical-section accesses complete before release
	mov	r1, #VLOCK_OWNER_NONE
	strb	r1, [r0, #VLOCK_OWNER_OFFSET]	@ owner = none
	dsb	st				@ make the store visible before SEV
	sev					@ wake any CPUs waiting in WFE
	bx	lr
ENDPROC(vlock_unlock)