Blame view

kernel/linux-imx6_3.14.28/arch/arm/mm/copypage-xsc3.c 2.81 KB
6b13f685e   김민수   Initial BSP addition
  /*
   *  linux/arch/arm/mm/copypage-xsc3.S
   *
   *  Copyright (C) 2004 Intel Corp.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   *
   * Adapted for 3rd gen XScale core, no more mini-dcache
   * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
   */
  #include <linux/init.h>
  #include <linux/highmem.h>
  
  /*
   * General note:
   *  We don't really want write-allocate cache behaviour for these functions
   *  since that will just eat through 8K of the cache.
   */
  
  /*
   * XSC3 optimised copy_user_highpage
   *  r0 = destination
   *  r1 = source
   *
   * The source page may have some clean entries in the cache already, but we
   * can safely ignore them - break_cow() will flush them out of the cache
   * if we eventually end up using our copied page.
   *
   */
  static void __naked
  xsc3_mc_copy_user_page(void *kto, const void *kfrom)
  {
  	asm("\
  	stmfd	sp!, {r4, r5, lr}		\n\
  	mov	lr, %2				\n\
  						\n\
  	pld	[r1, #0]			\n\
  	pld	[r1, #32]			\n\
  1:	pld	[r1, #64]			\n\
  	pld	[r1, #96]			\n\
  						\n\
  2:	ldrd	r2, [r1], #8			\n\
  	mov	ip, r0				\n\
  	ldrd	r4, [r1], #8			\n\
  	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
  	strd	r2, [r0], #8			\n\
  	ldrd	r2, [r1], #8			\n\
  	strd	r4, [r0], #8			\n\
  	ldrd	r4, [r1], #8			\n\
  	strd	r2, [r0], #8			\n\
  	strd	r4, [r0], #8			\n\
  	ldrd	r2, [r1], #8			\n\
  	mov	ip, r0				\n\
  	ldrd	r4, [r1], #8			\n\
  	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
  	strd	r2, [r0], #8			\n\
  	ldrd	r2, [r1], #8			\n\
  	subs	lr, lr, #1			\n\
  	strd	r4, [r0], #8			\n\
  	ldrd	r4, [r1], #8			\n\
  	strd	r2, [r0], #8			\n\
  	strd	r4, [r0], #8			\n\
  	bgt	1b				\n\
  	beq	2b				\n\
  						\n\
  	ldmfd	sp!, {r4, r5, pc}"
  	:
  	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
  }
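
  /*
   * Illustrative C sketch (not part of the original file) of what the
   * hand-scheduled assembly above does: copy one page in 32-byte cache
   * line units, invalidating each destination line before it is written
   * so the stores do not write-allocate and evict useful data.  The
   * function name is hypothetical; the kernel uses the assembly version
   * because it also prefetches the source and pairs the ldrd/strd
   * accesses for the XSC3 pipeline.
   */
  static inline void xsc3_copy_page_sketch(void *kto, const void *kfrom)
  {
  	u64 *dst = kto;
  	const u64 *src = kfrom;
  	unsigned int line;
  
  	for (line = 0; line < PAGE_SIZE / 32; line++) {
  		/* invalidate the destination D-cache line (same mcr as above) */
  		asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (dst));
  		dst[0] = src[0];
  		dst[1] = src[1];
  		dst[2] = src[2];
  		dst[3] = src[3];
  		dst += 4;
  		src += 4;
  	}
  }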
  
  void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
  	unsigned long vaddr, struct vm_area_struct *vma)
  {
  	void *kto, *kfrom;
  
  	kto = kmap_atomic(to);
  	kfrom = kmap_atomic(from);
  	flush_cache_page(vma, vaddr, page_to_pfn(from));
  	xsc3_mc_copy_user_page(kto, kfrom);
  	kunmap_atomic(kfrom);
  	kunmap_atomic(kto);
  }
  
  /*
   * XScale optimised clear_user_page
   *  r0 = destination
   *  r1 = virtual user address of ultimate destination page
   */
  void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
  {
  	void *ptr, *kaddr = kmap_atomic(page);
  	asm volatile ("\
  	mov	r1, %2				\n\
  	mov	r2, #0				\n\
  	mov	r3, #0				\n\
  1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
  	strd	r2, [%0], #8			\n\
  	strd	r2, [%0], #8			\n\
  	strd	r2, [%0], #8			\n\
  	strd	r2, [%0], #8			\n\
  	subs	r1, r1, #1			\n\
  	bne	1b"
  	: "=r" (ptr)
  	: "0" (kaddr), "I" (PAGE_SIZE / 32)
  	: "r1", "r2", "r3");
  	kunmap_atomic(kaddr);
  }
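
  /*
   * Illustrative C sketch (not part of the original file) of the clear
   * loop above: zero the page one 32-byte cache line at a time, again
   * invalidating each line first so the zeroing stores do not
   * write-allocate.  The function name is hypothetical.
   */
  static inline void xsc3_clear_page_sketch(void *kaddr)
  {
  	u64 *p = kaddr;
  	unsigned int line;
  
  	for (line = 0; line < PAGE_SIZE / 32; line++) {
  		/* invalidate the D-cache line before writing the zeros */
  		asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (p));
  		p[0] = 0;
  		p[1] = 0;
  		p[2] = 0;
  		p[3] = 0;
  		p += 4;
  	}
  }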
  
  struct cpu_user_fns xsc3_mc_user_fns __initdata = {
  	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
  	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
  };
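
  /*
   * Usage sketch (an assumption based on the usual arch/arm MULTI_USER
   * wiring, not code from this file): during boot the table selected by
   * the CPU's proc_info entry is copied into the global 'cpu_user', and
   * the generic highpage helpers dispatch through it, roughly:
   *
   *	struct cpu_user_fns cpu_user;	// filled in during processor setup
   *
   *	#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
   *	#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
   *
   * so clear_user_highpage()/copy_user_highpage() end up in the two
   * functions above on an XSC3 core.  The table itself is __initdata
   * because only the copy in 'cpu_user' is used after init.
   */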