Blame view

kernel/linux-imx6_3.14.28/arch/arm/mm/copypage-v4mc.c 3.4 KB
6b13f685e   김민수   BSP 최초 추가
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
  /*
   *  linux/arch/arm/lib/copypage-armv4mc.S
   *
   *  Copyright (C) 1995-2005 Russell King
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   *
   * This handles the mini data cache, as found on SA11x0 and XScale
   * processors.  When we copy a user page page, we map it in such a way
   * that accesses to this page will not touch the main data cache, but
   * will be cached in the mini data cache.  This prevents us thrashing
   * the main data cache on page faults.
   */
  #include <linux/init.h>
  #include <linux/mm.h>
  #include <linux/highmem.h>
  
  #include <asm/pgtable.h>
  #include <asm/tlbflush.h>
  #include <asm/cacheflush.h>
  
  #include "mm.h"
  
  /*
   * Page protection for the transient mini-data-cache mapping: present,
   * young, and marked L_PTE_MT_MINICACHE so reads through it allocate in
   * the mini D-cache instead of the main D-cache.
   */
  #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
  				  L_PTE_MT_MINICACHE)
  
  /* Serializes use of the single COPYPAGE_MINICACHE virtual slot. */
  static DEFINE_RAW_SPINLOCK(minicache_lock);
  
  /*
   * ARMv4 mini-dcache optimised copy_user_highpage
   *
   * We flush the destination cache lines just before we write the data into the
   * corresponding address.  Since the Dcache is read-allocate, this removes the
   * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
   * and merged as appropriate.
   *
   * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
   * instruction.  If your processor does not supply this, you have to write your
   * own copy_user_highpage that does the right thing.
   */
  /*
   * Copy one page (PAGE_SIZE bytes) from 'from' to 'to', 64 bytes per loop
   * iteration, invalidating each destination D-cache line just before it is
   * written (read-allocate cache: this avoids aliasing stale lines).
   *
   * __naked: the asm provides the entire body including prologue/epilogue
   * (r4/lr are saved and restored by the stmfd/ldmfd pair); do not add C
   * statements here.  %2 is an immediate: PAGE_SIZE / 64 loop iterations.
   *
   * NOTE: each asm line must end in "\n\" — '\n' separates instructions for
   * the assembler, '\' continues the C string literal to the next line.
   */
  static void __naked
  mc_copy_user_page(void *from, void *to)
  {
  	asm volatile(
  	"stmfd	sp!, {r4, lr}			@ 2\n\
  	mov	r4, %2				@ 1\n\
  	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
  1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
  	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
  	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
  	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
  	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
  	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
  	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
  	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
  	subs	r4, r4, #1			@ 1\n\
  	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
  	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
  	bne	1b				@ 1\n\
  	ldmfd	sp!, {r4, pc}			@ 3"
  	:
  	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
  }
  
  /*
   * Copy a user highpage via the mini data cache.
   *
   * The source page is mapped at the dedicated COPYPAGE_MINICACHE virtual
   * address with minicache_pgprot, so the copy reads allocate in the mini
   * D-cache rather than thrashing the main D-cache.  The destination is a
   * regular kmap_atomic() mapping.
   *
   * Ordering matters: the source must be flushed from the main D-cache
   * (if still dirty there) BEFORE it is read through the mini-cache alias,
   * and the shared COPYPAGE_MINICACHE slot is only valid while
   * minicache_lock is held.
   */
  void v4_mc_copy_user_highpage(struct page *to, struct page *from,
  	unsigned long vaddr, struct vm_area_struct *vma)
  {
  	void *kto = kmap_atomic(to);
  
  	/* Flush 'from' out of the main D-cache unless already known clean. */
  	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
  		__flush_dcache_page(page_mapping(from), from);
  
  	raw_spin_lock(&minicache_lock);
  
  	/* Install the transient mini-cache mapping of the source page. */
  	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
  
  	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
  
  	raw_spin_unlock(&minicache_lock);
  
  	kunmap_atomic(kto);
  }
  
  /*
   * ARMv4 optimised clear_user_page
   */
  /*
   * ARMv4 optimised clear_user_page
   *
   * Zero one highpage through a kmap_atomic() mapping, 32 bytes per
   * inner step, invalidating each D-cache line before storing so the
   * zero-fill goes through the write buffer without allocating lines.
   * r1 counts PAGE_SIZE / 64 loop iterations; r2/r3/ip/lr hold the
   * zero pattern (all listed as clobbers).
   *
   * NOTE: each asm line must end in "\n\" — '\n' separates instructions
   * for the assembler, '\' continues the C string literal.
   */
  void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
  {
  	void *ptr, *kaddr = kmap_atomic(page);
  	asm volatile("\
  	mov	r1, %2				@ 1\n\
  	mov	r2, #0				@ 1\n\
  	mov	r3, #0				@ 1\n\
  	mov	ip, #0				@ 1\n\
  	mov	lr, #0				@ 1\n\
  1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
  	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
  	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
  	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
  	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
  	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
  	subs	r1, r1, #1			@ 1\n\
  	bne	1b				@ 1"
  	: "=r" (ptr)
  	: "0" (kaddr), "I" (PAGE_SIZE / 64)
  	: "r1", "r2", "r3", "ip", "lr");
  	kunmap_atomic(kaddr);
  }
  
  /* Per-CPU page-copy operations exported for v4 mini-cache processors. */
  struct cpu_user_fns v4_mc_user_fns __initdata = {
  	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
  	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
  };