Blame view

kernel/linux-rt-4.4.41/arch/xtensa/include/asm/page.h 5.59 KB
5113f6f70   김현기   kernel add
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
  /*
   * include/asm-xtensa/page.h
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   *
   * Copyright (C) 2001 - 2007 Tensilica Inc.
   */
  
  #ifndef _XTENSA_PAGE_H
  #define _XTENSA_PAGE_H
  
  #include <asm/processor.h>
  #include <asm/types.h>
  #include <asm/cache.h>
  #include <platform/hardware.h>
  
  /*
   * Fixed TLB translations in the processor.
   */

  /*
   * KSEG: a fixed 0x08000000-byte (128 MiB) window over physical address 0,
   * available through two virtual bases: one cached (0xd0000000) and one
   * cache-bypassed (0xd8000000).
   */
  #define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
  #define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
  #define XCHAL_KSEG_PADDR        __XTENSA_UL_CONST(0x00000000)
  #define XCHAL_KSEG_SIZE         __XTENSA_UL_CONST(0x08000000)
  
  /*
   * PAGE_SHIFT determines the page size
   */

  /* 4 KiB pages: PAGE_SIZE = 1 << 12; PAGE_MASK clears the in-page offset. */
  #define PAGE_SHIFT	12
  #define PAGE_SIZE	(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
  #define PAGE_MASK	(~(PAGE_SIZE-1))

  #ifdef CONFIG_MMU
  /* MMU kernels run out of the cached KSEG mapping. */
  #define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
  #define MAX_MEM_PFN	XCHAL_KSEG_SIZE
  #else
  /* No MMU: kernel addresses are physical, so there is no fixed offset. */
  #define PAGE_OFFSET	__XTENSA_UL_CONST(0)
  #define MAX_MEM_PFN	(PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
  #endif
  /* NOTE(review): despite the name, MAX_MEM_PFN expands to byte values
   * (a segment size / an end address), not a page frame number -- check
   * how callers use it before relying on the name. */

  #define PGTABLE_START	0x80000000
  
  /*
   * Cache aliasing:
   *
   * If the cache size for one way is greater than the page size, we have to
   * deal with cache aliasing. The cache index is wider than the page size:
   *
   * |    |cache| cache index
   * | pfn  |off|	virtual address
   * |xxxx:X|zzz|
   * |    : |   |
   * | \  / |   |
   * |trans.|   |
   * | /  \ |   |
   * |yyyy:Y|zzz|	physical address
   *
   * When the page number is translated to the physical page address, the lowest
   * bit(s) (X) that are part of the cache index are also translated (Y).
   * If this translation changes bit(s) (X), the cache index is also affected,
   * thus resulting in a different cache line than before.
   * The kernel does not provide a mechanism to ensure that the page color
   * (represented by this bit) remains the same when allocated or when pages
   * are remapped. When user pages are mapped into kernel space, the color of
   * the page might also change.
   *
   * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
   * to temporarily map a page so we can match the color.
   */
  
  #if DCACHE_WAY_SIZE > PAGE_SIZE
  # define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
  # define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
  # define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
  # define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
  #else
  # define DCACHE_ALIAS_ORDER	0
  # define DCACHE_ALIAS(a)	((void)(a), 0)
  #endif
  #define DCACHE_N_COLORS		(1 << DCACHE_ALIAS_ORDER)
  
  #if ICACHE_WAY_SIZE > PAGE_SIZE
  # define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
  # define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
  # define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
  # define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
  #else
  # define ICACHE_ALIAS_ORDER	0
  #endif
  
  
  #ifdef __ASSEMBLY__

  /* Assembly has no type checking; a pgprot is just its raw value. */
  #define __pgprot(x)	(x)

  #else

  /*
   * These are used to make use of C type-checking..
   */

  typedef struct { unsigned long pte; } pte_t;		/* page table entry */
  typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
  typedef struct { unsigned long pgprot; } pgprot_t;
  typedef struct page *pgtable_t;

  /* Unwrap the raw word from each wrapper type. */
  #define pte_val(x)	((x).pte)
  #define pgd_val(x)	((x).pgd)
  #define pgprot_val(x)	((x).pgprot)

  /* Wrap a raw word into the corresponding typed struct. */
  #define __pte(x)	((pte_t) { (x) } )
  #define __pgd(x)	((pgd_t) { (x) } )
  #define __pgprot(x)	((pgprot_t) { (x) } )
  
  /*
   * Pure 2^n version of get_order
   * Use 'nsau' instructions if supported by the processor or the generic version.
   */

  #if XCHAL_HAVE_NSA

  /*
   * Return the allocation order (log2 of the page count, rounded up) for a
   * region of 'size' bytes.  NSAU ("normalization shift amount, unsigned")
   * yields the number of leading zero bits of its operand, so
   * 32 - nsau((size - 1) >> PAGE_SHIFT) is the index of the most significant
   * set bit plus one, i.e. ceil(log2(size / PAGE_SIZE)).
   * NOTE(review): for size <= PAGE_SIZE the operand is 0; this relies on
   * nsau(0) == 32 so that order 0 is returned -- confirm against the ISA.
   */
  static inline __attribute_const__ int get_order(unsigned long size)
  {
  	int lz;
  	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
  	return 32 - lz;
  }

  #else

  /* No NSA unit: fall back to the generic C implementation. */
  # include <asm-generic/getorder.h>

  #endif
  
  struct page;
  struct vm_area_struct;
  /* Page-sized clear/copy primitives, implemented elsewhere in the arch code. */
  extern void clear_page(void *page);
  extern void copy_page(void *to, void *from);

  /*
   * If we have cache aliasing and writeback caches, we might have to do
   * some extra work
   */

  #if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
  /* Aliasing variants take the physical address as well -- presumably so the
   * implementation can work through a mapping of the matching cache color
   * (see the aliasing discussion above); confirm in the definitions. */
  extern void clear_page_alias(void *vaddr, unsigned long paddr);
  extern void copy_page_alias(void *to, void *from,
  			    unsigned long to_paddr, unsigned long from_paddr);

  /* Arch-specific highmem helpers; the #defines tell generic code they exist. */
  #define clear_user_highpage clear_user_highpage
  void clear_user_highpage(struct page *page, unsigned long vaddr);
  #define __HAVE_ARCH_COPY_USER_HIGHPAGE
  void copy_user_highpage(struct page *to, struct page *from,
  			unsigned long vaddr, struct vm_area_struct *vma);
  #else
  /* No aliasing: user-page helpers degenerate to the plain versions. */
  # define clear_user_page(page, vaddr, pg)	clear_page(page)
  # define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
  #endif
  
  /*
   * This handles the memory map.  We handle pages at
   * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
   * These macros are for conversion of kernel address, not user
   * addresses.
   */

  /* First valid pfn: physical RAM need not start at physical address 0. */
  #define ARCH_PFN_OFFSET		(PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT)

  /* Linear kernel virtual <-> physical conversion (fixed PAGE_OFFSET shift). */
  #define __pa(x)			((unsigned long) (x) - PAGE_OFFSET)
  #define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
  /* A pfn is valid inside [ARCH_PFN_OFFSET, ARCH_PFN_OFFSET + max_mapnr). */
  #define pfn_valid(pfn) \
  	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)

  #ifdef CONFIG_DISCONTIGMEM
  # error CONFIG_DISCONTIGMEM not supported
  #endif

  /* struct page <-> kernel virtual / physical address conversions. */
  #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
  #define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
  #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
  #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
  
  #endif /* __ASSEMBLY__ */
  
  /* Default VMA protections: read/write/exec both granted and permitted. */
  #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
  				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
  
  #include <asm-generic/memory_model.h>
  #endif /* _XTENSA_PAGE_H */