kernel/linux-imx6_3.14.28/include/asm-generic/tlb.h
6b13f685e   김민수   Initial BSP addition
  /* include/asm-generic/tlb.h
   *
   *	Generic TLB shootdown code
   *
   * Copyright 2001 Red Hat, Inc.
   * Based on code from mm/memory.c Copyright Linus Torvalds and others.
   *
   * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
   * as published by the Free Software Foundation; either version
   * 2 of the License, or (at your option) any later version.
   */
  #ifndef _ASM_GENERIC__TLB_H
  #define _ASM_GENERIC__TLB_H
  
  #include <linux/swap.h>
  #include <asm/pgalloc.h>
  #include <asm/tlbflush.h>
  
  #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  /*
   * Semi RCU freeing of the page directories.
   *
   * This is needed by some architectures to implement software pagetable walkers.
   *
    * gup_fast() and other software pagetable walkers do a lockless page-table
    * walk and therefore need some synchronization with the freeing of the page
   * directories. The chosen means to accomplish that is by disabling IRQs over
   * the walk.
   *
   * Architectures that use IPIs to flush TLBs will then automagically DTRT,
   * since we unlink the page, flush TLBs, free the page. Since the disabling of
   * IRQs delays the completion of the TLB flush we can never observe an already
   * freed page.
   *
    * Architectures that do not have this (PPC) need to delay the freeing by
    * some other means; this is that means.
   *
   * What we do is batch the freed directory pages (tables) and RCU free them.
   * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
   * holds off grace periods.
   *
    * However, in order to batch these pages we need to allocate storage; this
    * allocation is deep inside the MM code and can thus easily fail on memory
    * pressure. To guarantee progress we fall back to single table freeing, see
   * the implementation of tlb_remove_table_one().
   *
   */
  struct mmu_table_batch {
  	struct rcu_head		rcu;
  	unsigned int		nr;
  	void			*tables[0];
  };
  
  #define MAX_TABLE_BATCH		\
  	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
  
  extern void tlb_table_flush(struct mmu_gather *tlb);
  extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
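
   /*
    * Illustrative sketch (not part of this header): an architecture opting
    * in to CONFIG_HAVE_RCU_TABLE_FREE routes its page-table frees through
    * tlb_remove_table() instead of freeing the page directly; a hypothetical
    * __pte_free_tlb() might look like:
    *
    *	#define __pte_free_tlb(tlb, ptep, address)		\
    *	do {							\
    *		pgtable_page_dtor(virt_to_page(ptep));		\
    *		tlb_remove_table((tlb), virt_to_page(ptep));	\
    *	} while (0)
    *
    * tlb_remove_table() batches the table pages and frees the whole batch
    * from an RCU-sched callback, so a gup_fast() walker running with IRQs
    * disabled never sees a table page freed under it.
    */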
  
  #endif
  
  /*
   * If we can't allocate a page to make a big batch of page pointers
   * to work on, then just handle a few from the on-stack structure.
   */
  #define MMU_GATHER_BUNDLE	8
  
  struct mmu_gather_batch {
  	struct mmu_gather_batch	*next;
  	unsigned int		nr;
  	unsigned int		max;
  	struct page		*pages[0];
  };
  
  #define MAX_GATHER_BATCH	\
  	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
  
  /*
    * Limit the maximum number of mmu_gather batches to reduce the risk of soft
    * lockups for non-preemptible kernels on huge machines when a lot of memory
    * is zapped during unmapping.
   * 10K pages freed at once should be safe even without a preemption point.
   */
  #define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
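
   /*
    * Worked example (assuming 4KiB pages and 8-byte pointers): the batch
    * header above is 16 bytes, so MAX_GATHER_BATCH = (4096 - 16) / 8 = 510
    * page pointers per batch, MAX_GATHER_BATCH_COUNT = 10000 / 510 = 19
    * batches, and thus at most about 19 * 510 = 9690 pages (~38MiB worth of
    * 4KiB pages) are queued between forced flushes.
    */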
  
  /* struct mmu_gather is an opaque type used by the mm code for passing around
   * any data needed by arch specific code for tlb_remove_page.
   */
  struct mmu_gather {
  	struct mm_struct	*mm;
  #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  	struct mmu_table_batch	*batch;
  #endif
  	unsigned long		start;
  	unsigned long		end;
  	unsigned int		need_flush : 1,	/* Did free PTEs */
  	/* we are in the middle of an operation to clear
  	 * a full mm and can make some optimizations */
  				fullmm : 1,
  	/* we have performed an operation which
  	 * requires a complete flush of the tlb */
  				need_flush_all : 1;
  
   	struct mmu_gather_batch *active;	/* batch currently being filled */
   	struct mmu_gather_batch	local;		/* on-stack fallback batch */
   	struct page		*__pages[MMU_GATHER_BUNDLE];	/* local.pages[] storage */
  	unsigned int		batch_count;
  };
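
   /*
    * How the on-stack fallback is wired up (sketch of what tlb_gather_mmu()
    * does internally, based on the generic implementation in mm/memory.c):
    *
    *	tlb->local.next  = NULL;
    *	tlb->local.nr    = 0;
    *	tlb->local.max   = ARRAY_SIZE(tlb->__pages);
    *	tlb->active      = &tlb->local;
    *	tlb->batch_count = 0;
    *
    * __pages[] immediately follows local in the struct, so it provides the
    * storage behind local's zero-length pages[] array.
    */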
  
  #define HAVE_GENERIC_MMU_GATHER
  
  void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
  void tlb_flush_mmu(struct mmu_gather *tlb);
  void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
  							unsigned long end);
  int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
  
  /* tlb_remove_page
   *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
   *	required.
   */
  static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  {
  	if (!__tlb_remove_page(tlb, page))
  		tlb_flush_mmu(tlb);
  }
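
   /*
    * Typical caller sequence (sketch, loosely following the mm/memory.c
    * unmap path; locking and error handling omitted):
    *
    *	struct mmu_gather tlb;
    *
    *	tlb_gather_mmu(&tlb, mm, start, end);
    *	for (addr = start; addr < end; addr += PAGE_SIZE) {
    *		... clear the PTE for addr ...
    *		tlb_remove_tlb_entry(&tlb, pte, addr);
    *		tlb_remove_page(&tlb, page);
    *	}
    *	tlb_finish_mmu(&tlb, start, end);
    *
    * tlb_remove_page() may call tlb_flush_mmu() mid-loop once a batch fills
    * up; tlb_finish_mmu() performs the final flush and frees the batch pages.
    */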
  
  /**
   * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
   *
    * Record the fact that PTEs were really unmapped in ->need_flush, so we can
    * later optimise away the tlb invalidate.  This helps when userspace is
    * unmapping already-unmapped pages, which happens quite a lot.
   */
  #define tlb_remove_tlb_entry(tlb, ptep, address)		\
  	do {							\
  		tlb->need_flush = 1;				\
  		__tlb_remove_tlb_entry(tlb, ptep, address);	\
  	} while (0)
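
   /*
    * Illustrative pairing (sketch): the PTE is cleared first and the unmap
    * recorded second, so the flush later issued by tlb_flush_mmu() is
    * guaranteed to cover it:
    *
    *	ptent = ptep_get_and_clear(mm, addr, pte);
    *	tlb_remove_tlb_entry(tlb, pte, addr);
    *
    * Architectures that track the exact range to flush hook in by providing
    * a non-empty __tlb_remove_tlb_entry().
    */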
  
  /**
   * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
   * This is a nop so far, because only x86 needs it.
   */
  #ifndef __tlb_remove_pmd_tlb_entry
  #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
  #endif
  
  #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
  	do {							\
  		tlb->need_flush = 1;				\
  		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
  	} while (0)
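
   /*
    * Sketch of the huge-page analogue (loosely following the zap path in
    * mm/huge_memory.c of this kernel generation):
    *
    *	orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmdp);
    *	tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);
    *	tlb_remove_page(tlb, pmd_page(orig_pmd));
    */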
  
  #define pte_free_tlb(tlb, ptep, address)			\
  	do {							\
  		tlb->need_flush = 1;				\
  		__pte_free_tlb(tlb, ptep, address);		\
  	} while (0)
  
  #ifndef __ARCH_HAS_4LEVEL_HACK
  #define pud_free_tlb(tlb, pudp, address)			\
  	do {							\
  		tlb->need_flush = 1;				\
  		__pud_free_tlb(tlb, pudp, address);		\
  	} while (0)
  #endif
  
  #define pmd_free_tlb(tlb, pmdp, address)			\
  	do {							\
  		tlb->need_flush = 1;				\
  		__pmd_free_tlb(tlb, pmdp, address);		\
  	} while (0)
  
  #define tlb_migrate_finish(mm) do {} while (0)
  
  #endif /* _ASM_GENERIC__TLB_H */