Blame view

kernel/linux-rt-4.4.41/arch/cris/mm/tlb.c 2.64 KB
5113f6f70   김현기   kernel add
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
  /*
   *  linux/arch/cris/mm/tlb.c
   *
   *  Copyright (C) 2000, 2001  Axis Communications AB
   *  
   *  Authors:   Bjorn Wesen (bjornw@axis.com)
   *
   */
  
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/tlb.h>

/* Debug print wrapper: expands to nothing, so every D(printk(...)) in this
 * file is compiled out. Redefine as "#define D(x) x" to enable tracing. */
#define D(x)
  
  /* The TLB can host up to 64 different mm contexts at the same time.
   * The running context is R_MMU_CONTEXT, and each TLB entry contains a
   * page_id that has to match to give a hit. In page_id_map, we keep track
   * of which mm we have assigned to which page_id, so that we know when
   * to invalidate TLB entries.
   *
   * The last page_id is never running - it is used as an invalid page_id
   * so we can make TLB entries that will never match.
   *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush, causing an inconsistent TLB state.
   */
  
/* Map from TLB page_id (context number) to the mm currently owning it;
 * NULL means the slot is free. Indexed 0..NUM_PAGEID-1. */
struct mm_struct *page_id_map[NUM_PAGEID];
static int map_replace_ptr = 1;  /* which page_id_map entry to replace next */
  
  /* the following functions are similar to those used in the PPC port */
  
  static inline void
  alloc_context(struct mm_struct *mm)
  {
  	struct mm_struct *old_mm;
  
  	D(printk("tlb: alloc context %d (%p)
  ", map_replace_ptr, mm));
  
  	/* did we replace an mm ? */
  
  	old_mm = page_id_map[map_replace_ptr];
  
  	if(old_mm) {
  		/* throw out any TLB entries belonging to the mm we replace
  		 * in the map
  		 */
  		flush_tlb_mm(old_mm);
  
  		old_mm->context.page_id = NO_CONTEXT;
  	}
  
  	/* insert it into the page_id_map */
  
  	mm->context.page_id = map_replace_ptr;
  	page_id_map[map_replace_ptr] = mm;
  
  	map_replace_ptr++;
  
  	if(map_replace_ptr == INVALID_PAGEID)
  		map_replace_ptr = 0;         /* wrap around */	
  }
  
  /* 
   * if needed, get a new MMU context for the mm. otherwise nothing is done.
   */
  
  void
  get_mmu_context(struct mm_struct *mm)
  {
  	if(mm->context.page_id == NO_CONTEXT)
  		alloc_context(mm);
  }
  
  /* called by __exit_mm to destroy the used MMU context if any before
   * destroying the mm itself. this is only called when the last user of the mm
   * drops it.
   *
   * the only thing we really need to do here is mark the used PID slot
   * as empty.
   */
  
  void
  destroy_context(struct mm_struct *mm)
  {
  	if(mm->context.page_id != NO_CONTEXT) {
  		D(printk("destroy_context %d (%p)
  ", mm->context.page_id, mm));
  		flush_tlb_mm(mm);  /* TODO this might be redundant ? */
  		page_id_map[mm->context.page_id] = NULL;
  	}
  }
  
  /* called once during VM initialization, from init.c */
  
  void __init
  tlb_init(void)
  {
  	int i;
  
  	/* clear the page_id map */
  
  	for (i = 1; i < ARRAY_SIZE(page_id_map); i++)
  		page_id_map[i] = NULL;
  	
  	/* invalidate the entire TLB */
  
  	flush_tlb_all();
  
  	/* the init_mm has context 0 from the boot */
  
  	page_id_map[0] = &init_mm;
  }