Blame view

kernel/linux-imx6_3.14.28/mm/vmacache.c 2.56 KB
6b13f685e   김민수   Initial BSP addition
  /*
   * Copyright (C) 2014 Davidlohr Bueso.
   */
  #include <linux/sched.h>
  #include <linux/mm.h>
  #include <linux/vmacache.h>
  
  /*
   * Flush vma caches for threads that share a given mm.
   *
   * The operation is safe because the caller holds the mmap_sem
   * exclusively and other threads accessing the vma cache will
   * have mmap_sem held at least for read, so no extra locking
   * is required to maintain the vma cache.
   */
  void vmacache_flush_all(struct mm_struct *mm)
  {
  	struct task_struct *g, *p;
  
  	rcu_read_lock();
  	for_each_process_thread(g, p) {
  		/*
  		 * Only flush the vmacache pointers as the
  		 * mm seqnum is already set and curr's will
  		 * be set upon invalidation when the next
  		 * lookup is done.
  		 */
  		if (mm == p->mm)
  			vmacache_flush(p);
  	}
  	rcu_read_unlock();
  }
  
  /*
   * This task may be accessing a foreign mm via (for example)
   * get_user_pages()->find_vma().  The vmacache is task-local and this
   * task's vmacache pertains to a different mm (ie, its own).  There is
   * nothing we can do here.
   *
   * Also handle the case where a kernel thread has adopted this mm via use_mm().
   * That kernel thread's vmacache is not applicable to this mm.
   */
  static bool vmacache_valid_mm(struct mm_struct *mm)
  {
  	return current->mm == mm && !(current->flags & PF_KTHREAD);
  }
  
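  /*
   * Cache @newvma in the per-task slot selected by hashing @addr, so
   * that the next lookup of a nearby address can be served without
   * walking the VMA tree.  Skipped when the cache does not belong to
   * this task's own mm.
   */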
  void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
  {
  	if (vmacache_valid_mm(newvma->vm_mm))
  		current->vmacache[VMACACHE_HASH(addr)] = newvma;
  }
  
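  /*
   * Check whether current's cache may be used for @mm.  If the mm's
   * invalidation sequence number has moved on, resync it, flush the
   * stale entries and report a miss.
   */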
  static bool vmacache_valid(struct mm_struct *mm)
  {
  	struct task_struct *curr;
  
  	if (!vmacache_valid_mm(mm))
  		return false;
  
  	curr = current;
  	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
  		/*
  		 * First attempt will always be invalid, initialize
  		 * the new cache for this task here.
  		 */
  		curr->vmacache_seqnum = mm->vmacache_seqnum;
  		vmacache_flush(curr);
  		return false;
  	}
  	return true;
  }
  
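  /*
   * Look up @addr in current's per-task cache.  Returns the cached VMA
   * containing @addr, or NULL on a miss (stale cache, foreign mm, or
   * simply not cached), in which case the caller falls back to the
   * full VMA tree lookup.
   */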
  struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
  {
  	int i;
  
  	if (!vmacache_valid(mm))
  		return NULL;
  
  	for (i = 0; i < VMACACHE_SIZE; i++) {
  		struct vm_area_struct *vma = current->vmacache[i];
  
  		if (!vma)
  			continue;
  		if (WARN_ON_ONCE(vma->vm_mm != mm))
  			break;
  		if (vma->vm_start <= addr && vma->vm_end > addr)
  			return vma;
  	}
  
  	return NULL;
  }
  
  #ifndef CONFIG_MMU
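  /*
   * nommu variant: look up a VMA by its exact [start, end) range rather
   * than by an address contained within it.
   */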
  struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
  					   unsigned long start,
  					   unsigned long end)
  {
  	int i;
  
  	if (!vmacache_valid(mm))
  		return NULL;
  
  	for (i = 0; i < VMACACHE_SIZE; i++) {
  		struct vm_area_struct *vma = current->vmacache[i];
  
  		if (vma && vma->vm_start == start && vma->vm_end == end)
  			return vma;
  	}
  
  	return NULL;
  }
  #endif
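
For context, MMU callers such as find_vma() in mm/mmap.c consult this cache before walking the mm's rb-tree and repopulate it on a miss. The sketch below illustrates that lookup pattern; it is a paraphrase for illustration, not the verbatim find_vma() from this tree.

  struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
  {
  	struct rb_node *rb_node;
  	struct vm_area_struct *vma;
  
  	/* Per-task cache first: a hit avoids the rb-tree walk entirely. */
  	vma = vmacache_find(mm, addr);
  	if (likely(vma))
  		return vma;
  
  	/* Miss: walk the rb-tree for the first VMA with vm_end > addr. */
  	rb_node = mm->mm_rb.rb_node;
  	vma = NULL;
  	while (rb_node) {
  		struct vm_area_struct *tmp;
  
  		tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
  		if (tmp->vm_end > addr) {
  			vma = tmp;
  			if (tmp->vm_start <= addr)
  				break;
  			rb_node = rb_node->rb_left;
  		} else
  			rb_node = rb_node->rb_right;
  	}
  
  	/* Remember the result so the next nearby lookup hits the cache. */
  	if (vma)
  		vmacache_update(addr, vma);
  	return vma;
  }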