Blame view

kernel/linux-imx6_3.14.28/arch/tile/kernel/tlb.c 3.03 KB
6b13f685e   김민수   Initial BSP addition
  /*
   * Copyright 2010 Tilera Corporation. All Rights Reserved.
   *
   *   This program is free software; you can redistribute it and/or
   *   modify it under the terms of the GNU General Public License
   *   as published by the Free Software Foundation, version 2.
   *
   *   This program is distributed in the hope that it will be useful, but
   *   WITHOUT ANY WARRANTY; without even the implied warranty of
   *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
   *   NON INFRINGEMENT.  See the GNU General Public License for
   *   more details.
   *
   */
  
  #include <linux/cpumask.h>
  #include <linux/module.h>
  #include <linux/hugetlb.h>
  #include <asm/tlbflush.h>
  #include <asm/homecache.h>
  #include <hv/hypervisor.h>
  
  /*
   * From <asm/tlbflush.h>: the ASID currently loaded on each cpu,
   * plus the range of valid ASIDs handed out by the hypervisor.
   */
  DEFINE_PER_CPU(int, current_asid);
  int min_asid, max_asid;
  
  /*
   * Note that we flush the L1I (for VM_EXEC pages) as well as the
   * TLB so that when we unmap an executable page, we also flush it
   * from the instruction cache.  Combined with flushing the L1I at
   * context switch time, this means we don't have to do any other
   * icache flushes.
   */
  
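  /*
   * Flush all TLB entries for the given mm on every cpu that has
   * run it.  Each remote cpu is identified by its (x, y) position
   * in the tile mesh plus the ASID it currently holds for this mm.
   */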
  void flush_tlb_mm(struct mm_struct *mm)
  {
  	HV_Remote_ASID asids[NR_CPUS];
  	int i = 0, cpu;
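  	/* Tile cpus form a 2D mesh; convert each linear cpu number
  	 * into (x, y) grid coordinates for the hypervisor. */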
  	for_each_cpu(cpu, mm_cpumask(mm)) {
  		HV_Remote_ASID *asid = &asids[i++];
  		asid->y = cpu / smp_topology.width;
  		asid->x = cpu % smp_topology.width;
  		asid->asid = per_cpu(current_asid, cpu);
  	}
  	flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
  		     0, 0, 0, NULL, asids, i);
  }
  
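  /* Flush all TLB entries for the mm of the currently running task. */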
  void flush_tlb_current_task(void)
  {
  	flush_tlb_mm(current->mm);
  }
  
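  /*
   * Flush a single page from an mm.  The size comes from
   * vma_kernel_pagesize() so huge pages are flushed at the right
   * granularity; executable mappings also evict the L1 icache.
   */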
  void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
  		       unsigned long va)
  {
  	unsigned long size = vma_kernel_pagesize(vma);
  	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
  	flush_remote(0, cache, mm_cpumask(mm),
  		     va, size, size, mm_cpumask(mm), NULL, 0);
  }
  
  void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
  {
  	flush_tlb_page_mm(vma, vma->vm_mm, va);
  }
  EXPORT_SYMBOL(flush_tlb_page);
  
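  /*
   * Flush a contiguous range of pages, using the vma's page size
   * as the granularity of the TLB entries being flushed.
   */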
  void flush_tlb_range(struct vm_area_struct *vma,
  		     unsigned long start, unsigned long end)
  {
  	unsigned long size = vma_kernel_pagesize(vma);
  	struct mm_struct *mm = vma->vm_mm;
  	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
  	flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
  		     mm_cpumask(mm), NULL, 0);
  }
  
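  /*
   * Flush everything on all online cpus: walk each virtual address
   * range the hypervisor reports and flush it twice, once for
   * small-page TLB entries (also evicting the L1I) and once for
   * huge-page entries.
   */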
  void flush_tlb_all(void)
  {
  	int i;
  	for (i = 0; ; ++i) {
  		HV_VirtAddrRange r = hv_inquire_virtual(i);
  		if (r.size == 0)
  			break;
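  		/* Two passes: PAGE_SIZE entries (evicting the L1I
  		 * as well), then HPAGE_SIZE entries. */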
  		flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
  			     r.start, r.size, PAGE_SIZE, cpu_online_mask,
  			     NULL, 0);
  		flush_remote(0, 0, NULL,
  			     r.start, r.size, HPAGE_SIZE, cpu_online_mask,
  			     NULL, 0);
  	}
  }
  
  /*
   * Callers need to flush the L1I themselves if necessary, e.g. for
   * kernel module unload.  Otherwise we assume callers are not using
   * executable pgprot_t's.  We skip EVICT_L1I here because it would
   * interrupt dataplane cpus unnecessarily.
   */
  void flush_tlb_kernel_range(unsigned long start, unsigned long end)
  {
  	flush_remote(0, 0, NULL,
  		     start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
  }