kernel/linux-rt-4.4.41/arch/arm/mm/pageattr.c 2.23 KB
5113f6f70   김현기   kernel add
  /*
   * Copyright (c) 2014, The Linux Foundation. All rights reserved.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 and
   * only version 2 as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   */
  #include <linux/mm.h>
  #include <linux/module.h>
  
  #include <asm/pgtable.h>
  #include <asm/tlbflush.h>
  
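/*
 * Protection bits to set and to clear on each PTE, handed through
 * apply_to_page_range() to the per-PTE callback below.
 */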
  struct page_change_data {
  	pgprot_t set_mask;
  	pgprot_t clear_mask;
  };
  
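/*
 * Called once per PTE by apply_to_page_range(): clear the requested bits,
 * set the requested bits, and write the updated entry back.
 */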
  static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
  			void *data)
  {
  	struct page_change_data *cdata = data;
  	pte_t pte = *ptep;
  
  	pte = clear_pte_bit(pte, cdata->clear_mask);
  	pte = set_pte_bit(pte, cdata->set_mask);
  
  	set_pte_ext(ptep, pte, 0);
  	return 0;
  }
  
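/*
 * Apply set_mask/clear_mask to every page in [addr, addr + numpages * PAGE_SIZE).
 * An unaligned address is rounded down to its page (with a one-time warning),
 * the whole range must lie inside the module area, and the kernel TLB entries
 * covering the range are flushed afterwards.
 */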
  static int change_memory_common(unsigned long addr, int numpages,
  				pgprot_t set_mask, pgprot_t clear_mask)
  {
  	unsigned long start = addr;
  	unsigned long size = PAGE_SIZE*numpages;
  	unsigned long end = start + size;
  	int ret;
  	struct page_change_data data;
  
  	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
  		start &= PAGE_MASK;
  		end = start + size;
  		WARN_ON_ONCE(1);
  	}
  
  	if (start < MODULES_VADDR || start >= MODULES_END)
  		return -EINVAL;
  
	if (end < MODULES_VADDR || end >= MODULES_END)
  		return -EINVAL;
  
  	data.set_mask = set_mask;
  	data.clear_mask = clear_mask;
  
  	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
  					&data);
  
  	flush_tlb_kernel_range(start, end);
  	return ret;
  }
  
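/*
 * Entry points used by the core kernel (e.g. module loading with
 * CONFIG_DEBUG_SET_MODULE_RONX) to toggle the read-only and execute-never
 * bits on module-area mappings.
 */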
  int set_memory_ro(unsigned long addr, int numpages)
  {
  	return change_memory_common(addr, numpages,
  					__pgprot(L_PTE_RDONLY),
  					__pgprot(0));
  }
  
  int set_memory_rw(unsigned long addr, int numpages)
  {
  	return change_memory_common(addr, numpages,
  					__pgprot(0),
  					__pgprot(L_PTE_RDONLY));
  }
  
  int set_memory_nx(unsigned long addr, int numpages)
  {
  	return change_memory_common(addr, numpages,
  					__pgprot(L_PTE_XN),
  					__pgprot(0));
  }
  
  int set_memory_x(unsigned long addr, int numpages)
  {
  	return change_memory_common(addr, numpages,
  					__pgprot(0),
  					__pgprot(L_PTE_XN));
  }
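
/*
 * Illustrative usage sketch, not part of the original file: a minimal module
 * that write-protects one of its own pages via set_memory_ro() and restores
 * write access on unload. It assumes the ARM set_memory_* prototypes come
 * from <asm/cacheflush.h> (as in 4.4) and that the module is loaded into the
 * module area, which is what change_memory_common() above requires.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

/* One page of module data, page-aligned so it can be protected on its own. */
static char ro_demo_buf[PAGE_SIZE] __aligned(PAGE_SIZE) = "locked";

static int __init ro_demo_init(void)
{
	/* Mark the page read-only; any write now faults until it is undone. */
	return set_memory_ro((unsigned long)ro_demo_buf, 1);
}

static void __exit ro_demo_exit(void)
{
	/* Restore write access before the module memory is freed. */
	set_memory_rw((unsigned long)ro_demo_buf, 1);
}

module_init(ro_demo_init);
module_exit(ro_demo_exit);
MODULE_LICENSE("GPL");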