Blame view

kernel/linux-imx6_3.14.28/arch/s390/mm/page-states.c 2.51 KB
6b13f685e   김민수   Initial BSP addition
  /*
   * Copyright IBM Corp. 2008
   *
   * Guest page hinting for unused pages.
   *
   * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
   */
  
  #include <linux/kernel.h>
  #include <linux/errno.h>
  #include <linux/types.h>
  #include <linux/mm.h>
  #include <linux/gfp.h>
  #include <linux/init.h>
  #include <asm/setup.h>
  #include <asm/ipl.h>
  
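  /*
   * ESSA ("extract and set storage attributes", opcode 0xb9ab) reports
   * and changes the usage state of a guest page frame. Only the two
   * state codes used in this file are defined here.
   */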
  #define ESSA_SET_STABLE		1
  #define ESSA_SET_UNUSED		2
  
  static int cmma_flag = 1;
  
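  /*
   * Early parameter parser: "cmma=yes" and "cmma=on" keep page hinting
   * enabled (the default), "cmma=no" and "cmma=off" disable it; any
   * other value is rejected.
   */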
  static int __init cmma(char *str)
  {
  	char *parm;
  
  	parm = strstrip(str);
  	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
  		cmma_flag = 1;
  		return 1;
  	}
  	cmma_flag = 0;
  	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
  		return 1;
  	return 0;
  }
  __setup("cmma=", cmma);
  
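  /*
   * Boot-time probe: issue a single ESSA on address zero. If the
   * facility is absent the instruction faults, the EX_TABLE fixup
   * branches past the "la" that clears rc, and the initial -EOPNOTSUPP
   * survives, turning page hinting off below.
   */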
  void __init cmma_init(void)
  {
  	register unsigned long tmp asm("0") = 0;
  	register int rc asm("1") = -EOPNOTSUPP;
  
  	if (!cmma_flag)
  		return;
  	/*
  	 * Disable CMM for dump, otherwise the tprot based memory
  	 * detection can fail because of unstable pages.
  	 */
  	if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
  		cmma_flag = 0;
  		return;
  	}
  	asm volatile(
  		"       .insn rrf,0xb9ab0000,%1,%1,0,0
  "
  		"0:     la      %0,0
  "
  		"1:
  "
  		EX_TABLE(0b,1b)
  		: "+&d" (rc), "+&d" (tmp));
  	if (rc)
  		cmma_flag = 0;
  }
  
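  /*
   * Mark each of the 2^order frames starting at @page as unused, so the
   * hypervisor may reclaim the backing storage. Called via the
   * arch_free_page() hook whenever the buddy allocator frees a block.
   */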
  static inline void set_page_unstable(struct page *page, int order)
  {
  	int i, rc;
  
  	for (i = 0; i < (1 << order); i++)
  		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
  			     : "=&d" (rc)
  			     : "a" (page_to_phys(page + i)),
  			       "i" (ESSA_SET_UNUSED));
  }
  
  void arch_free_page(struct page *page, int order)
  {
  	if (!cmma_flag)
  		return;
  	set_page_unstable(page, order);
  }
  
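  /*
   * The inverse operation: make frames stable again on allocation, via
   * the arch_alloc_page() hook, before the kernel touches them.
   */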
  static inline void set_page_stable(struct page *page, int order)
  {
  	int i, rc;
  
  	for (i = 0; i < (1 << order); i++)
  		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
  			     : "=&d" (rc)
  			     : "a" (page_to_phys(page + i)),
  			       "i" (ESSA_SET_STABLE));
  }
  
  void arch_alloc_page(struct page *page, int order)
  {
  	if (!cmma_flag)
  		return;
  	set_page_stable(page, order);
  }
  
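  /*
   * Walk the buddy free lists of every populated zone under zone->lock
   * and force all free pages into one state. Bulk changes of this kind
   * are needed around hibernation, for example, where an image must not
   * be built from unstable pages; drain_local_pages() first moves
   * per-cpu pages onto the free lists so the walk covers them.
   */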
  void arch_set_page_states(int make_stable)
  {
  	unsigned long flags, order, t;
  	struct list_head *l;
  	struct page *page;
  	struct zone *zone;
  
  	if (!cmma_flag)
  		return;
  	if (make_stable)
  		drain_local_pages(NULL);
  	for_each_populated_zone(zone) {
  		spin_lock_irqsave(&zone->lock, flags);
  		for_each_migratetype_order(order, t) {
  			list_for_each(l, &zone->free_area[order].free_list[t]) {
  				page = list_entry(l, struct page, lru);
  				if (make_stable)
  					set_page_stable(page, order);
  				else
  					set_page_unstable(page, order);
  			}
  		}
  		spin_unlock_irqrestore(&zone->lock, flags);
  	}
  }