
kernel/linux-rt-4.4.41/arch/powerpc/mm/mmap.c (2.8 KB)
Commit 5113f6f70, 김현기, "kernel add"
  /*
   *  flexible mmap layout support
   *
   * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
   * All Rights Reserved.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   *
   *
   * Started by Ingo Molnar <mingo@elte.hu>
   */
  
  #include <linux/personality.h>
  #include <linux/mm.h>
  #include <linux/random.h>
  #include <linux/sched.h>
  
  /*
   * Top of mmap area (just below the process stack).
   *
   * Leave at least a ~128 MB hole on 32bit applications.
   *
   * On 64bit applications we randomise the stack by 1GB so we need to
   * space our mmap start address by a further 1GB, otherwise there is a
   * chance the mmap area will end up closer to the stack than our ulimit
   * requires.
   */
  #define MIN_GAP32 (128*1024*1024)
  #define MIN_GAP64 ((128 + 1024)*1024*1024UL)
  #define MIN_GAP ((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
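  /*
   * Cap the gap at 5/6 of TASK_SIZE so an enormous stack rlimit cannot
   * squeeze the mmap area out of the address space.
   */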
  #define MAX_GAP (TASK_SIZE/6*5)
  
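  /*
   * Use the legacy (bottom-up) mmap layout if the ADDR_COMPAT_LAYOUT
   * personality requests it, if the stack rlimit is unlimited (so no sane
   * top-down gap can be reserved), or if the legacy_va_layout sysctl is set.
   */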
  static inline int mmap_is_legacy(void)
  {
  	if (current->personality & ADDR_COMPAT_LAYOUT)
  		return 1;
  
  	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
  		return 1;
  
  	return sysctl_legacy_va_layout;
  }
  
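  /*
   * Random offset (in bytes, page aligned) for the mmap base; applied by
   * arch_pick_mmap_layout() only when the task has PF_RANDOMIZE set.
   */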
  unsigned long arch_mmap_rnd(void)
  {
  	unsigned long rnd;
  
  	/* 8MB for 32bit, 1GB for 64bit */
  	if (is_32bit_task())
  		rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT));
  	else
  		rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT));
  
  	return rnd << PAGE_SHIFT;
  }
  
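  /*
   * Top-down mmap base: TASK_SIZE minus the stack gap (the stack rlimit,
   * clamped between MIN_GAP and MAX_GAP) and minus the random offset.
   */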
  static inline unsigned long mmap_base(unsigned long rnd)
  {
  	unsigned long gap = rlimit(RLIMIT_STACK);
  
  	if (gap < MIN_GAP)
  		gap = MIN_GAP;
  	else if (gap > MAX_GAP)
  		gap = MAX_GAP;
  
  	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
  }
  
  /*
   * This function, called very early during the creation of a new
   * process VM image, sets up which VM layout function to use:
   */
  void arch_pick_mmap_layout(struct mm_struct *mm)
  {
  	unsigned long random_factor = 0UL;
  
  	if (current->flags & PF_RANDOMIZE)
  		random_factor = arch_mmap_rnd();
  
  	/*
  	 * Fall back to the standard layout if the personality
  	 * bit is set, or if the expected stack growth is unlimited:
  	 */
  	if (mmap_is_legacy()) {
  		mm->mmap_base = TASK_UNMAPPED_BASE;
  		mm->get_unmapped_area = arch_get_unmapped_area;
  	} else {
  		mm->mmap_base = mmap_base(random_factor);
  		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
  	}
  }