Blame view

kernel/linux-imx6_3.14.28/arch/blackfin/mm/init.c 3.11 KB
6b13f685e   김민수   Initial BSP addition
  /*
   * Copyright 2004-2009 Analog Devices Inc.
   *
   * Licensed under the GPL-2 or later.
   */
  
  #include <linux/gfp.h>
  #include <linux/swap.h>
  #include <linux/bootmem.h>
  #include <linux/uaccess.h>
  #include <linux/export.h>
  #include <asm/bfin-global.h>
  #include <asm/pda.h>
  #include <asm/cplbinit.h>
  #include <asm/early_printk.h>
  #include "blackfin_sram.h"
  
  /*
   * ZERO_PAGE is a special page that is used for zero-initialized data and COW.
   * Let the bss do its zero-init magic so we don't have to do it ourselves.
   */
  char empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
  EXPORT_SYMBOL(empty_zero_page);
  
  #ifndef CONFIG_EXCEPTION_L1_SCRATCH
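  /*
   * Fallback per-CPU exception stacks, used when the exception stack does
   * not live in dedicated L1 scratchpad memory; init_pda() below points
   * each CPU's ex_stack at the top of its entry.
   */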
  #if defined CONFIG_SYSCALL_TAB_L1
  __attribute__((l1_data))
  #endif
  static unsigned long exception_stack[NR_CPUS][1024];
  #endif
  
  struct blackfin_pda cpu_pda[NR_CPUS];
  EXPORT_SYMBOL(cpu_pda);
  
  /*
   * paging_init() continues the virtual memory environment setup which
   * was begun by the code in arch/head.S.  It takes no parameters; it
   * computes the zone sizes and hands all available kernel memory to the
   * page allocator via free_area_init_node().
   */
  void __init paging_init(void)
  {
  	/*
  	 * make sure start_mem is page aligned, otherwise bootmem and
  	 * page_alloc get different views of the world
  	 */
  	unsigned long end_mem = memory_end & PAGE_MASK;
  
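  	/*
  	 * All of the Blackfin's managed RAM is DMA-capable, so every page is
  	 * accounted to ZONE_DMA and the remaining zones stay empty.
  	 */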
  	unsigned long zones_size[MAX_NR_ZONES] = {
  		[0] = 0,
  		[ZONE_DMA] = (end_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> PAGE_SHIFT,
  		[ZONE_NORMAL] = 0,
  #ifdef CONFIG_HIGHMEM
  		[ZONE_HIGHMEM] = 0,
  #endif
  	};
  
  	/* Set up SFC/DFC registers (user data space) */
  	set_fs(KERNEL_DS);
  
  	pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n",
  	        PAGE_ALIGN(memory_start), end_mem);
  	free_area_init_node(0, zones_size,
  		CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT, NULL);
  }
  
  asmlinkage void __init init_pda(void)
  {
  	unsigned int cpu = raw_smp_processor_id();
  
  	early_shadow_stamp();
  
  	/* Initialize the PDA fields holding references to other parts
  	   of the memory. The content of such memory is still
  	   undefined at the time of the call; we are only setting up
  	   valid pointers to it. */
  	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));
  
  #ifdef CONFIG_EXCEPTION_L1_SCRATCH
  	cpu_pda[cpu].ex_stack = (unsigned long *)(L1_SCRATCH_START + \
  					L1_SCRATCH_LENGTH);
  #else
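  	/*
  	 * exception_stack[cpu + 1] decays to the address just past this CPU's
  	 * 1024-word entry; the exception stack grows down from there.
  	 */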
  	cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
  #endif
  
  #ifdef CONFIG_SMP
  	cpu_pda[cpu].imask = 0x1f;
  #endif
  }
  
  void __init mem_init(void)
  {
  	char buf[64];
  
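  	/*
  	 * Everything below high_memory is directly managed RAM; max_mapnr is
  	 * the number of page frames the kernel manages.
  	 */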
  	high_memory = (void *)(memory_end & PAGE_MASK);
  	max_mapnr = MAP_NR(high_memory);
  	printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", max_mapnr);
  
  	/* This will put all low memory onto the freelists. */
  	free_all_bootmem();
  
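  	/* Report the size of the uncached DMA region (in KiB) as part of the
  	   boot-time memory banner printed by mem_init_print_info(). */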
  	snprintf(buf, sizeof(buf) - 1, "%uK DMA", DMA_UNCACHED_REGION >> 10);
  	mem_init_print_info(buf);
  }
  
  #ifdef CONFIG_BLK_DEV_INITRD
  void __init free_initrd_mem(unsigned long start, unsigned long end)
  {
  #ifndef CONFIG_MPU
  	free_reserved_area((void *)start, (void *)end, -1, "initrd");
  #endif
  }
  #endif
  
  void __init_refok free_initmem(void)
  {
  #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
  	free_initmem_default(-1);
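  	/* If memory_start had been placed right at the end of the init
  	   section, pull it back to __init_begin now that the section is freed. */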
  	if (memory_start == (unsigned long)(&__init_end))
  		memory_start = (unsigned long)(&__init_begin);
  #endif
  }