  /*
   * linux/arch/unicore32/mm/pgd.c
   *
   * Code specific to PKUnity SoC and UniCore ISA
   *
   * Copyright (C) 2001-2010 GUAN Xue-tao
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
  #include <linux/mm.h>
  #include <linux/gfp.h>
  #include <linux/highmem.h>
  
  #include <asm/pgalloc.h>
  #include <asm/page.h>
  #include <asm/tlbflush.h>
  
  #include "mm.h"
  
  #define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
  
  /*
   * need to get a 4k page for level 1
   */
  /*
   * get_pgd_slow - allocate and initialize a new top-level page table
   * @mm: the mm_struct the new pgd will belong to
   *
   * Allocates one 4KB page for the level-1 table, zeroes the user
   * portion, copies the kernel/IO entries from the init pgd, and (when
   * the vector page lives at address 0) populates the first PTE so the
   * machine vectors stay mapped.
   *
   * Returns the new pgd on success, or NULL on allocation failure.
   */
  pgd_t *get_pgd_slow(struct mm_struct *mm)
  {
  	pgd_t *new_pgd, *init_pgd;
  	pmd_t *new_pmd, *init_pmd;
  	pte_t *new_pte, *init_pte;
  
  	/* One order-0 page is enough for the full level-1 table. */
  	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 0);
  	if (!new_pgd)
  		goto no_pgd;
  
  	/* Clear only the user entries; kernel entries are copied below. */
  	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
  
  	/*
  	 * Copy over the kernel and IO PGD entries
  	 */
  	init_pgd = pgd_offset_k(0);
  	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
  		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
  
  	/*
  	 * Write the whole table back to memory; presumably needed so the
  	 * hardware page-table walker sees the fresh entries.
  	 */
  	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
  
  	if (!vectors_high()) {
  		/*
  		 * On UniCore, first page must always be allocated since it
  		 * contains the machine vectors.
  		 */
  		new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
  		if (!new_pmd)
  			goto no_pmd;
  
  		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
  		if (!new_pte)
  			goto no_pte;
  
  		/* Mirror the init_mm mapping of the vector page at VA 0. */
  		init_pmd = pmd_offset((pud_t *)init_pgd, 0);
  		init_pte = pte_offset_map(init_pmd, 0);
  		set_pte(new_pte, *init_pte);
  		pte_unmap(init_pte);
  		pte_unmap(new_pte);
  	}
  
  	return new_pgd;
  
  no_pte:
  	/* Undo pmd_alloc(): release the pmd and its nr_pmds accounting. */
  	pmd_free(mm, new_pmd);
  	mm_dec_nr_pmds(mm);
  no_pmd:
  	free_pages((unsigned long)new_pgd, 0);
  no_pgd:
  	return NULL;
  }
  
  /*
   * free_pgd_slow - tear down a pgd created by get_pgd_slow()
   * @mm:  the mm_struct the pgd belongs to
   * @pgd: the top-level table to free (may be NULL; then this is a no-op)
   *
   * Releases the first-entry pte/pmd pair that get_pgd_slow() may have
   * installed for the vector page, adjusts the mm's page-table counters,
   * then frees the pgd page itself.
   */
  void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
  {
  	pmd_t *pmd;
  	pgtable_t pte;
  
  	if (!pgd)
  		return;
  
  	/* pgd is always present and good */
  	pmd = pmd_off(pgd, 0);
  	if (pmd_none(*pmd))
  		goto free;	/* no vector-page mapping was installed */
  	if (pmd_bad(*pmd)) {
  		/* Corrupt entry: report it, clear it, and free only the pgd. */
  		pmd_ERROR(*pmd);
  		pmd_clear(pmd);
  		goto free;
  	}
  
  	/* Release the pte page first, then the pmd, updating counters. */
  	pte = pmd_pgtable(*pmd);
  	pmd_clear(pmd);
  	pte_free(mm, pte);
  	atomic_long_dec(&mm->nr_ptes);
  	pmd_free(mm, pmd);
  	mm_dec_nr_pmds(mm);
  free:
  	free_pages((unsigned long) pgd, 0);
  }