Blame view

kernel/linux-rt-4.4.41/arch/sh/include/asm/mmu_context_32.h 1.24 KB
5113f6f70   김현기   kernel add
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
  #ifndef __ASM_SH_MMU_CONTEXT_32_H
  #define __ASM_SH_MMU_CONTEXT_32_H
  
  /*
   * Destroy context related info for an mm_struct that is about
   * to be put to rest.
   */
  static inline void destroy_context(struct mm_struct *mm)
  {
  	/* Do nothing */
  }
  
  #ifdef CONFIG_CPU_HAS_PTEAEX
/*
 * Program the current ASID by writing it to the dedicated MMU_PTEAEX
 * register (available when CONFIG_CPU_HAS_PTEAEX is set), avoiding the
 * read-modify-write of MMU_PTEH needed on older parts.
 */
static inline void set_asid(unsigned long asid)
{
	__raw_writel(asid, MMU_PTEAEX);
}
  
/*
 * Return the current ASID from MMU_PTEAEX, masked down to the
 * architecture's ASID field (MMU_CONTEXT_ASID_MASK).
 */
static inline unsigned long get_asid(void)
{
	return __raw_readl(MMU_PTEAEX) & MMU_CONTEXT_ASID_MASK;
}
  #else
  static inline void set_asid(unsigned long asid)
  {
  	unsigned long __dummy;
  
  	__asm__ __volatile__ ("mov.l	%2, %0
  \t"
  			      "and	%3, %0
  \t"
  			      "or	%1, %0
  \t"
  			      "mov.l	%0, %2"
  			      : "=&r" (__dummy)
  			      : "r" (asid), "m" (__m(MMU_PTEH)),
  			        "r" (0xffffff00));
  }
  
/*
 * Read the current ASID out of MMU_PTEH (no separate ASID register on
 * these parts) and mask it down to the ASID field.
 */
static inline unsigned long get_asid(void)
{
	unsigned long asid;

	/* Load the full PTEH register; the ASID lives in its low bits. */
	__asm__ __volatile__ ("mov.l	%1, %0"
			      : "=r" (asid)
			      : "m" (__m(MMU_PTEH)));
	asid &= MMU_CONTEXT_ASID_MASK;
	return asid;
}
  #endif /* CONFIG_CPU_HAS_PTEAEX */
  
/* MMU_TTB is used for optimizing the fault handling. */
/*
 * Store the page-table base (pgd) in the MMU_TTB register so the TLB
 * miss fast path can locate the page tables without consulting current.
 */
static inline void set_TTB(pgd_t *pgd)
{
	__raw_writel((unsigned long)pgd, MMU_TTB);
}
  
/* Return the page-table base currently programmed into MMU_TTB. */
static inline pgd_t *get_TTB(void)
{
	return (pgd_t *)__raw_readl(MMU_TTB);
}
  #endif /* __ASM_SH_MMU_CONTEXT_32_H */