kernel/linux-rt-4.4.41/arch/mn10300/include/asm/div64.h
  /* MN10300 64-bit division
   *
   * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
   * Written by David Howells (dhowells@redhat.com)
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public Licence
   * as published by the Free Software Foundation; either version
   * 2 of the Licence, or (at your option) any later version.
   */
  #ifndef _ASM_DIV64
  #define _ASM_DIV64
  
  #include <linux/types.h>
  
/* Deliberately never defined: referencing it for an unsupported operand
 * size turns a bad do_div() invocation into a link-time error.
 */
extern void ____unhandled_size_in_do_div___(void);
  
  /*
   * Beginning with gcc 4.6, the MDR register is represented explicitly.  We
   * must, therefore, at least explicitly clobber the register when we make
   * changes to it.  The following assembly fragments *could* be rearranged in
   * order to leave the moves to/from the MDR register to the compiler, but the
   * gains would be minimal at best.
   */
  #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
  # define CLOBBER_MDR_CC		"mdr", "cc"
  #else
  # define CLOBBER_MDR_CC		"cc"
  #endif
  
  /*
   * divide n by base, leaving the result in n and returning the remainder
   * - we can do this quite efficiently on the MN10300 by cascading the divides
   *   through the MDR register
   */
  #define do_div(n, base)							\
  ({									\
  	unsigned __rem = 0;						\
  	if (sizeof(n) <= 4) {						\
  		asm("mov	%1,mdr	
  "				\
  		    "divu	%2,%0	
  "				\
  		    "mov	mdr,%1	
  "				\
  		    : "+r"(n), "=d"(__rem)				\
  		    : "r"(base), "1"(__rem)				\
  		    : CLOBBER_MDR_CC					\
  		    );							\
  	} else if (sizeof(n) <= 8) {					\
  		union {							\
  			unsigned long long l;				\
  			u32 w[2];					\
  		} __quot;						\
  		__quot.l = n;						\
  		asm("mov	%0,mdr	
  "	/* MDR = 0 */		\
  		    "divu	%3,%1	
  "				\
  		    /* __quot.MSL = __div.MSL / base, */		\
  		    /* MDR = MDR:__div.MSL % base */			\
  		    "divu	%3,%2	
  "				\
  		    /* __quot.LSL = MDR:__div.LSL / base, */		\
  		    /* MDR = MDR:__div.LSL % base */			\
  		    "mov	mdr,%0	
  "				\
  		    : "=d"(__rem), "=r"(__quot.w[1]), "=r"(__quot.w[0])	\
  		    : "r"(base), "0"(__rem), "1"(__quot.w[1]),		\
  		      "2"(__quot.w[0])					\
  		    : CLOBBER_MDR_CC					\
  		    );							\
  		n = __quot.l;						\
  	} else {							\
  		____unhandled_size_in_do_div___();			\
  	}								\
  	__rem;								\
  })
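
/*
 * Usage sketch (illustrative only, not part of the original header):
 * do_div() divides its 64-bit lvalue in place and the whole expression
 * evaluates to the 32-bit remainder.  The helper name and the constant
 * base below are examples, not kernel API.
 */
static inline u32 __example_ns_to_secs(u64 *ns)
{
	/* *ns becomes whole seconds; the leftover nanoseconds are returned */
	return do_div(*ns, 1000000000U);
}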
  
  /*
   * do an unsigned 32-bit multiply and divide with intermediate 64-bit product
   * so as not to lose accuracy
   * - we use the MDR register to hold the MSW of the product
   */
  static inline __attribute__((const))
  unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
  {
  	unsigned result;
  
  	asm("mulu	%2,%0	
  "	/* MDR:val = val*mult */
  	    "divu	%3,%0	
  "	/* val = MDR:val/div;
  					 * MDR = MDR:val%div */
  	    : "=r"(result)
  	    : "0"(val), "ir"(mult), "r"(div)
  	    : CLOBBER_MDR_CC
  	    );
  
  	return result;
  }
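
/*
 * Usage sketch (illustrative only, not part of the original header):
 * because the product is kept in 64 bits across MDR, a scale-then-divide
 * such as this hypothetical tick-to-microsecond conversion cannot
 * overflow at the multiply step.
 */
static inline unsigned __example_ticks_to_us(unsigned ticks, unsigned hz)
{
	/* equivalent to (ticks * 1000000ULL) / hz without a 64-bit divide */
	return __muldiv64u(ticks, 1000000, hz);
}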
  
  /*
   * do a signed 32-bit multiply and divide with intermediate 64-bit product so
   * as not to lose accuracy
   * - we use the MDR register to hold the MSW of the product
   */
  static inline __attribute__((const))
  signed __muldiv64s(signed val, signed mult, signed div)
  {
  	signed result;
  
  	asm("mul	%2,%0	
  "	/* MDR:val = val*mult */
  	    "div	%3,%0	
  "	/* val = MDR:val/div;
  					 * MDR = MDR:val%div */
  	    : "=r"(result)
  	    : "0"(val), "ir"(mult), "r"(div)
  	    : CLOBBER_MDR_CC
  	    );
  
  	return result;
  }
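
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * signed variant keeps the sign through the 64-bit intermediate, e.g.
 * when applying a parts-per-million correction that may be negative.
 */
static inline signed __example_apply_ppm(signed val, signed ppm)
{
	/* the signed product val*ppm is held in MDR:reg before the divide */
	return val + __muldiv64s(val, ppm, 1000000);
}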
  
  #endif /* _ASM_DIV64 */