Blame view

kernel/linux-imx6_3.14.28/lib/raid6/altivec.uc 3.23 KB
6b13f685e   김민수   Initial BSP addition
  /* -*- linux-c -*- ------------------------------------------------------- *
   *
   *   Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
   *
   *   This program is free software; you can redistribute it and/or modify
   *   it under the terms of the GNU General Public License as published by
   *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
   *   Boston MA 02111-1307, USA; either version 2 of the License, or
   *   (at your option) any later version; incorporated herein by reference.
   *
   * ----------------------------------------------------------------------- */
  
  /*
   * raid6altivec$#.c
   *
   * $#-way unrolled AltiVec-optimized RAID-6 syndrome functions
   *
   * This file is postprocessed using unroll.awk
   *
   * <benh> hpa: in process,
   * you can just "steal" the vec unit with enable_kernel_altivec() (but
   * bracket this with preempt_disable/enable or in a lock)
   */
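
  /*
   * Illustration only (not part of the build): unroll.awk substitutes the
   * unroll factor for $# and duplicates every line containing $$ once per
   * index 0 .. $#-1.  For a 2-way unroll, a line such as
   *
   *	wp$$ = vec_xor(wp$$, wd$$);
   *
   * would be emitted as
   *
   *	wp0 = vec_xor(wp0, wd0);
   *	wp1 = vec_xor(wp1, wd1);
   */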
  
  #include <linux/raid/pq.h>

  #ifdef CONFIG_ALTIVEC

  #include <altivec.h>
  #ifdef __KERNEL__
  # include <asm/cputable.h>
  # include <asm/switch_to.h>
  #endif /* __KERNEL__ */
  
  /*
   * This is the C data type to use.  We use a vector of
   * signed char so vec_cmpgt() will generate the right
   * instruction.
   */
  
  typedef vector signed char unative_t;
  
  #define NBYTES(x) ((vector signed char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
  #define NSIZE	sizeof(unative_t)
  
  /*
   * The SHLBYTE() operation shifts each byte left by 1, *not*
   * rolling over into the next byte
   */
  static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
  {
  	return vec_add(v,v);
  }
  
  /*
   * The MASK() operation returns 0xFF in any byte for which the high
   * bit is 1, 0x00 for any byte for which the high bit is 0.
   */
  static inline __attribute_const__ unative_t MASK(unative_t v)
  {
  	unative_t zv = NBYTES(0);
  
  	/* vec_cmpgt returns a vector bool char; thus the need for the cast */
  	return (unative_t)vec_cmpgt(zv, v);
  }
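
  /*
   * MASK() and SHLBYTE() together implement a bytewise multiplication by
   * 2 (i.e. by x) in GF(2^8) with the RAID-6 polynomial 0x11d:
   *
   *	2*v = SHLBYTE(v) ^ (0x1d & MASK(v))
   *
   * Worked single-byte example: v = 0x87 has its high bit set, so
   * 2*0x87 = 0x0e ^ 0x1d = 0x13.
   */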
  
  
  /* This is noinline to make damned sure that gcc doesn't move any of the
     Altivec code around the enable/disable code */
  static void noinline
  raid6_altivec$#_gen_syndrome_real(int disks, size_t bytes, void **ptrs)
  {
  	u8 **dptr = (u8 **)ptrs;
  	u8 *p, *q;
  	int d, z, z0;
  
  	unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
  	unative_t x1d = NBYTES(0x1d);
  
  	z0 = disks - 3;		/* Highest data disk */
  	p = dptr[z0+1];		/* XOR parity */
  	q = dptr[z0+2];		/* RS syndrome */
  
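  	/*
  	 * Per 16-byte vector lane, compute the RAID-6 syndromes over
  	 * data disks D_0 .. D_z0 by Horner's rule:
  	 *
  	 *	P = D_z0 ^ D_(z0-1) ^ ... ^ D_0
  	 *	Q = D_z0 * 2^z0 ^ ... ^ D_1 * 2 ^ D_0   (in GF(2^8))
  	 *
  	 * wq is multiplied by 2 once per data disk, using the
  	 * MASK()/SHLBYTE()/x1d sequence described above.
  	 */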
  	for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
  		wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
  		for ( z = z0-1 ; z >= 0 ; z-- ) {
  			wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
  			wp$$ = vec_xor(wp$$, wd$$);
  			w2$$ = MASK(wq$$);
  			w1$$ = SHLBYTE(wq$$);
  			w2$$ = vec_and(w2$$, x1d);
  			w1$$ = vec_xor(w1$$, w2$$);
  			wq$$ = vec_xor(w1$$, wd$$);
  		}
  		*(unative_t *)&p[d+NSIZE*$$] = wp$$;
  		*(unative_t *)&q[d+NSIZE*$$] = wq$$;
  	}
  }
  
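  /*
   * "Steal" the vector unit as described in the <benh> note at the top of
   * this file: enable_kernel_altivec() is bracketed with
   * preempt_disable()/preempt_enable() so the thread cannot be switched
   * out while the kernel is using the AltiVec registers.
   */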
  static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
  {
  	preempt_disable();
  	enable_kernel_altivec();
  
  	raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
  
  	preempt_enable();
  }
  
  int raid6_have_altivec(void);
  #if $# == 1
  int raid6_have_altivec(void)
  {
  	/* This assumes either all CPUs have Altivec or none does */
  # ifdef __KERNEL__
  	return cpu_has_feature(CPU_FTR_ALTIVEC);
  # else
  	return 1;
  # endif
  }
  #endif
  
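  /*
   * Positional initializer for struct raid6_calls (<linux/raid/pq.h>):
   * gen_syndrome() implementation, validity check, algorithm name, and
   * preference value.
   */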
  const struct raid6_calls raid6_altivec$# = {
  	raid6_altivec$#_gen_syndrome,
  	raid6_have_altivec,
  	"altivecx$#",
  	0
  };
  
  #endif /* CONFIG_ALTIVEC */