crc32fast/specialized/pclmulqdq.rs
#[cfg(target_arch = "x86")]
use core::arch::x86 as arch;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as arch;

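/// CRC-32 (IEEE) state for the PCLMULQDQ + SSE4.1 accelerated implementation.
/// `new` returns `None` when the required CPU features are unavailable, so
/// callers can fall back to the baseline implementation.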
#[derive(Clone)]
pub struct State {
    state: u32,
}

impl State {
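    // Without `std` there is no runtime CPU feature detection, so this
    // constructor only succeeds when the required target features were
    // enabled at compile time (e.g. via `-C target-feature`).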
    #[cfg(not(feature = "std"))]
    pub fn new(state: u32) -> Option<Self> {
        if cfg!(target_feature = "pclmulqdq")
            && cfg!(target_feature = "sse2")
            && cfg!(target_feature = "sse4.1")
        {
            Some(Self { state })
        } else {
            None
        }
    }

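    // With `std`, runtime feature detection allows the accelerated path even
    // when the binary was not compiled for a PCLMULQDQ-capable target.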
    #[cfg(feature = "std")]
    pub fn new(state: u32) -> Option<Self> {
        if is_x86_feature_detected!("pclmulqdq")
            && is_x86_feature_detected!("sse2")
            && is_x86_feature_detected!("sse4.1")
        {
            Some(Self { state })
        } else {
            None
        }
    }

    pub fn update(&mut self, buf: &[u8]) {
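        // SAFETY: `State::new` only constructs this type after confirming
        // that PCLMULQDQ, SSE2 and SSE4.1 are available, which is what
        // `calculate` is compiled for via `#[target_feature]`.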
        self.state = unsafe { calculate(self.state, buf) }
    }

    pub fn finalize(self) -> u32 {
        self.state
    }

    pub fn reset(&mut self) {
        self.state = 0;
    }

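    // Folds in `other`, the CRC of `amount` bytes hashed separately, as if
    // those bytes had been appended to this stream.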
    pub fn combine(&mut self, other: u32, amount: u64) {
        self.state = ::combine::combine(self.state, other, amount);
    }
}

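// Folding constants from Intel's white paper "Fast CRC Computation for
// Generic Polynomials Using PCLMULQDQ Instruction", specialised for the
// bit-reflected CRC-32 (IEEE) polynomial: K1/K2 fold an accumulator forward
// by 512 bits, K3/K4 by 128 bits, and K5 drives the final 96 -> 64 bit
// reduction. P_X is the polynomial and U_PRIME is the Barrett constant mu
// used in the final reduction to 32 bits.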
const K1: i64 = 0x154442bd4;
const K2: i64 = 0x1c6e41596;
const K3: i64 = 0x1751997d0;
const K4: i64 = 0x0ccaa009e;
const K5: i64 = 0x163cd6124;

const P_X: i64 = 0x1DB710641;
const U_PRIME: i64 = 0x1F7011641;

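// Debug helper: when the `if false` below is flipped, dumps a 128-bit
// register as hex bytes; otherwise it is a pass-through.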
#[cfg(feature = "std")]
unsafe fn debug(s: &str, a: arch::__m128i) -> arch::__m128i {
    if false {
        union A {
            a: arch::__m128i,
            b: [u8; 16],
        }
        let x = A { a }.b;
        print!(" {:20} | ", s);
        for x in x.iter() {
            print!("{:02x} ", x);
        }
        println!();
    }
    return a;
}

#[cfg(not(feature = "std"))]
unsafe fn debug(_s: &str, a: arch::__m128i) -> arch::__m128i {
    a
}

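// Folds `data` into `crc` using PCLMULQDQ carry-less multiplication: four
// 128-bit accumulators consume 64 bytes per iteration, are then collapsed
// into one, the remaining whole 16-byte blocks are folded in, the result is
// reduced 128 -> 64 -> 32 bits, and any tail shorter than 16 bytes is handed
// to the baseline implementation.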
#[target_feature(enable = "pclmulqdq", enable = "sse2", enable = "sse4.1")]
unsafe fn calculate(crc: u32, mut data: &[u8]) -> u32 {
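    // Buffers shorter than 128 bytes are not worth the SIMD setup; defer to
    // the table-driven baseline.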
    if data.len() < 128 {
        return ::baseline::update_fast_16(crc, data);
    }

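    // Load the first 64 bytes into four accumulators and fold the incoming
    // CRC into the first block; the inversion matches the usual CRC-32
    // pre-/post-inversion convention and is undone by `!c` at the end.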
    let mut x3 = get(&mut data);
    let mut x2 = get(&mut data);
    let mut x1 = get(&mut data);
    let mut x0 = get(&mut data);

    x3 = arch::_mm_xor_si128(x3, arch::_mm_cvtsi32_si128(!crc as i32));

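    // Fold-by-4 main loop: each accumulator folds forward by 512 bits via
    // K1/K2 and absorbs a fresh 16-byte block, consuming 64 bytes per pass.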
    let k1k2 = arch::_mm_set_epi64x(K2, K1);
    while data.len() >= 64 {
        x3 = reduce128(x3, get(&mut data), k1k2);
        x2 = reduce128(x2, get(&mut data), k1k2);
        x1 = reduce128(x1, get(&mut data), k1k2);
        x0 = reduce128(x0, get(&mut data), k1k2);
    }

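    // Collapse the four accumulators into one with the 128-bit fold constants
    // K3/K4, then keep folding any remaining whole 16-byte blocks.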
    let k3k4 = arch::_mm_set_epi64x(K4, K3);
    let mut x = reduce128(x3, x2, k3k4);
    x = reduce128(x, x1, k3k4);
    x = reduce128(x, x0, k3k4);

    while data.len() >= 16 {
        x = reduce128(x, get(&mut data), k3k4);
    }

    debug("128 > 64 init", x);

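    // Reduce 128 bits to 64: multiply the low 64 bits by K4 and fold them
    // into the upper half (a 96-bit value), then multiply the low 32 bits by
    // K5 and fold once more, leaving a 64-bit remainder.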
    let x = arch::_mm_xor_si128(
        arch::_mm_clmulepi64_si128(x, k3k4, 0x10),
        arch::_mm_srli_si128(x, 8),
    );
    let x = arch::_mm_xor_si128(
        arch::_mm_clmulepi64_si128(
            arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
            arch::_mm_set_epi64x(0, K5),
            0x00,
        ),
        arch::_mm_srli_si128(x, 4),
    );
    debug("128 > 64 xx", x);

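    // Barrett reduction from 64 bits to the final 32-bit remainder, using the
    // polynomial P(x) (low half of `pu`) and the precomputed factor mu
    // (U_PRIME, high half of `pu`).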
    let pu = arch::_mm_set_epi64x(U_PRIME, P_X);

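    // T1 = (x mod x^32) * mu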
    let t1 = arch::_mm_clmulepi64_si128(
        arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
        pu,
        0x10,
    );
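    // T2 = (T1 mod x^32) * P(x)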
    let t2 = arch::_mm_clmulepi64_si128(
        arch::_mm_and_si128(t1, arch::_mm_set_epi32(0, 0, 0, !0)),
        pu,
        0x00,
    );
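    // Bit-reflected variant: the remainder lands in bits 32..63 of x ^ T2,
    // hence the extract from 32-bit lane 1.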
    let c = arch::_mm_extract_epi32(arch::_mm_xor_si128(x, t2), 1) as u32;

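    // Hand any tail shorter than 16 bytes to the baseline implementation;
    // `!c` undoes the inversion folded in at the start.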
    if !data.is_empty() {
        ::baseline::update_fast_16(!c, data)
    } else {
        !c
    }
}

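// One folding step: carry-less multiply the low and high 64-bit halves of `a`
// by the two constants packed in `keys` and XOR both products into `b`.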
unsafe fn reduce128(a: arch::__m128i, b: arch::__m128i, keys: arch::__m128i) -> arch::__m128i {
    let t1 = arch::_mm_clmulepi64_si128(a, keys, 0x00);
    let t2 = arch::_mm_clmulepi64_si128(a, keys, 0x11);
    arch::_mm_xor_si128(arch::_mm_xor_si128(b, t1), t2)
}

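// Loads the next 16 bytes of `a` with an unaligned load and advances the
// slice; callers must ensure at least 16 bytes remain.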
unsafe fn get(a: &mut &[u8]) -> arch::__m128i {
    debug_assert!(a.len() >= 16);
    let r = arch::_mm_loadu_si128(a.as_ptr() as *const arch::__m128i);
    *a = &a[16..];
    return r;
}

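// Property test: the PCLMULQDQ implementation must agree with the baseline
// implementation for arbitrary chunk sequences and offsets.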
#[cfg(test)]
mod test {
    quickcheck! {
        fn check_against_baseline(init: u32, chunks: Vec<(Vec<u8>, usize)>) -> bool {
            let mut baseline = super::super::super::baseline::State::new(init);
            let mut pclmulqdq = super::State::new(init).expect("not supported");
            for (chunk, mut offset) in chunks {
                // simulate random alignments by offsetting the slice by up to 15 bytes
                offset &= 0xF;
                if chunk.len() <= offset {
                    baseline.update(&chunk);
                    pclmulqdq.update(&chunk);
                } else {
                    baseline.update(&chunk[offset..]);
                    pclmulqdq.update(&chunk[offset..]);
                }
            }
            pclmulqdq.finalize() == baseline.finalize()
        }
    }
}