crc32fast/specialized/pclmulqdq.rs

#[cfg(target_arch = "x86")]
use core::arch::x86 as arch;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as arch;

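/// CRC-32 state backed by the PCLMULQDQ-accelerated implementation below.
///
/// Rough usage sketch (not taken from the original sources): `State::new(init)`
/// returns `Some` only when the CPU supports pclmulqdq/sse2/sse4.1, which lets
/// callers fall back to the portable baseline implementation on `None`; bytes
/// are fed with `update` and the checksum is read back with `finalize`.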
#[derive(Clone)]
pub struct State {
    state: u32,
}

impl State {
    #[cfg(not(feature = "std"))]
    pub fn new(state: u32) -> Option<Self> {
        if cfg!(target_feature = "pclmulqdq")
            && cfg!(target_feature = "sse2")
            && cfg!(target_feature = "sse4.1")
        {
            // SAFETY: The conditions above ensure that all
            //         required instructions are supported by the CPU.
            Some(Self { state })
        } else {
            None
        }
    }

    #[cfg(feature = "std")]
    pub fn new(state: u32) -> Option<Self> {
        if is_x86_feature_detected!("pclmulqdq")
            && is_x86_feature_detected!("sse2")
            && is_x86_feature_detected!("sse4.1")
        {
            // SAFETY: The conditions above ensure that all
            //         required instructions are supported by the CPU.
            Some(Self { state })
        } else {
            None
        }
    }

    pub fn update(&mut self, buf: &[u8]) {
        // SAFETY: The `State::new` constructor ensures that all
        //         required instructions are supported by the CPU.
        self.state = unsafe { calculate(self.state, buf) }
    }

    pub fn finalize(self) -> u32 {
        self.state
    }

    pub fn reset(&mut self) {
        self.state = 0;
    }

    pub fn combine(&mut self, other: u32, amount: u64) {
        self.state = ::combine::combine(self.state, other, amount);
    }
}

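// K1..K5 are the folding constants used by the carry-less-multiply reduction
// below. Going by the white paper this code follows, they are precomputed
// powers of x reduced modulo P(x) (an assumption; not re-derived here). P_X is
// the bit-reflected CRC-32 polynomial, including the x^32 term, and U_PRIME is
// the Barrett-reduction constant μ used in the final step.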
const K1: i64 = 0x154442bd4;
const K2: i64 = 0x1c6e41596;
const K3: i64 = 0x1751997d0;
const K4: i64 = 0x0ccaa009e;
const K5: i64 = 0x163cd6124;

const P_X: i64 = 0x1DB710641;
const U_PRIME: i64 = 0x1F7011641;

// Debug helper that passes its argument through untouched; when manually
// enabled (flip the `if false` below), the std variant also hex-dumps the
// XMM register.
#[cfg(feature = "std")]
unsafe fn debug(s: &str, a: arch::__m128i) -> arch::__m128i {
    if false {
        union A {
            a: arch::__m128i,
            b: [u8; 16],
        }
        let x = A { a }.b;
        print!(" {:20} | ", s);
        for x in x.iter() {
            print!("{:02x} ", x);
        }
        println!();
    }
    return a;
}

#[cfg(not(feature = "std"))]
unsafe fn debug(_s: &str, a: arch::__m128i) -> arch::__m128i {
    a
}

#[target_feature(enable = "pclmulqdq", enable = "sse2", enable = "sse4.1")]
unsafe fn calculate(crc: u32, mut data: &[u8]) -> u32 {
    // In theory we could accelerate smaller chunks too, but for now just rely
    // on the fallback implementation, as the extra complexity doesn't seem
    // worth the limited benefit.
    if data.len() < 128 {
        return ::baseline::update_fast_16(crc, data);
    }

    // Step 1: fold by 4 loop
    let mut x3 = get(&mut data);
    let mut x2 = get(&mut data);
    let mut x1 = get(&mut data);
    let mut x0 = get(&mut data);

    // Fold in our initial value (part of the incremental crc checksum): !crc
    // lands in the low 32 bits of the first vector, so it is xor'd into the
    // first four message bytes.
    x3 = arch::_mm_xor_si128(x3, arch::_mm_cvtsi32_si128(!crc as i32));

    let k1k2 = arch::_mm_set_epi64x(K2, K1);
    while data.len() >= 64 {
        x3 = reduce128(x3, get(&mut data), k1k2);
        x2 = reduce128(x2, get(&mut data), k1k2);
        x1 = reduce128(x1, get(&mut data), k1k2);
        x0 = reduce128(x0, get(&mut data), k1k2);
    }

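    // Fold the four running accumulators down to a single 128-bit value using
    // the K3/K4 constants.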
    let k3k4 = arch::_mm_set_epi64x(K4, K3);
    let mut x = reduce128(x3, x2, k3k4);
    x = reduce128(x, x1, k3k4);
    x = reduce128(x, x0, k3k4);

    // Step 2: fold by 1 loop
    while data.len() >= 16 {
        x = reduce128(x, get(&mut data), k3k4);
    }

    debug("128 > 64 init", x);

    // Perform step 3, reduction from 128 bits to 64 bits. This part deviates
    // significantly from the paper and it's not clear why, but the Chrome and
    // Linux implementations of this algorithm diverge from the paper in the
    // same way (perhaps the paper has errata for this step).
    //
    // Algebraically, what's happening is:
    //
    // x = (x[0:63] • K4) ^ x[64:127]           // 96 bit result
    // x = ((x[0:31] as u64) • K5) ^ x[32:95]   // 64 bit result
    //
    // The paper is vague on this step and definitely uses different constants;
    // it's not obvious from the text where the xors happen or why things shift
    // around. This implementation matches the other production implementations,
    // though, and the tests below agree with the baseline.
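    //
    // (A note on the intrinsic, added for readers: in
    // `_mm_clmulepi64_si128(a, b, imm8)`, bit 0 of `imm8` selects the low or
    // high 64-bit half of `a` and bit 4 selects the half of `b`, so the 0x10
    // below multiplies the low half of `x` by K4, the high half of `k3k4`.)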
    let x = arch::_mm_xor_si128(
        arch::_mm_clmulepi64_si128(x, k3k4, 0x10),
        arch::_mm_srli_si128(x, 8),
    );
    let x = arch::_mm_xor_si128(
        arch::_mm_clmulepi64_si128(
            arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
            arch::_mm_set_epi64x(0, K5),
            0x00,
        ),
        arch::_mm_srli_si128(x, 4),
    );
    debug("128 > 64 xx", x);

    // Perform a Barrett reduction from our now 64 bits to 32 bits. The
    // algorithm for this is described at the end of the paper, and note that
    // this also implements the "bit reflected input" variant.
    let pu = arch::_mm_set_epi64x(U_PRIME, P_X);

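    // (pu holds P_X in its low 64 bits and U_PRIME in its high 64 bits, so the
    // 0x10 selector below multiplies by μ and the 0x00 selector by P(x).)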
    // T1(x) = ⌊(R(x) % x^32)⌋ • μ
    let t1 = arch::_mm_clmulepi64_si128(
        arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
        pu,
        0x10,
    );
    // T2(x) = ⌊(T1(x) % x^32)⌋ • P(x)
    let t2 = arch::_mm_clmulepi64_si128(
        arch::_mm_and_si128(t1, arch::_mm_set_epi32(0, 0, 0, !0)),
        pu,
        0x00,
    );
    // We're doing the bit-reflected variant, so get the upper 32 bits of the
    // 64-bit result instead of the lower 32 bits.
    //
    // C(x) = (R(x) ^ T2(x)) / x^32
    let c = arch::_mm_extract_epi32(arch::_mm_xor_si128(x, t2), 1) as u32;

    if !data.is_empty() {
        ::baseline::update_fast_16(!c, data)
    } else {
        !c
    }
}

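// One "fold" step from the paper: carry-less-multiply the low 64 bits of `a`
// by the low key and the high 64 bits of `a` by the high key (imm8 0x00 and
// 0x11 pick the halves), then xor both products into `b`.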
unsafe fn reduce128(a: arch::__m128i, b: arch::__m128i, keys: arch::__m128i) -> arch::__m128i {
    let t1 = arch::_mm_clmulepi64_si128(a, keys, 0x00);
    let t2 = arch::_mm_clmulepi64_si128(a, keys, 0x11);
    arch::_mm_xor_si128(arch::_mm_xor_si128(b, t1), t2)
}

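// Load the next 16 bytes of `a` as an unaligned 128-bit vector and advance the
// slice. Callers must ensure at least 16 bytes remain; this is only checked
// with a debug assertion.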
unsafe fn get(a: &mut &[u8]) -> arch::__m128i {
    debug_assert!(a.len() >= 16);
    let r = arch::_mm_loadu_si128(a.as_ptr() as *const arch::__m128i);
    *a = &a[16..];
    return r;
}

#[cfg(test)]
mod test {
    quickcheck! {
        fn check_against_baseline(init: u32, chunks: Vec<(Vec<u8>, usize)>) -> bool {
            let mut baseline = super::super::super::baseline::State::new(init);
            let mut pclmulqdq = super::State::new(init).expect("not supported");
            for (chunk, mut offset) in chunks {
                // simulate random alignments by offsetting the slice by up to 15 bytes
                offset &= 0xF;
                if chunk.len() <= offset {
                    baseline.update(&chunk);
                    pclmulqdq.update(&chunk);
                } else {
                    baseline.update(&chunk[offset..]);
                    pclmulqdq.update(&chunk[offset..]);
                }
            }
            pclmulqdq.finalize() == baseline.finalize()
        }
    }
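
    // A deterministic companion check (a sketch added alongside the property
    // test above): a buffer well over 128 bytes forces the fold-by-4 SIMD path
    // rather than the short-input fallback, and the result should match the
    // portable baseline. The test name and byte pattern are arbitrary choices,
    // not part of the original suite.
    #[test]
    fn long_buffer_matches_baseline() {
        let mut pclmulqdq = match super::State::new(0) {
            Some(state) => state,
            // The current CPU lacks pclmulqdq/sse4.1; nothing to test here.
            None => return,
        };
        let mut baseline = super::super::super::baseline::State::new(0);
        let data: Vec<u8> = (0..1024u32)
            .map(|i| (i.wrapping_mul(31) ^ (i >> 3)) as u8)
            .collect();
        pclmulqdq.update(&data);
        baseline.update(&data);
        assert_eq!(pclmulqdq.finalize(), baseline.finalize());
    }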
}