// crc32fast/specialized/pclmulqdq.rs

#[cfg(target_arch = "x86")]
use core::arch::x86 as arch;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as arch;

#[derive(Clone)]
pub struct State {
    state: u32,
}

impl State {
    #[cfg(not(feature = "std"))]
    pub fn new(state: u32) -> Option<Self> {
        if cfg!(target_feature = "pclmulqdq")
            && cfg!(target_feature = "sse2")
            && cfg!(target_feature = "sse4.1")
        {
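            // The compile-time feature checks above guarantee that the
            // instructions `calculate` needs are available, so the
            // `unsafe` call in `update` is sound.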
            Some(Self { state })
        } else {
            None
        }
    }

    #[cfg(feature = "std")]
    pub fn new(state: u32) -> Option<Self> {
        if is_x86_feature_detected!("pclmulqdq")
            && is_x86_feature_detected!("sse2")
            && is_x86_feature_detected!("sse4.1")
        {
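            // The runtime feature detection above guarantees that the
            // instructions `calculate` needs are available, so the
            // `unsafe` call in `update` is sound.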
            Some(Self { state })
        } else {
            None
        }
    }

    pub fn update(&mut self, buf: &[u8]) {
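        // SAFETY: `State::new` only returns `Some` when the required target
        // features are present.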
        self.state = unsafe { calculate(self.state, buf) }
    }

    pub fn finalize(self) -> u32 {
        self.state
    }

    pub fn reset(&mut self) {
        self.state = 0;
    }

    pub fn combine(&mut self, other: u32, amount: u64) {
        self.state = crate::combine::combine(self.state, other, amount);
    }
}
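
// Folding constants for the bit-reflected CRC-32 polynomial; the algorithm
// follows Intel's white paper "Fast CRC Computation for Generic Polynomials
// Using PCLMULQDQ Instruction".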
const K1: i64 = 0x154442bd4;
const K2: i64 = 0x1c6e41596;
const K3: i64 = 0x1751997d0;
const K4: i64 = 0x0ccaa009e;
const K5: i64 = 0x163cd6124;
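
// The polynomial P(x) itself and μ, the precomputed quotient constant used
// by the final Barrett reduction step.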
const P_X: i64 = 0x1DB710641;
const U_PRIME: i64 = 0x1F7011641;

#[target_feature(enable = "pclmulqdq", enable = "sse2", enable = "sse4.1")]
unsafe fn calculate(crc: u32, mut data: &[u8]) -> u32 {
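    // Short inputs can't amortize the SIMD setup cost, so defer entirely to
    // the baseline implementation.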
    if data.len() < 128 {
        return crate::baseline::update_fast_16(crc, data);
    }
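
    // Load the first 64 bytes of input into four 128-bit lanes.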
    let mut x3 = get(&mut data);
    let mut x2 = get(&mut data);
    let mut x1 = get(&mut data);
    let mut x0 = get(&mut data);
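
    // XOR the complemented incoming CRC state into the low 32 bits of the
    // first block.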
    x3 = arch::_mm_xor_si128(x3, arch::_mm_cvtsi32_si128(!crc as i32));
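
    // Step 1: fold by 4, consuming 64 bytes of input per iteration.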
    let k1k2 = arch::_mm_set_epi64x(K2, K1);
    while data.len() >= 64 {
        x3 = reduce128(x3, get(&mut data), k1k2);
        x2 = reduce128(x2, get(&mut data), k1k2);
        x1 = reduce128(x1, get(&mut data), k1k2);
        x0 = reduce128(x0, get(&mut data), k1k2);
    }
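
    // Collapse the four lanes into a single 128-bit accumulator.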
    let k3k4 = arch::_mm_set_epi64x(K4, K3);
    let mut x = reduce128(x3, x2, k3k4);
    x = reduce128(x, x1, k3k4);
    x = reduce128(x, x0, k3k4);
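
    // Step 2: fold by 1, consuming 16 bytes per iteration.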
    while data.len() >= 16 {
        x = reduce128(x, get(&mut data), k3k4);
    }
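
    // Step 3: fold the 128-bit accumulator down to 64 bits in two
    // carry-less multiply steps:
    //   x = (x[0:63] · K4) ^ (x >> 64)
    //   x = (x[0:31] · K5) ^ (x >> 32)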
    let x = arch::_mm_xor_si128(
        arch::_mm_clmulepi64_si128(x, k3k4, 0x10),
        arch::_mm_srli_si128(x, 8),
    );
    let x = arch::_mm_xor_si128(
        arch::_mm_clmulepi64_si128(
            arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
            arch::_mm_set_epi64x(0, K5),
            0x00,
        ),
        arch::_mm_srli_si128(x, 4),
    );
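
    // Step 4: Barrett reduction from 64 bits to the final 32-bit remainder,
    // using the bit-reflected-input variant with the precomputed P(x) and μ.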
    let pu = arch::_mm_set_epi64x(U_PRIME, P_X);
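
    // T1(x) = ⌊R(x) % x^32⌋ · μ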
    let t1 = arch::_mm_clmulepi64_si128(
        arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
        pu,
        0x10,
    );
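    // T2(x) = ⌊T1(x) % x^32⌋ · P(x)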
    let t2 = arch::_mm_clmulepi64_si128(
        arch::_mm_and_si128(t1, arch::_mm_set_epi32(0, 0, 0, !0)),
        pu,
        0x00,
    );
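    // C(x) = (R(x) ^ T2(x)) / x^32, taken from bits 32..=63.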
    let c = arch::_mm_extract_epi32(arch::_mm_xor_si128(x, t2), 1) as u32;
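
    // Undo the complement; any remaining tail (fewer than 16 bytes) goes
    // through the baseline implementation.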
    if !data.is_empty() {
        crate::baseline::update_fast_16(!c, data)
    } else {
        !c
    }
}
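
// One folding step: carry-less multiply the low half of `a` by the low key
// and the high half by the high key, then xor both products into `b`.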
unsafe fn reduce128(a: arch::__m128i, b: arch::__m128i, keys: arch::__m128i) -> arch::__m128i {
    let t1 = arch::_mm_clmulepi64_si128(a, keys, 0x00);
    let t2 = arch::_mm_clmulepi64_si128(a, keys, 0x11);
    arch::_mm_xor_si128(arch::_mm_xor_si128(b, t1), t2)
}
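
// Load the next 16 bytes of `a` into a 128-bit vector and advance the slice.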
unsafe fn get(a: &mut &[u8]) -> arch::__m128i {
    debug_assert!(a.len() >= 16);
    let r = arch::_mm_loadu_si128(a.as_ptr() as *const arch::__m128i);
    *a = &a[16..];
    r
}
#[cfg(test)]
mod test {
    quickcheck::quickcheck! {
        fn check_against_baseline(init: u32, chunks: Vec<(Vec<u8>, usize)>) -> bool {
            let mut baseline = super::super::super::baseline::State::new(init);
            let mut pclmulqdq = super::State::new(init).expect("not supported");
            for (chunk, mut offset) in chunks {
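                // Restrict the offset to 0..=15 so every possible 16-byte
                // misalignment is exercised.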
                offset &= 0xF;
                if chunk.len() <= offset {
                    baseline.update(&chunk);
                    pclmulqdq.update(&chunk);
                } else {
                    baseline.update(&chunk[offset..]);
                    pclmulqdq.update(&chunk[offset..]);
                }
            }
            pclmulqdq.finalize() == baseline.finalize()
        }
    }
}