sha2/sha512/x86_avx2.rs

//! SHA-512 `x86`/`x86_64` backend

#![allow(clippy::many_single_char_names, unsafe_op_in_unsafe_fn)]

#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
compile_error!("x86-avx2 backend can be used only on x86 and x86_64 target arches");

use core::mem::size_of;

#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

use crate::consts::K64;
#[target_feature(enable = "avx2")]
pub(super) unsafe fn compress(state: &mut [u64; 8], blocks: &[[u8; 128]]) {
    let mut start_block = 0;

    if blocks.len() & 0b1 != 0 {
        sha512_compress_x86_64_avx(state, &blocks[0]);
        start_block += 1;
    }

    let mut ms: MsgSchedule = [_mm_setzero_si128(); 8];
    let mut t2: RoundStates = [_mm_setzero_si128(); 40];
    let mut x = [_mm256_setzero_si256(); 8];

    for i in (start_block..blocks.len()).step_by(2) {
        load_data_avx2(&mut x, &mut ms, &mut t2, blocks.as_ptr().add(i).cast());

        // First block
        let mut current_state = *state;
        rounds_0_63_avx2(&mut current_state, &mut x, &mut ms, &mut t2);
        rounds_64_79(&mut current_state, &ms);
        accumulate_state(state, &current_state);

        // Second block
        current_state = *state;
        process_second_block(&mut current_state, &t2);
        accumulate_state(state, &current_state);
    }
}
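/// Single-block compression: message-schedule updates run on 128-bit AVX
/// vectors while the rounds themselves are scalar.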
#[inline(always)]
unsafe fn sha512_compress_x86_64_avx(state: &mut [u64; 8], block: &[u8; 128]) {
    let mut ms = [_mm_setzero_si128(); 8];
    let mut x = [_mm_setzero_si128(); 8];

    // Reduced to single iteration
    let mut current_state = *state;
    load_data_avx(&mut x, &mut ms, block.as_ptr().cast());
    rounds_0_63_avx(&mut current_state, &mut x, &mut ms);
    rounds_64_79(&mut current_state, &ms);
    accumulate_state(state, &current_state);
}
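/// Loads one block's 16 message words, byte-swaps them to native integer
/// order, and pre-adds the first 16 round constants from `K64` into `ms`.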
#[inline(always)]
unsafe fn load_data_avx(x: &mut [__m128i; 8], ms: &mut MsgSchedule, data: *const __m128i) {
    // Shuffle mask that byte-swaps each 64-bit lane (big-endian message words
    // to native order).
    #[allow(non_snake_case)]
    let MASK = _mm_setr_epi32(0x04050607, 0x00010203, 0x0c0d0e0f, 0x08090a0b);

    macro_rules! unrolled_iterations {
        ($($i:literal),*) => {$(
            x[$i] = _mm_loadu_si128(data.add($i).cast());
            x[$i] = _mm_shuffle_epi8(x[$i], MASK);

            let y = _mm_add_epi64(
                x[$i],
                _mm_loadu_si128(K64.as_ptr().add(2 * $i).cast()),
            );

            ms[$i] = y;
        )*};
    }

    unrolled_iterations!(0, 1, 2, 3, 4, 5, 6, 7);
}
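/// Loads two adjacent blocks at once: the low 128-bit lane of each `x[i]`
/// holds block 0's words and the high lane block 1's. Block 0's K-added
/// schedule words go to `ms`, block 1's to `t2[0..8]`.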
#[inline(always)]
unsafe fn load_data_avx2(
    x: &mut [__m256i; 8],
    ms: &mut MsgSchedule,
    t2: &mut RoundStates,
    data: *const __m128i,
) {
    // Shuffle mask that byte-swaps each 64-bit lane (big-endian message words
    // to native order).
    #[allow(non_snake_case)]
    let MASK = _mm256_set_epi64x(
        0x0809_0A0B_0C0D_0E0F_i64,
        0x0001_0203_0405_0607_i64,
        0x0809_0A0B_0C0D_0E0F_i64,
        0x0001_0203_0405_0607_i64,
    );

    macro_rules! unrolled_iterations {
        ($($i:literal),*) => {$(
            // Low lane: block 0; high lane: block 1 (8 × 16 bytes further on).
            x[$i] = _mm256_insertf128_si256(x[$i], _mm_loadu_si128(data.add(8 + $i).cast()), 1);
            x[$i] = _mm256_insertf128_si256(x[$i], _mm_loadu_si128(data.add($i).cast()), 0);

            x[$i] = _mm256_shuffle_epi8(x[$i], MASK);

            let t = _mm_loadu_si128(K64.as_ptr().add($i * 2).cast());
            let y = _mm256_add_epi64(x[$i], _mm256_set_m128i(t, t));

            ms[$i] = _mm256_extracti128_si256(y, 0);
            t2[$i] = _mm256_extracti128_si256(y, 1);
        )*};
    }

    unrolled_iterations!(0, 1, 2, 3, 4, 5, 6, 7);
}
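/// Rounds 0..63: each inner iteration performs two scalar rounds on
/// pre-computed schedule words while SIMD computes (and K-adds) the next two.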
#[inline(always)]
unsafe fn rounds_0_63_avx(current_state: &mut State, x: &mut [__m128i; 8], ms: &mut MsgSchedule) {
    let mut k64_idx: usize = SHA512_BLOCK_WORDS_NUM;

    for _ in 0..4 {
        for j in 0..8 {
            let k64 = _mm_loadu_si128(K64.as_ptr().add(k64_idx).cast());
            let y = sha512_update_x_avx(x, k64);

            {
                let ms = cast_ms(ms);
                sha_round(current_state, ms[2 * j]);
                sha_round(current_state, ms[2 * j + 1]);
            }

            ms[j] = y;
            k64_idx += 2;
        }
    }
}
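/// Two-block variant of rounds 0..63: the 256-bit schedule update produces
/// words for both blocks. Block 0's feed the current rounds via `ms`; block
/// 1's are saved to `t2[8..40]` (`t2[0..8]` was filled by `load_data_avx2`).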
#[inline(always)]
unsafe fn rounds_0_63_avx2(
    current_state: &mut State,
    x: &mut [__m256i; 8],
    ms: &mut MsgSchedule,
    t2: &mut RoundStates,
) {
    let mut k64x4_idx: usize = SHA512_BLOCK_WORDS_NUM;

    for i in 1..5 {
        for j in 0..8 {
            let t = _mm_loadu_si128(K64.as_ptr().add(k64x4_idx).cast());
            let y = sha512_update_x_avx2(x, _mm256_set_m128i(t, t));

            {
                let ms = cast_ms(ms);
                sha_round(current_state, ms[2 * j]);
                sha_round(current_state, ms[2 * j + 1]);
            }

            ms[j] = _mm256_extracti128_si256(y, 0);
            t2[8 * i + j] = _mm256_extracti128_si256(y, 1);

            k64x4_idx += 2;
        }
    }
}
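/// Rounds 64..79: no further schedule updates are needed, so the remaining 16
/// pre-computed words are consumed directly.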
#[inline(always)]
fn rounds_64_79(current_state: &mut State, ms: &MsgSchedule) {
    let ms = cast_ms(ms);
    for i in 64..80 {
        sha_round(current_state, ms[i & 0xf]);
    }
}
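/// Replays the 80 pre-computed round inputs of a pair's second block.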
#[inline(always)]
fn process_second_block(current_state: &mut State, t2: &RoundStates) {
    for t2 in cast_rs(t2).iter() {
        sha_round(current_state, *t2);
    }
}
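/// One scalar SHA-512 round; `x` is the schedule word with its round constant
/// already added. With `s = [a, b, c, d, e, f, g, h]`, it computes T1 and T2,
/// updates `e` and `a`, then rotates the working variables.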
#[inline(always)]
fn sha_round(s: &mut State, x: u64) {
    macro_rules! big_sigma0 {
        ($a:expr) => {
            $a.rotate_right(28) ^ $a.rotate_right(34) ^ $a.rotate_right(39)
        };
    }
    macro_rules! big_sigma1 {
        ($a:expr) => {
            $a.rotate_right(14) ^ $a.rotate_right(18) ^ $a.rotate_right(41)
        };
    }
    macro_rules! bool3ary_202 {
        ($a:expr, $b:expr, $c:expr) => {
            $c ^ ($a & ($b ^ $c))
        };
    } // Choose, MD5F, SHA1C
    macro_rules! bool3ary_232 {
        ($a:expr, $b:expr, $c:expr) => {
            ($a & $b) ^ ($a & $c) ^ ($b & $c)
        };
    } // Majority, SHA1M

    macro_rules! rotate_state {
        ($s:ident) => {{
            let tmp = $s[7];
            $s[7] = $s[6];
            $s[6] = $s[5];
            $s[5] = $s[4];
            $s[4] = $s[3];
            $s[3] = $s[2];
            $s[2] = $s[1];
            $s[1] = $s[0];
            $s[0] = tmp;
        }};
    }

    // T1 = h + big_sigma1(e) + Ch(e, f, g) + (W[t] + K[t])
    let t = x
        .wrapping_add(s[7])
        .wrapping_add(big_sigma1!(s[4]))
        .wrapping_add(bool3ary_202!(s[4], s[5], s[6]));

    // a' = T1 + big_sigma0(a) + Maj(a, b, c); e' = d + T1
    s[7] = t
        .wrapping_add(big_sigma0!(s[0]))
        .wrapping_add(bool3ary_232!(s[0], s[1], s[2]));
    s[3] = s[3].wrapping_add(t);

    rotate_state!(s);
}
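/// Adds the working variables back into the hash state with wrapping addition.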
#[inline(always)]
fn accumulate_state(dst: &mut State, src: &State) {
    for i in 0..SHA512_HASH_WORDS_NUM {
        dst[i] = dst[i].wrapping_add(src[i]);
    }
}
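/// Generates a SIMD message-schedule update step. In the comments below,
/// `q[i]` is word `i` of the 16-word window held in `x` (two words per
/// 128-bit lane); `s0 = [1, 8, 7]` and `s1 = [19, 61, 6]` are the `sigma0`
/// and `sigma1` rotation/shift amounts. Each call computes
/// `W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]` for two words
/// per lane, rotates the window, and returns the new words with the round
/// constants `k64` pre-added.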
macro_rules! fn_sha512_update_x {
    ($name:ident, $ty:ident, {
        ADD64 = $ADD64:ident,
        ALIGNR8 = $ALIGNR8:ident,
        SRL64 = $SRL64:ident,
        SLL64 = $SLL64:ident,
        XOR = $XOR:ident,
    }) => {
        unsafe fn $name(x: &mut [$ty; 8], k64: $ty) -> $ty {
            // q[2:1]
            let mut t0 = $ALIGNR8(x[1], x[0], 8);
            // q[10:9]
            let mut t3 = $ALIGNR8(x[5], x[4], 8);
            // q[2:1] >> s0[0]
            let mut t2 = $SRL64(t0, 1);
            // q[1:0] + q[10:9]
            x[0] = $ADD64(x[0], t3);
            // q[2:1] >> s0[2]
            t3 = $SRL64(t0, 7);
            // q[2:1] << (64 - s0[1])
            let mut t1 = $SLL64(t0, 64 - 8);
            // (q[2:1] >> s0[2]) ^
            // (q[2:1] >> s0[0])
            t0 = $XOR(t3, t2);
            // q[2:1] >> s0[1]
            t2 = $SRL64(t2, 8 - 1);
            // (q[2:1] >> s0[2]) ^
            // (q[2:1] >> s0[0]) ^
            // (q[2:1] << (64 - s0[1]))
            t0 = $XOR(t0, t1);
            // q[2:1] << (64 - s0[0])
            t1 = $SLL64(t1, 8 - 1);
            // sigma0(q[2:1])
            t0 = $XOR(t0, t2);
            t0 = $XOR(t0, t1);
            // q[15:14] >> s1[2]
            t3 = $SRL64(x[7], 6);
            // q[15:14] << (64 - s1[1])
            t2 = $SLL64(x[7], 64 - 61);
            // q[1:0] + q[10:9] + sigma0(q[2:1])
            x[0] = $ADD64(x[0], t0);
            // q[15:14] >> s1[0]
            t1 = $SRL64(x[7], 19);
            // (q[15:14] >> s1[2]) ^
            // (q[15:14] << (64 - s1[1]))
            t3 = $XOR(t3, t2);
            // q[15:14] << (64 - s1[0])
            t2 = $SLL64(t2, 61 - 19);
            // (q[15:14] >> s1[2]) ^
            // (q[15:14] << (64 - s1[1])) ^
            // (q[15:14] >> s1[0])
            t3 = $XOR(t3, t1);
            // q[15:14] >> s1[1]
            t1 = $SRL64(t1, 61 - 19);
            // sigma1(q[15:14])
            t3 = $XOR(t3, t2);
            t3 = $XOR(t3, t1);

            // q[1:0] + q[10:9] + sigma0(q[2:1]) + sigma1(q[15:14])
            x[0] = $ADD64(x[0], t3);

            // rotate the 16-word window down by two words
            let temp = x[0];
            x[0] = x[1];
            x[1] = x[2];
            x[2] = x[3];
            x[3] = x[4];
            x[4] = x[5];
            x[5] = x[6];
            x[6] = x[7];
            x[7] = temp;

            $ADD64(x[7], k64)
        }
    };
}
fn_sha512_update_x!(sha512_update_x_avx, __m128i, {
    ADD64 = _mm_add_epi64,
    ALIGNR8 = _mm_alignr_epi8,
    SRL64 = _mm_srli_epi64,
    SLL64 = _mm_slli_epi64,
    XOR = _mm_xor_si128,
});

fn_sha512_update_x!(sha512_update_x_avx2, __m256i, {
    ADD64 = _mm256_add_epi64,
    ALIGNR8 = _mm256_alignr_epi8,
    SRL64 = _mm256_srli_epi64,
    SLL64 = _mm256_slli_epi64,
    XOR = _mm256_xor_si256,
});
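// `ms` and `t2` are plain arrays of 64-bit words under the hood; these casts
// expose them to the scalar round function.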
#[inline(always)]
fn cast_ms(ms: &MsgSchedule) -> &[u64; SHA512_BLOCK_WORDS_NUM] {
    unsafe { &*(ms.as_ptr().cast()) }
}

#[inline(always)]
fn cast_rs(rs: &RoundStates) -> &[u64; SHA512_ROUNDS_NUM] {
    unsafe { &*(rs.as_ptr().cast()) }
}
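// `MsgSchedule` holds one block's 16 K-added schedule words; `RoundStates`
// holds all 80 round inputs for a pair's second block.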
type State = [u64; SHA512_HASH_WORDS_NUM];
type MsgSchedule = [__m128i; SHA512_BLOCK_WORDS_NUM / 2];
type RoundStates = [__m128i; SHA512_ROUNDS_NUM / 2];

const SHA512_BLOCK_BYTE_LEN: usize = 128;
const SHA512_ROUNDS_NUM: usize = 80;
const SHA512_HASH_BYTE_LEN: usize = 64;
const SHA512_HASH_WORDS_NUM: usize = SHA512_HASH_BYTE_LEN / size_of::<u64>();
const SHA512_BLOCK_WORDS_NUM: usize = SHA512_BLOCK_BYTE_LEN / size_of::<u64>();