From 312a92b5104fd5e9090bc079f458f9105a91e1ea Mon Sep 17 00:00:00 2001
From: Mattias Andrée
Date: Fri, 8 Jul 2022 13:23:00 +0200
Subject: Use SHA extensions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Mattias Andrée
---
 process.c | 250 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 244 insertions(+), 6 deletions(-)

diff --git a/process.c b/process.c
index a309628..d7d6ab8 100644
--- a/process.c
+++ b/process.c
@@ -1,5 +1,15 @@
 /* See LICENSE file for copyright and license details. */
 #include "common.h"
+#include <stdatomic.h>
+
+#if defined(__SSE4_1__) && defined(__SSSE3__) && defined(__SSE2__) && defined(__SHA__)
+# define HAVE_X86_SHA_INTRINSICS
+#endif
+
+
+#ifdef HAVE_X86_SHA_INTRINSICS
+# include <immintrin.h>
+#endif
 
 
 /**
@@ -58,9 +68,224 @@ h[i] = TRUNC(h[i] + work_h[i]);
 
 
-void
-libsha2_process(struct libsha2_state *restrict state, const unsigned char *restrict chunk)
+#ifdef HAVE_X86_SHA_INTRINSICS
+
+static size_t
+process_x86_sha256(struct libsha2_state *restrict state, const unsigned char *restrict data, size_t len)
+{
+	const __m128i SHUFFLE_MASK = _mm_set_epi64x(0x0C0D0E0F08090A0BULL, 0x0405060700010203ULL);
+	register __m128i temp, s0, s1, msg, msg0, msg1, msg2, msg3;
+	__m128i abef_orig, cdgh_orig;
+	const unsigned char *restrict chunk;
+	size_t off = 0;
+
+	temp = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i *)&state->h.b32[0]), 0xB1);
+	s1 = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i *)&state->h.b32[4]), 0x1B);
+	s0 = _mm_alignr_epi8(temp, s1, 8);
+	s1 = _mm_blend_epi16(s1, temp, 0xF0);
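+	/* The SHA instructions keep the hash state across two registers in
+	 * an (A,B,E,F)/(C,D,G,H) arrangement rather than the natural A..H
+	 * order, hence the shuffling above.  Each loop iteration below runs
+	 * the 64 rounds for one chunk: _mm_sha256rnds2_epu32 performs two
+	 * rounds per call, and the msg1/msg2 intrinsics extend the schedule. */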
+
+	for (; len - off >= state->chunk_size; off += state->chunk_size) {
+		chunk = &data[off];
+
+		abef_orig = s0;
+		cdgh_orig = s1;
+
+		msg = _mm_loadu_si128((const __m128i *)&chunk[0]);
+		msg0 = _mm_shuffle_epi8(msg, SHUFFLE_MASK);
+		msg = _mm_add_epi32(msg0, _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+
+		msg1 = _mm_loadu_si128((const __m128i *)&chunk[16]);
+		msg1 = _mm_shuffle_epi8(msg1, SHUFFLE_MASK);
+		msg = _mm_add_epi32(msg1, _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		msg0 = _mm_sha256msg1_epu32(msg0, msg1);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+
+		msg2 = _mm_loadu_si128((const __m128i *)&chunk[32]);
+		msg2 = _mm_shuffle_epi8(msg2, SHUFFLE_MASK);
+		msg = _mm_add_epi32(msg2, _mm_set_epi64x(0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg1 = _mm_sha256msg1_epu32(msg1, msg2);
+
+		msg3 = _mm_loadu_si128((const __m128i *)&chunk[48]);
+		msg3 = _mm_shuffle_epi8(msg3, SHUFFLE_MASK);
+		msg = _mm_add_epi32(msg3, _mm_set_epi64x(0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL));
+		temp = _mm_alignr_epi8(msg3, msg2, 4);
+		msg0 = _mm_add_epi32(msg0, temp);
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		msg0 = _mm_sha256msg2_epu32(msg0, msg3);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg2 = _mm_sha256msg1_epu32(msg2, msg3);
+
+		msg = _mm_add_epi32(msg0, _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg0, msg3, 4);
+		msg1 = _mm_add_epi32(msg1, temp);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		msg1 = _mm_sha256msg2_epu32(msg1, msg0);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg3 = _mm_sha256msg1_epu32(msg3, msg0);
+
+		msg = _mm_add_epi32(msg1, _mm_set_epi64x(0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg1, msg0, 4);
+		msg2 = _mm_add_epi32(msg2, temp);
+		msg2 = _mm_sha256msg2_epu32(msg2, msg1);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg0 = _mm_sha256msg1_epu32(msg0, msg1);
+
+		msg = _mm_add_epi32(msg2, _mm_set_epi64x(0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg2, msg1, 4);
+		msg3 = _mm_add_epi32(msg3, temp);
+		msg3 = _mm_sha256msg2_epu32(msg3, msg2);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg1 = _mm_sha256msg1_epu32(msg1, msg2);
+
+		msg = _mm_add_epi32(msg3, _mm_set_epi64x(0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg3, msg2, 4);
+		msg0 = _mm_add_epi32(msg0, temp);
+		msg0 = _mm_sha256msg2_epu32(msg0, msg3);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg2 = _mm_sha256msg1_epu32(msg2, msg3);
+
+		msg = _mm_add_epi32(msg0, _mm_set_epi64x(0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg0, msg3, 4);
+		msg1 = _mm_add_epi32(msg1, temp);
+		msg1 = _mm_sha256msg2_epu32(msg1, msg0);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg3 = _mm_sha256msg1_epu32(msg3, msg0);
+
+		msg = _mm_add_epi32(msg1, _mm_set_epi64x(0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg1, msg0, 4);
+		msg2 = _mm_add_epi32(msg2, temp);
+		msg2 = _mm_sha256msg2_epu32(msg2, msg1);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg0 = _mm_sha256msg1_epu32(msg0, msg1);
+
+		msg = _mm_add_epi32(msg2, _mm_set_epi64x(0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg2, msg1, 4);
+		msg3 = _mm_add_epi32(msg3, temp);
+		msg3 = _mm_sha256msg2_epu32(msg3, msg2);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg1 = _mm_sha256msg1_epu32(msg1, msg2);
+
+		msg = _mm_add_epi32(msg3, _mm_set_epi64x(0x106AA070F40E3585ULL, 0xD6990624D192E819ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg3, msg2, 4);
+		msg0 = _mm_add_epi32(msg0, temp);
+		msg0 = _mm_sha256msg2_epu32(msg0, msg3);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg2 = _mm_sha256msg1_epu32(msg2, msg3);
+
+		msg = _mm_add_epi32(msg0, _mm_set_epi64x(0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg0, msg3, 4);
+		msg1 = _mm_add_epi32(msg1, temp);
+		msg1 = _mm_sha256msg2_epu32(msg1, msg0);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+		msg3 = _mm_sha256msg1_epu32(msg3, msg0);
+
+		msg = _mm_add_epi32(msg1, _mm_set_epi64x(0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg1, msg0, 4);
+		msg2 = _mm_add_epi32(msg2, temp);
+		msg2 = _mm_sha256msg2_epu32(msg2, msg1);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+
+		msg = _mm_add_epi32(msg2, _mm_set_epi64x(0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		temp = _mm_alignr_epi8(msg2, msg1, 4);
+		msg3 = _mm_add_epi32(msg3, temp);
+		msg3 = _mm_sha256msg2_epu32(msg3, msg2);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+
+		msg = _mm_add_epi32(msg3, _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL));
+		s1 = _mm_sha256rnds2_epu32(s1, s0, msg);
+		msg = _mm_shuffle_epi32(msg, 0x0E);
+		s0 = _mm_sha256rnds2_epu32(s0, s1, msg);
+
+		s0 = _mm_add_epi32(s0, abef_orig);
+		s1 = _mm_add_epi32(s1, cdgh_orig);
+	}
+
+	temp = _mm_shuffle_epi32(s0, 0x1B);
+	s1 = _mm_shuffle_epi32(s1, 0xB1);
+	s0 = _mm_blend_epi16(temp, s1, 0xF0);
+	s1 = _mm_alignr_epi8(s1, temp, 8);
+
+	_mm_storeu_si128((__m128i *)&state->h.b32[0], s0);
+	_mm_storeu_si128((__m128i *)&state->h.b32[4], s1);
+
+	return off;
+}
+
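+/*
+ * Runtime detection for the instructions used above: CPUID leaf 7
+ * (subleaf 0) reports the SHA extensions in EBX bit 29, and leaf 1
+ * reports SSE4.1 (ECX bit 19), SSSE3 (ECX bit 9) and SSE2 (EDX bit 26).
+ * The result is computed once, cached in ret, and serialised with a
+ * spinlock; the constructor attribute primes the cache at load time.
+ */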
+# if defined(__GNUC__)
+__attribute__((__constructor__))
+# endif
+static int
+have_sha_intrinsics(void) {
+	static volatile int ret = -1;
+	static volatile atomic_flag spinlock = ATOMIC_FLAG_INIT;
+
+	if (ret != -1)
+		return ret;
+
+	while (atomic_flag_test_and_set(&spinlock));
+
+	if (ret != -1)
+		goto out;
+
+	int a = 7, b, c = 0, d; /* CPUID leaf 7, subleaf 0 */
+	__asm__ volatile("cpuid" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "a"(a), "c"(c));
+	if (!(b & (1 << 29))) { /* EBX bit 29: SHA extensions */
+		ret = 0;
+		goto out;
+	}
+	a = 1; /* CPUID leaf 1 */
+	__asm__ volatile("cpuid" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "a"(a), "c"(c));
+	if (!(c & (1 << 19)) || !(c & (1 << 9)) || !(d & (1 << 26))) { /* SSE4.1, SSSE3, SSE2 */
+		ret = 0;
+		goto out;
+	}
+	ret = 1;
+
+out:
+	atomic_flag_clear(&spinlock);
+	return ret;
+}
+
+#endif
+
+
+size_t
+libsha2_process(struct libsha2_state *restrict state, const unsigned char *restrict data, size_t len)
 {
+	const unsigned char *restrict chunk;
+	size_t off = 0;
+
 	if (state->algorithm <= LIBSHA2_256) {
 		uint_least32_t s0, s1;
 		size_t i, j;
@@ -71,8 +296,16 @@ libsha2_process(struct libsha2_state *restr
 #endif
 #define ROTR(X, N) TRUNC32(((X) >> (N)) | ((X) << (32 - (N))))
 
-		SHA2_IMPLEMENTATION(chunk, 7, 18, 3, 17, 19, 10, 6, 11, 25, 2, 13, 22, uint_least32_t,
-		                    4, TRUNC32, state->k.b32, state->w.b32, state->h.b32, state->work_h.b32);
+#ifdef HAVE_X86_SHA_INTRINSICS
+		if (have_sha_intrinsics())
+			return process_x86_sha256(state, data, len);
+#endif
+
+		for (; len - off >= state->chunk_size; off += state->chunk_size) {
+			chunk = &data[off];
+			SHA2_IMPLEMENTATION(chunk, 7, 18, 3, 17, 19, 10, 6, 11, 25, 2, 13, 22, uint_least32_t, 4,
+			                    TRUNC32, state->k.b32, state->w.b32, state->h.b32, state->work_h.b32);
+		}
 
 #undef ROTR
 #if defined(__GNUC__)
@@ -85,9 +318,14 @@ libsha2_process(struct libsha2_state *restr
 
 #define ROTR(X, N) TRUNC64(((X) >> (N)) | ((X) << (64 - (N))))
 
-		SHA2_IMPLEMENTATION(chunk, 1, 8, 7, 19, 61, 6, 14, 18, 41, 28, 34, 39, uint_least64_t,
-		                    8, TRUNC64, state->k.b64, state->w.b64, state->h.b64, state->work_h.b64);
+		for (; len - off >= state->chunk_size; off += state->chunk_size) {
+			chunk = &data[off];
+			SHA2_IMPLEMENTATION(chunk, 1, 8, 7, 19, 61, 6, 14, 18, 41, 28, 34, 39, uint_least64_t, 8,
+			                    TRUNC64, state->k.b64, state->w.b64, state->h.b64, state->work_h.b64);
+		}
 
 #undef ROTR
 	}
+
+	return off;
 }
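
With this change, libsha2_process consumes as many whole chunks as fit in the
given length and returns the number of bytes it processed (a multiple of
state->chunk_size), leaving any partial chunk for the caller to carry over.
A minimal caller-side sketch of that contract follows; the ctx wrapper and
its fields are illustrative only, not part of libsha2's API, and the state
is assumed to have been initialised elsewhere:

#include <libsha2.h>
#include <string.h>

struct ctx {
	struct libsha2_state state;  /* initialised elsewhere */
	unsigned char tail[128];     /* 128 covers the largest chunk_size (SHA-512 family) */
	size_t tail_len;             /* bytes carried over from the previous call */
};

static void
ctx_update(struct ctx *c, const unsigned char *data, size_t len)
{
	size_t n, chunk = c->state.chunk_size;

	/* Top up any carried partial chunk first. */
	if (c->tail_len) {
		n = chunk - c->tail_len;
		if (n > len)
			n = len;
		memcpy(&c->tail[c->tail_len], data, n);
		c->tail_len += n;
		data += n;
		len -= n;
		if (c->tail_len < chunk)
			return; /* input exhausted before filling a chunk */
		libsha2_process(&c->state, c->tail, chunk);
		c->tail_len = 0;
	}

	/* Hash whole chunks straight out of the input buffer; the return
	 * value is the number of bytes consumed, so whatever remains is
	 * carried over to the next call. */
	n = libsha2_process(&c->state, data, len);
	memcpy(c->tail, &data[n], len - n);
	c->tail_len = len - n;
}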