diff options
Diffstat (limited to '')
-rw-r--r--  common.h               |   8 +-
-rw-r--r--  config-coverage-gcc.mk |   2 +-
-rw-r--r--  config.mk              |   4 +-
-rw-r--r--  digest.c               |   4 +-
-rw-r--r--  process.c              | 250 ++++++++++++++++++++++++++++++++++-
-rw-r--r--  test.c                 |  35 ++++-
-rw-r--r--  update.c               |  14 +--
7 files changed, 293 insertions, 24 deletions
@@ -35,10 +35,12 @@ /** * Process a chunk using SHA-2 * - * @param state The hashing state - * @param chunk The data to process + * @param state The hashing state + * @param data The data to process + * @param len The amount of available data + * @return The amount of data processed */ #if defined(__GNUC__) __attribute__((__nonnull__, __nothrow__)) #endif -void libsha2_process(struct libsha2_state *restrict, const unsigned char *restrict); +size_t libsha2_process(struct libsha2_state *restrict, const unsigned char *restrict, size_t); diff --git a/config-coverage-gcc.mk b/config-coverage-gcc.mk index 18de147..80c21b6 100644 --- a/config-coverage-gcc.mk +++ b/config-coverage-gcc.mk @@ -1,7 +1,7 @@ CONFIGFILE_PROPER = config.mk include $(CONFIGFILE_PROPER) -CC = $(CC_PREFIX)gcc -std=c99 +CC = $(CC_PREFIX)gcc -std=c11 GCOV = gcov CFLAGS = -g -O0 -pedantic -fprofile-arcs -ftest-coverage @@ -1,10 +1,10 @@ PREFIX = /usr MANPREFIX = $(PREFIX)/share/man -CC = c99 +CC = cc -std=c11 CPPFLAGS = -D_DEFAULT_SOURCE -D_BSD_SOURCE -D_XOPEN_SOURCE=700 -CFLAGS = -Wall -O3 +CFLAGS = -Wall -O3 -msse4 -msha LDFLAGS = -s # You can add -DALLOCA_LIMIT=# to CPPFLAGS, where # is a size_t @@ -29,7 +29,7 @@ libsha2_digest(struct libsha2_state *restrict state, const void *message_, size_ if (off > state->chunk_size - (size_t)8 * (size_t)(1 + (state->algorithm > LIBSHA2_256))) { memset(state->chunk + off, 0, state->chunk_size - off); off = 0; - libsha2_process(state, state->chunk); + libsha2_process(state, state->chunk, state->chunk_size); } memset(state->chunk + off, 0, state->chunk_size - 8 - off); @@ -41,7 +41,7 @@ libsha2_digest(struct libsha2_state *restrict state, const void *message_, size_ state->chunk[state->chunk_size - 3] = (unsigned char)(state->message_size >> 16); state->chunk[state->chunk_size - 2] = (unsigned char)(state->message_size >> 8); state->chunk[state->chunk_size - 1] = (unsigned char)(state->message_size >> 0); - libsha2_process(state, state->chunk); + 
libsha2_process(state, state->chunk, state->chunk_size); n = libsha2_algorithm_output_size(state->algorithm); if (state->algorithm <= LIBSHA2_256) { @@ -1,5 +1,15 @@ /* See LICENSE file for copyright and license details. */ #include "common.h" +#include <stdatomic.h> + +#if defined(__SSE4_1__) && defined(__SSSE3__) && defined(__SSE2__) && defined(__SHA__) +# define HAVE_X86_SHA_INTRINSICS +#endif + + +#ifdef HAVE_X86_SHA_INTRINSICS +# include <immintrin.h> +#endif /** @@ -58,9 +68,224 @@ h[i] = TRUNC(h[i] + work_h[i]); -void -libsha2_process(struct libsha2_state *restrict state, const unsigned char *restrict chunk) +#ifdef HAVE_X86_SHA_INTRINSICS + +static size_t +process_x86_sha256(struct libsha2_state *restrict state, const unsigned char *restrict data, size_t len) +{ + const __m128i SHUFFLE_MASK = _mm_set_epi64x(0x0C0D0E0F08090A0BULL, 0x0405060700010203ULL); + register __m128i temp, s0, s1, msg, msg0, msg1, msg2, msg3; + __m128i abef_orig, cdgh_orig; + const unsigned char *restrict chunk; + size_t off = 0; + + temp = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i *)&state->h.b32[0]), 0xB1); + s1 = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i *)&state->h.b32[4]), 0x1B); + s0 = _mm_alignr_epi8(temp, s1, 8); + s1 = _mm_blend_epi16(s1, temp, 0xF0); + + for (; len - off >= state->chunk_size; off += state->chunk_size) { + chunk = &data[off]; + + abef_orig = s0; + cdgh_orig = s1; + + msg = _mm_loadu_si128((const __m128i *)&chunk[0]); + msg0 = _mm_shuffle_epi8(msg, SHUFFLE_MASK); + msg = _mm_add_epi32(msg0, _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + + msg1 = _mm_loadu_si128((const __m128i *)&chunk[16]); + msg1 = _mm_shuffle_epi8(msg1, SHUFFLE_MASK); + msg = _mm_add_epi32(msg1, _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + msg = _mm_shuffle_epi32(msg, 0x0E); + 
msg0 = _mm_sha256msg1_epu32(msg0, msg1); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + + msg2 = _mm_loadu_si128((const __m128i *)&chunk[32]); + msg2 = _mm_shuffle_epi8(msg2, SHUFFLE_MASK); + msg = _mm_add_epi32(msg2, _mm_set_epi64x(0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + msg1 = _mm_sha256msg1_epu32(msg1, msg2); + + msg3 = _mm_loadu_si128((const __m128i *)&chunk[48]); + msg3 = _mm_shuffle_epi8(msg3, SHUFFLE_MASK); + msg = _mm_add_epi32(msg3, _mm_set_epi64x(0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL)); + temp = _mm_alignr_epi8(msg3, msg2, 4); + msg0 = _mm_add_epi32(msg0, temp); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + msg = _mm_shuffle_epi32(msg, 0x0E); + msg0 = _mm_sha256msg2_epu32(msg0, msg3); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + msg2 = _mm_sha256msg1_epu32(msg2, msg3); + + msg = _mm_add_epi32(msg0, _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg0, msg3, 4); + msg1 = _mm_add_epi32(msg1, temp); + msg = _mm_shuffle_epi32(msg, 0x0E); + msg1 = _mm_sha256msg2_epu32(msg1, msg0); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + msg3 = _mm_sha256msg1_epu32(msg3, msg0); + + msg = _mm_add_epi32(msg1, _mm_set_epi64x(0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg1, msg0, 4); + msg2 = _mm_add_epi32(msg2, temp); + msg2 = _mm_sha256msg2_epu32(msg2, msg1); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + msg0 = _mm_sha256msg1_epu32(msg0, msg1); + + msg = _mm_add_epi32(msg2, _mm_set_epi64x(0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg2, msg1, 4); + msg3 = _mm_add_epi32(msg3, temp); + msg3 = _mm_sha256msg2_epu32(msg3, msg2); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = 
_mm_sha256rnds2_epu32(s0, s1, msg); + msg1 = _mm_sha256msg1_epu32(msg1, msg2); + + msg = _mm_add_epi32(msg3, _mm_set_epi64x(0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg3, msg2, 4); + msg0 = _mm_add_epi32(msg0, temp); + msg0 = _mm_sha256msg2_epu32(msg0, msg3); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + msg2 = _mm_sha256msg1_epu32(msg2, msg3); + + msg = _mm_add_epi32(msg0, _mm_set_epi64x(0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg0, msg3, 4); + msg1 = _mm_add_epi32(msg1, temp); + msg1 = _mm_sha256msg2_epu32(msg1, msg0); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + msg3 = _mm_sha256msg1_epu32(msg3, msg0); + + msg = _mm_add_epi32(msg1, _mm_set_epi64x(0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg1, msg0, 4); + msg2 = _mm_add_epi32(msg2, temp); + msg2 = _mm_sha256msg2_epu32(msg2, msg1); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + msg0 = _mm_sha256msg1_epu32(msg0, msg1); + + msg = _mm_add_epi32(msg2, _mm_set_epi64x(0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg2, msg1, 4); + msg3 = _mm_add_epi32(msg3, temp); + msg3 = _mm_sha256msg2_epu32(msg3, msg2); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + msg1 = _mm_sha256msg1_epu32(msg1, msg2); + + msg = _mm_add_epi32(msg3, _mm_set_epi64x(0x106AA070F40E3585ULL, 0xD6990624D192E819ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg3, msg2, 4); + msg0 = _mm_add_epi32(msg0, temp); + msg0 = _mm_sha256msg2_epu32(msg0, msg3); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + msg2 = _mm_sha256msg1_epu32(msg2, msg3); + + msg = 
_mm_add_epi32(msg0, _mm_set_epi64x(0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg0, msg3, 4); + msg1 = _mm_add_epi32(msg1, temp); + msg1 = _mm_sha256msg2_epu32(msg1, msg0); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + msg3 = _mm_sha256msg1_epu32(msg3, msg0); + + msg = _mm_add_epi32(msg1, _mm_set_epi64x(0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg1, msg0, 4); + msg2 = _mm_add_epi32(msg2, temp); + msg2 = _mm_sha256msg2_epu32(msg2, msg1); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + + msg = _mm_add_epi32(msg2, _mm_set_epi64x(0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + temp = _mm_alignr_epi8(msg2, msg1, 4); + msg3 = _mm_add_epi32(msg3, temp); + msg3 = _mm_sha256msg2_epu32(msg3, msg2); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + + msg = _mm_add_epi32(msg3, _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL)); + s1 = _mm_sha256rnds2_epu32(s1, s0, msg); + msg = _mm_shuffle_epi32(msg, 0x0E); + s0 = _mm_sha256rnds2_epu32(s0, s1, msg); + + s0 = _mm_add_epi32(s0, abef_orig); + s1 = _mm_add_epi32(s1, cdgh_orig); + } + + temp = _mm_shuffle_epi32(s0, 0x1B); + s1 = _mm_shuffle_epi32(s1, 0xB1); + s0 = _mm_blend_epi16(temp, s1, 0xF0); + s1 = _mm_alignr_epi8(s1, temp, 8); + + _mm_storeu_si128((__m128i *)&state->h.b32[0], s0); + _mm_storeu_si128((__m128i *)&state->h.b32[4], s1); + + return off; +} + +# if defined(__GNUC__) +__attribute__((__constructor__)) +# endif +static int +have_sha_intrinsics(void) { + static volatile int ret = -1; + static volatile atomic_flag spinlock = ATOMIC_FLAG_INIT; + + if (ret != -1) + return ret; + + while (atomic_flag_test_and_set(&spinlock)); + + if (ret != -1) + goto out; + + int a = 7, b, c = 0, d; + __asm__ volatile("cpuid" : 
"=a"(a), "=b"(b), "=c"(c), "=d"(d) : "a"(a), "c"(c)); + if (!(b & (1 << 29))) { + ret = 0; + goto out; + } + a = 1; + __asm__ volatile("cpuid" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "a"(a), "c"(c)); + if (!(c & (1 << 19)) || !(c & (1 << 0)) || !(d & (1 << 26))) { + ret = 0; + goto out; + } + ret = 1; + +out: + atomic_flag_clear(&spinlock); + return ret; +} + +#endif + + +size_t +libsha2_process(struct libsha2_state *restrict state, const unsigned char *restrict data, size_t len) +{ + const unsigned char *restrict chunk; + size_t off = 0; + if (state->algorithm <= LIBSHA2_256) { uint_least32_t s0, s1; size_t i, j; @@ -71,8 +296,16 @@ libsha2_process(struct libsha2_state *restrict state, const unsigned char *restr #endif #define ROTR(X, N) TRUNC32(((X) >> (N)) | ((X) << (32 - (N)))) - SHA2_IMPLEMENTATION(chunk, 7, 18, 3, 17, 19, 10, 6, 11, 25, 2, 13, 22, uint_least32_t, - 4, TRUNC32, state->k.b32, state->w.b32, state->h.b32, state->work_h.b32); +#ifdef HAVE_X86_SHA_INTRINSICS + if (have_sha_intrinsics()) + return process_x86_sha256(state, data, len); +#endif + + for (; len - off >= state->chunk_size; off += state->chunk_size) { + chunk = &data[off]; + SHA2_IMPLEMENTATION(chunk, 7, 18, 3, 17, 19, 10, 6, 11, 25, 2, 13, 22, uint_least32_t, 4, + TRUNC32, state->k.b32, state->w.b32, state->h.b32, state->work_h.b32); + } #undef ROTR #if defined(__GNUC__) @@ -85,9 +318,14 @@ libsha2_process(struct libsha2_state *restrict state, const unsigned char *restr #define ROTR(X, N) TRUNC64(((X) >> (N)) | ((X) << (64 - (N)))) - SHA2_IMPLEMENTATION(chunk, 1, 8, 7, 19, 61, 6, 14, 18, 41, 28, 34, 39, uint_least64_t, - 8, TRUNC64, state->k.b64, state->w.b64, state->h.b64, state->work_h.b64); + for (; len - off >= state->chunk_size; off += state->chunk_size) { + chunk = &data[off]; + SHA2_IMPLEMENTATION(chunk, 1, 8, 7, 19, 61, 6, 14, 18, 41, 28, 34, 39, uint_least64_t, 8, + TRUNC64, state->k.b64, state->w.b64, state->h.b64, state->work_h.b64); + } #undef ROTR } + + return off; } @@ -9,6 
+9,23 @@ #include <unistd.h> +#define TEST_SHA256 1 +#define TEST_SHA512 1 + + +#if TEST_SHA256 +# define IF_TEST_SHA256(IF, ELSE) IF +#else +# define IF_TEST_SHA256(IF, ELSE) ELSE +#endif + +#if TEST_SHA512 +# define IF_TEST_SHA512(IF, ELSE) IF +#else +# define IF_TEST_SHA512(IF, ELSE) ELSE +#endif + + #define test(EXPR)\ do {\ if (EXPR)\ @@ -131,12 +148,16 @@ main(int argc, char *argv[]) libsha2_unhex(buf, "AAbbCcdD"); test(!memcmp(buf, "\xAA\xBB\xCC\xDD", 4)); +#if TEST_SHA256 test(libsha2_algorithm_output_size(LIBSHA2_224) == 28); test(libsha2_algorithm_output_size(LIBSHA2_256) == 32); +#endif +#if TEST_SHA512 test(libsha2_algorithm_output_size(LIBSHA2_384) == 48); test(libsha2_algorithm_output_size(LIBSHA2_512) == 64); test(libsha2_algorithm_output_size(LIBSHA2_512_224) == 28); test(libsha2_algorithm_output_size(LIBSHA2_512_256) == 32); +#endif test(!errno); test(libsha2_algorithm_output_size(~0) == 0); /* should test `errno == EINVAL`, optimising compiler breaks it */ @@ -144,6 +165,7 @@ main(int argc, char *argv[]) test(libsha2_init(&s, ~0) == -1 && errno == EINVAL); errno = 0; +#if TEST_SHA256 test(!libsha2_init(&s, LIBSHA2_224)); test(libsha2_state_output_size(&s) == 28); libsha2_digest(&s, "", 0, buf); @@ -155,7 +177,9 @@ main(int argc, char *argv[]) libsha2_digest(&s, "", 0, buf); libsha2_behex_lower(str, buf, libsha2_state_output_size(&s)); test_str(str, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); +#endif +#if TEST_SHA512 test(!libsha2_init(&s, LIBSHA2_384)); test(libsha2_state_output_size(&s) == 48); libsha2_digest(&s, "", 0, buf); @@ -179,7 +203,9 @@ main(int argc, char *argv[]) libsha2_digest(&s, "", 0, buf); libsha2_behex_lower(str, buf, libsha2_state_output_size(&s)); test_str(str, "c672b8d1ef56ed28ab87c3622c5114069bdd3ad7b8f9737498d0c01ecef0967a"); +#endif +#if TEST_SHA256 test_repeated(0xFF, 1, LIBSHA2_224, "e33f9d75e6ae1369dbabf81b96b4591ae46bba30b591a6b6c62542b5"); test_custom("\xE5\xE0\x99\x24", LIBSHA2_224, 
"fd19e74690d291467ce59f077df311638f1c3a46e510d0e49a67062d"); test_repeated(0x00, 56, LIBSHA2_224, "5c3e25b69d0ea26f260cfae87e23759e1eca9d1ecc9fbf3c62266804"); @@ -210,7 +236,9 @@ main(int argc, char *argv[]) test_custom("abc", LIBSHA2_256, "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"); test_custom("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", LIBSHA2_256, "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1"); +#endif +#if TEST_SHA512 test_repeated(0x00, 111, LIBSHA2_384,"435770712c611be7293a66dd0dc8d1450dc7ff7337bfe115bf058ef2eb9bed09cee85c26963a5bcc0905dc2df7cc6a76"); test_repeated(0x00, 112, LIBSHA2_384, "3e0cbf3aee0e3aa70415beae1bd12dd7db821efa446440f12132edffce76f635e53526a111491e75ee8e27b9700eec20"); test_repeated(0x00, 113, LIBSHA2_384, "6be9af2cf3cd5dd12c8d9399ec2b34e66034fbd699d4e0221d39074172a380656089caafe8f39963f94cc7c0a07e3d21"); @@ -250,9 +278,10 @@ main(int argc, char *argv[]) test_custom("abc", LIBSHA2_512_256, "53048e2681941ef99b2e29b76b4c7dabe4c2d0c634fc6d46e0e2f13107e7af23"); test_custom("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", LIBSHA2_512_256, "3928e184fb8690f840da3988121d31be65cb9d3ef83ee6146feac861e19b563a"); +#endif for (i = 0; i < 1000; i++) { - for (j = 0; j < 6; j++) { + for (j = IF_TEST_SHA256(0, 2); j < IF_TEST_SHA512(6, 2); j++) { memset(buf, 0x41, 1000); test(!libsha2_init(&s, (enum libsha2_algorithm)j)); libsha2_update(&s, buf, i * 8); @@ -309,6 +338,7 @@ main(int argc, char *argv[]) test(!errno); +#if TEST_SHA256 test(!pipe(fds)); test((pid = fork()) >= 0); if (!pid) { @@ -394,7 +424,9 @@ main(int argc, char *argv[]) "53616d706c65206d65737361676520666f72206b65796c656e3d626c6f636b6c656e", "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f60616263", 
"bdccb6c72ddeadb500ae768386cb38cc41c63dbb0878ddb9c7a38a431b78378d"); +#endif +#if TEST_SHA512 test_hmac(LIBSHA2_384, "53616d706c65206d65737361676520666f72206b65796c656e3d626c6f636b6c656e", "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f", @@ -424,6 +456,7 @@ main(int argc, char *argv[]) "53616d706c65206d65737361676520666f72206b65796c656e3d626c6f636b6c656e", "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7", "d93ec8d2de1ad2a9957cb9b83f14e76ad6b5e0cce285079a127d3b14bccb7aa7286d4ac0d4ce64215f2bc9e6870b33d97438be4aaa20cda5c5a912b48b8e27f3"); +#endif return 0; } @@ -16,17 +16,13 @@ libsha2_update(struct libsha2_state *restrict state, const void *restrict messag n = msglen < state->chunk_size - off ? msglen : state->chunk_size - off; memcpy(&state->chunk[off], message, n); if (off + n == state->chunk_size) - libsha2_process(state, state->chunk); - message += n; + libsha2_process(state, state->chunk, state->chunk_size); + message = &message[n]; msglen -= n; } - while (msglen >= state->chunk_size) { - libsha2_process(state, (const unsigned char *)message); - message += state->chunk_size; - msglen -= state->chunk_size; - } + off = libsha2_process(state, (const unsigned char *)message, msglen); - if (msglen) - memcpy(state->chunk, message, msglen); + if (msglen > off) + memcpy(state->chunk, &message[off], msglen - off); } |