#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
-#include <stdio.h>
+#ifndef __APPLE__
#include <malloc.h>
+#endif
+#include <stdio.h>
#include <string.h>
#include "crapto1/crapto1.h"
#include "parity.h"
#define MAX_BITSLICES 128
#elif defined(__SSE2__)
#define MAX_BITSLICES 128
-#else // MMX or SSE
+#else // MMX or SSE or NOSIMD
#define MAX_BITSLICES 64
#endif
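+// Note: MAX_BITSLICES mirrors the vector register width in bits of the chosen
+// instruction set (128 for the SSE2 build shown here, 64 for the MMX/SSE/NOSIMD
+// fallback), i.e. roughly how many candidate states one bitsliced operation
+// processes in parallel.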
#elif defined (__MMX__)
#define BITSLICE_TEST_NONCES bitslice_test_nonces_MMX
#define CRACK_STATES_BITSLICED crack_states_bitsliced_MMX
+#else
+#define BITSLICE_TEST_NONCES bitslice_test_nonces_NOSIMD
+#define CRACK_STATES_BITSLICED crack_states_bitsliced_NOSIMD
#endif
// typedefs and declarations of functions:
crack_states_bitsliced_t crack_states_bitsliced_AVX;
crack_states_bitsliced_t crack_states_bitsliced_SSE2;
crack_states_bitsliced_t crack_states_bitsliced_MMX;
+crack_states_bitsliced_t crack_states_bitsliced_NOSIMD;
crack_states_bitsliced_t crack_states_bitsliced_dispatch;
typedef void bitslice_test_nonces_t(uint32_t, uint32_t*, uint8_t*);
bitslice_test_nonces_t bitslice_test_nonces_AVX;
bitslice_test_nonces_t bitslice_test_nonces_SSE2;
bitslice_test_nonces_t bitslice_test_nonces_MMX;
+bitslice_test_nonces_t bitslice_test_nonces_NOSIMD;
bitslice_test_nonces_t bitslice_test_nonces_dispatch;
-#ifdef _WIN32
+#if defined (_WIN32)
#define malloc_bitslice(x) __builtin_assume_aligned(_aligned_malloc((x), MAX_BITSLICES/8), MAX_BITSLICES/8)
#define free_bitslice(x) _aligned_free(x)
+#elif defined (__APPLE__)
+static void *malloc_bitslice(size_t x) {
+ char *allocated_memory;
+ if (posix_memalign((void**)&allocated_memory, MAX_BITSLICES/8, x)) {
+ return NULL;
+ } else {
+ return __builtin_assume_aligned(allocated_memory, MAX_BITSLICES/8);
+ }
+}
+#define free_bitslice(x) free(x)
#else
#define malloc_bitslice(x) memalign(MAX_BITSLICES/8, (x))
#define free_bitslice(x) free(x)
#endif
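+// All three branches hand back memory aligned to MAX_BITSLICES/8 bytes, which the
+// aligned vector loads/stores in the bitsliced kernels presumably rely on; the
+// _WIN32 and __APPLE__ branches additionally wrap the result in
+// __builtin_assume_aligned so the compiler can exploit that alignment.
+// Minimal usage sketch (bitslice_t and STATE_SIZE are defined elsewhere in this
+// file; `states` is a hypothetical name):
+//
+//   bitslice_t *states = (bitslice_t *)malloc_bitslice(STATE_SIZE * sizeof(bitslice_t));
+//   if (states == NULL) { /* handle allocation failure */ }
+//   ...
+//   free_bitslice(states);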
-#if defined (__MMX__) // (including more sophisticated instruction sets)
typedef enum {
EVEN_STATE = 0,
ODD_STATE = 1
} odd_even_t;
#endif
// add the even state bits
- const bitslice_t const *restrict bitsliced_even_state = bitsliced_even_states[block_idx];
+ const bitslice_t *restrict bitsliced_even_state = bitsliced_even_states[block_idx];
for(uint32_t state_idx = 1; state_idx < STATE_SIZE; state_idx += 2) {
state_p[state_idx] = bitsliced_even_state[state_idx/2];
}
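+// Note: the Crypto1 state is kept as two separate bitsliced arrays (the odd and the
+// even half-state, cf. ODD_STATE/EVEN_STATE above); this loop interleaves the even
+// half back into every other slot of state_p, the remaining slots presumably being
+// filled from the odd half by a matching loop that is not part of this hunk.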
#endif
return key;
}
-#endif
+
#ifndef __MMX__
crack_states_bitsliced_t *crack_states_bitsliced_function_p = &crack_states_bitsliced_dispatch;
bitslice_test_nonces_t *bitslice_test_nonces_function_p = &bitslice_test_nonces_dispatch;
+static SIMDExecInstr intSIMDInstr = SIMD_AUTO;
+
+void SetSIMDInstr(SIMDExecInstr instr) {
+ intSIMDInstr = instr;
+
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_dispatch;
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_dispatch;
+}
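+// Resetting both pointers back to the *_dispatch stubs forces the next call to
+// re-run the dispatch switch and pick the implementation matching the newly
+// requested instruction set.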
+
+SIMDExecInstr GetSIMDInstr(void) {
+ SIMDExecInstr instr = SIMD_NONE;
+
+#if defined (__i386__) || defined (__x86_64__)
+ #if !defined(__APPLE__) || (defined(__APPLE__) && (__clang_major__ > 8 || __clang_major__ == 8 && __clang_minor__ >= 1))
+ #if (__GNUC__ >= 5) && (__GNUC__ > 5 || __GNUC_MINOR__ > 2)
+ if (__builtin_cpu_supports("avx512f")) instr = SIMD_AVX512;
+ else if (__builtin_cpu_supports("avx2")) instr = SIMD_AVX2;
+ #else
+ if (__builtin_cpu_supports("avx2")) instr = SIMD_AVX2;
+ #endif
+ else if (__builtin_cpu_supports("avx")) instr = SIMD_AVX;
+ else if (__builtin_cpu_supports("sse2")) instr = SIMD_SSE2;
+ else if (__builtin_cpu_supports("mmx")) instr = SIMD_MMX;
+ else
+ #endif
+#endif
+ instr = SIMD_NONE;
+
+ return instr;
+}
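+// Probing is strongest-first (AVX512F, AVX2, AVX, SSE2, MMX); the trailing `else`
+// falls through to the plain `instr = SIMD_NONE;` assignment. On non-x86 targets,
+// or with an Apple clang older than 8.1, the whole __builtin_cpu_supports() chain
+// is compiled out and only that assignment remains; the "avx512f" probe additionally
+// requires a compiler reporting a __GNUC__ version newer than 5.2.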
+
+SIMDExecInstr GetSIMDInstrAuto(void) {
+ SIMDExecInstr instr = intSIMDInstr;
+ if (instr == SIMD_AUTO)
+ return GetSIMDInstr();
+
+ return instr;
+}
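+// GetSIMDInstrAuto() resolves the default SIMD_AUTO setting to whatever
+// GetSIMDInstr() detects at runtime; an instruction set forced via SetSIMDInstr()
+// is returned unchanged. The dispatchers below switch on this value.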
+
// determine the available instruction set at runtime and call the correct function
const uint64_t crack_states_bitsliced_dispatch(uint32_t cuid, uint8_t *best_first_bytes, statelist_t *p, uint32_t *keys_found, uint64_t *num_keys_tested, uint32_t nonces_to_bruteforce, uint8_t *bf_test_nonce_2nd_byte, noncelist_t *nonces) {
- #if (__GNUC__ >= 5) && (__GNUC__ > 5 || __GNUC_MINOR__ > 2)
- if (__builtin_cpu_supports("avx512f")) crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX512;
- else if (__builtin_cpu_supports("avx2")) crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX2;
- #else
- if (__builtin_cpu_supports("avx2")) crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX2;
- #endif
- else if (__builtin_cpu_supports("avx")) crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX;
- else if (__builtin_cpu_supports("sse2")) crack_states_bitsliced_function_p = &crack_states_bitsliced_SSE2;
- else if (__builtin_cpu_supports("mmx")) crack_states_bitsliced_function_p = &crack_states_bitsliced_MMX;
- else {
- printf("\nFatal: you need at least a CPU with MMX instruction set support. Aborting...\n");
- exit(5);
- }
+ switch(GetSIMDInstrAuto()) {
+#if defined (__i386__) || defined (__x86_64__)
+#if !defined(__APPLE__) || (defined(__APPLE__) && (__clang_major__ > 8 || __clang_major__ == 8 && __clang_minor__ >= 1))
+#if (__GNUC__ >= 5) && (__GNUC__ > 5 || __GNUC_MINOR__ > 2)
+ case SIMD_AVX512:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX512;
+ break;
+#endif
+ case SIMD_AVX2:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX2;
+ break;
+ case SIMD_AVX:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX;
+ break;
+ case SIMD_SSE2:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_SSE2;
+ break;
+ case SIMD_MMX:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_MMX;
+ break;
+#endif
+#endif
+ default:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_NOSIMD;
+ break;
+ }
+
// call the most optimized function for this CPU
return (*crack_states_bitsliced_function_p)(cuid, best_first_bytes, p, keys_found, num_keys_tested, nonces_to_bruteforce, bf_test_nonce_2nd_byte, nonces);
}
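// Note: callers go through crack_states_bitsliced_function_p, which starts out
// pointing at this dispatcher; once the switch above has repointed it at a concrete
// implementation, subsequent calls bypass the dispatch entirely until SetSIMDInstr()
// resets the pointer. bitslice_test_nonces below uses the same self-patching scheme.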
void bitslice_test_nonces_dispatch(uint32_t nonces_to_bruteforce, uint32_t *bf_test_nonce, uint8_t *bf_test_nonce_par) {
- #if (__GNUC__ >= 5) && (__GNUC__ > 5 || __GNUC_MINOR__ > 2)
- if (__builtin_cpu_supports("avx512f")) bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX512;
- else if (__builtin_cpu_supports("avx2")) bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX2;
- #else
- if (__builtin_cpu_supports("avx2")) bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX2;
- #endif
- else if (__builtin_cpu_supports("avx")) bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX;
- else if (__builtin_cpu_supports("sse2")) bitslice_test_nonces_function_p = &bitslice_test_nonces_SSE2;
- else if (__builtin_cpu_supports("mmx")) bitslice_test_nonces_function_p = &bitslice_test_nonces_MMX;
- else {
- printf("\nFatal: you need at least a CPU with MMX instruction set support. Aborting...\n");
- exit(5);
- }
+ switch(GetSIMDInstrAuto()) {
+#if defined (__i386__) || defined (__x86_64__)
+#if !defined(__APPLE__) || (defined(__APPLE__) && (__clang_major__ > 8 || __clang_major__ == 8 && __clang_minor__ >= 1))
+#if (__GNUC__ >= 5) && (__GNUC__ > 5 || __GNUC_MINOR__ > 2)
+ case SIMD_AVX512:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX512;
+ break;
+#endif
+ case SIMD_AVX2:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX2;
+ break;
+ case SIMD_AVX:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX;
+ break;
+ case SIMD_SSE2:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_SSE2;
+ break;
+ case SIMD_MMX:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_MMX;
+ break;
+#endif
+#endif
+ default:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_NOSIMD;
+ break;
+ }
+
// call the most optimized function for this CPU
(*bitslice_test_nonces_function_p)(nonces_to_bruteforce, bf_test_nonce, bf_test_nonce_par);
}