#include "crapto1/crapto1.h"
#include "parity.h"
#include "hardnested/hardnested_bruteforce.h"
#include "hardnested/hardnested_bf_core.h"
#include "hardnested/hardnested_bitarray_core.h"
#include "zlib.h"
static void get_SIMD_instruction_set(char* instruction_set) {
-#if defined (__i386__) || defined (__x86_64__)
- #if !defined(__APPLE__) || (defined(__APPLE__) && (__clang_major__ > 8 || __clang_major__ == 8 && __clang_minor__ >= 1))
- #if (__GNUC__ >= 5) && (__GNUC__ > 5 || __GNUC_MINOR__ > 2)
- if (__builtin_cpu_supports("avx512f")) strcpy(instruction_set, "AVX512F");
- else if (__builtin_cpu_supports("avx2")) strcpy(instruction_set, "AVX2");
- #else
- if (__builtin_cpu_supports("avx2")) strcpy(instruction_set, "AVX2");
- #endif
- else if (__builtin_cpu_supports("avx")) strcpy(instruction_set, "AVX");
- else if (__builtin_cpu_supports("sse2")) strcpy(instruction_set, "SSE2");
- else if (__builtin_cpu_supports("mmx")) strcpy(instruction_set, "MMX");
- else
- #endif
-#endif
- strcpy(instruction_set, "no");
+ switch(GetSIMDInstrAuto()) {
+ case SIMD_AVX512:
+ strcpy(instruction_set, "AVX512F");
+ break;
+ case SIMD_AVX2:
+ strcpy(instruction_set, "AVX2");
+ break;
+ case SIMD_AVX:
+ strcpy(instruction_set, "AVX");
+ break;
+ case SIMD_SSE2:
+ strcpy(instruction_set, "SSE2");
+ break;
+ case SIMD_MMX:
+ strcpy(instruction_set, "MMX");
+ break;
+ default:
+ strcpy(instruction_set, "no");
+ break;
+ }
}
// Print the banner announcing how many threads and which SIMD core the
// hardnested attack will use. (Function continues past this excerpt.)
static void print_progress_header(void) {
	char progress_text[80];
	// {0} zero-fills the whole buffer, guaranteeing NUL termination even if
	// get_SIMD_instruction_set() were to write nothing.
	char instr_set[12] = {0};
	get_SIMD_instruction_set(instr_set);
	sprintf(progress_text, "Start using %d threads and %s SIMD core", num_CPUs(), instr_set);
	PrintAndLog("\n\n");
// Entry point of the hardnested key-recovery attack.
// (Function continues past this excerpt.)
int mfnestedhard(uint8_t blockNo, uint8_t keyType, uint8_t *key, uint8_t trgBlockNo, uint8_t trgKeyType, uint8_t *trgkey, bool nonce_file_read, bool nonce_file_write, bool slow, int tests)
{
	char progress_text[80];

	// NOTE(review): SIMD_NONE forces the scalar (non-SIMD) brute-force core for
	// the whole attack; SIMD_AUTO would pick the fastest supported set at
	// runtime. Looks like leftover debug/benchmark code — confirm intent.
	SetSIMDInstr(SIMD_NONE);
	srand((unsigned) time(NULL));
	brute_force_per_second = brute_force_benchmark();
#ifndef __MMX__
-// pointers to functions:
-crack_states_bitsliced_t *crack_states_bitsliced_function_p = &crack_states_bitsliced_dispatch;
-bitslice_test_nonces_t *bitslice_test_nonces_function_p = &bitslice_test_nonces_dispatch;
+static SIMDExecInstr intSIMDInstr = SIMD_AUTO;
-// determine the available instruction set at runtime and call the correct function
-const uint64_t crack_states_bitsliced_dispatch(uint32_t cuid, uint8_t *best_first_bytes, statelist_t *p, uint32_t *keys_found, uint64_t *num_keys_tested, uint32_t nonces_to_bruteforce, uint8_t *bf_test_nonce_2nd_byte, noncelist_t *nonces) {
+void SetSIMDInstr(SIMDExecInstr instr) {
+ intSIMDInstr = instr;
+}
+
+SIMDExecInstr GetSIMDInstr() {
+ SIMDExecInstr instr = SIMD_NONE;
+
#if defined (__i386__) || defined (__x86_64__)
#if !defined(__APPLE__) || (defined(__APPLE__) && (__clang_major__ > 8 || __clang_major__ == 8 && __clang_minor__ >= 1))
#if (__GNUC__ >= 5) && (__GNUC__ > 5 || __GNUC_MINOR__ > 2)
- if (__builtin_cpu_supports("avx512f")) crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX512;
- else if (__builtin_cpu_supports("avx2")) crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX2;
+ if (__builtin_cpu_supports("avx512f")) instr = SIMD_AVX512;
+ else if (__builtin_cpu_supports("avx2")) instr = SIMD_AVX2;
#else
- if (__builtin_cpu_supports("avx2")) crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX2;
+ if (__builtin_cpu_supports("avx2")) instr = SIMD_AVX2;
#endif
- else if (__builtin_cpu_supports("avx")) crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX;
- else if (__builtin_cpu_supports("sse2")) crack_states_bitsliced_function_p = &crack_states_bitsliced_SSE2;
- else if (__builtin_cpu_supports("mmx")) crack_states_bitsliced_function_p = &crack_states_bitsliced_MMX;
+ else if (__builtin_cpu_supports("avx")) instr = SIMD_AVX;
+ else if (__builtin_cpu_supports("sse2")) instr = SIMD_SSE2;
+ else if (__builtin_cpu_supports("mmx")) instr = SIMD_MMX;
else
#endif
#endif
- crack_states_bitsliced_function_p = &crack_states_bitsliced_NOSIMD;
+ instr = SIMD_NONE;
+
+ return instr;
+}
+
+SIMDExecInstr GetSIMDInstrAuto() {
+ SIMDExecInstr instr = intSIMDInstr;
+ if (instr == SIMD_AUTO)
+ return GetSIMDInstr();
+
+ return instr;
+}
+
+// pointers to functions:
+crack_states_bitsliced_t *crack_states_bitsliced_function_p = &crack_states_bitsliced_dispatch;
+bitslice_test_nonces_t *bitslice_test_nonces_function_p = &bitslice_test_nonces_dispatch;
+
+// determine the available instruction set at runtime and call the correct function
+const uint64_t crack_states_bitsliced_dispatch(uint32_t cuid, uint8_t *best_first_bytes, statelist_t *p, uint32_t *keys_found, uint64_t *num_keys_tested, uint32_t nonces_to_bruteforce, uint8_t *bf_test_nonce_2nd_byte, noncelist_t *nonces) {
+ switch(GetSIMDInstrAuto()) {
+ case SIMD_AVX512:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX512;
+ break;
+ case SIMD_AVX2:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX2;
+ break;
+ case SIMD_AVX:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_AVX;
+ break;
+ case SIMD_SSE2:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_SSE2;
+ break;
+ case SIMD_MMX:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_MMX;
+ break;
+ default:
+ crack_states_bitsliced_function_p = &crack_states_bitsliced_NOSIMD;
+ break;
+ }
// call the most optimized function for this CPU
return (*crack_states_bitsliced_function_p)(cuid, best_first_bytes, p, keys_found, num_keys_tested, nonces_to_bruteforce, bf_test_nonce_2nd_byte, nonces);
}
void bitslice_test_nonces_dispatch(uint32_t nonces_to_bruteforce, uint32_t *bf_test_nonce, uint8_t *bf_test_nonce_par) {
-#if defined (__i386__) || defined (__x86_64__)
- #if !defined(__APPLE__) || (defined(__APPLE__) && (__clang_major__ > 8 || __clang_major__ == 8 && __clang_minor__ >= 1))
- #if (__GNUC__ >= 5) && (__GNUC__ > 5 || __GNUC_MINOR__ > 2)
- if (__builtin_cpu_supports("avx512f")) bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX512;
- else if (__builtin_cpu_supports("avx2")) bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX2;
- #else
- if (__builtin_cpu_supports("avx2")) bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX2;
- #endif
- else if (__builtin_cpu_supports("avx")) bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX;
- else if (__builtin_cpu_supports("sse2")) bitslice_test_nonces_function_p = &bitslice_test_nonces_SSE2;
- else if (__builtin_cpu_supports("mmx")) bitslice_test_nonces_function_p = &bitslice_test_nonces_MMX;
- else
- #endif
-#endif
- bitslice_test_nonces_function_p = &bitslice_test_nonces_NOSIMD;
+ switch(GetSIMDInstrAuto()) {
+ case SIMD_AVX512:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX512;
+ break;
+ case SIMD_AVX2:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX2;
+ break;
+ case SIMD_AVX:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_AVX;
+ break;
+ case SIMD_SSE2:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_SSE2;
+ break;
+ case SIMD_MMX:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_MMX;
+ break;
+ default:
+ bitslice_test_nonces_function_p = &bitslice_test_nonces_NOSIMD;
+ break;
+ }
// call the most optimized function for this CPU
(*bitslice_test_nonces_function_p)(nonces_to_bruteforce, bf_test_nonce, bf_test_nonce_par);