Initial commit

Armin Friedl 2018-04-04 15:18:34 +02:00
commit 12195c3ddc
156 changed files with 27156 additions and 0 deletions

.gitignore vendored Normal file
@@ -0,0 +1,411 @@
*.blf
# copied from brainflayer gitignore
# Object files
*.o
*.ko
*.obj
*.elf
# Precompiled Headers
*.gch
*.pch
# Libraries
*.lib
*.a
*.la
*.lo
# Shared objects (inc. Windows DLLs)
*.dll
*.so
*.so.*
*.dylib
# Executables
*.exe
*.out
*.app
*.i*86
*.x86_64
*.hex
brainflayer
hexln
hex2blf
blfchk
ecmtabgen
filehex
# Debug files
*.dSYM/
# Tags
GPATH
GRTAGS
GTAGS
# misc
old
notes
# Created by https://www.gitignore.io/api/vim,java,linux,emacs,python,eclipse,windows,intellij+all
### Eclipse ###
.metadata
bin/
tmp/
*.tmp
*.bak
*.swp
*~.nib
local.properties
.settings/
.loadpath
.recommenders
# External tool builders
.externalToolBuilders/
# Locally stored "Eclipse launch configurations"
*.launch
# PyDev specific (Python IDE for Eclipse)
*.pydevproject
# CDT-specific (C/C++ Development Tooling)
.cproject
# Java annotation processor (APT)
.factorypath
# PDT-specific (PHP Development Tools)
.buildpath
# sbteclipse plugin
.target
# Tern plugin
.tern-project
# TeXlipse plugin
.texlipse
# STS (Spring Tool Suite)
.springBeans
# Code Recommenders
.recommenders/
# Scala IDE specific (Scala & Java development for Eclipse)
.cache-main
.scala_dependencies
.worksheet
### Eclipse Patch ###
# Eclipse Core
.project
# JDT-specific (Eclipse Java Development Tools)
.classpath
### Emacs ###
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*
# eshell files
/eshell/history
/eshell/lastdir
# elpa packages
/elpa/
# reftex files
*.rel
# AUCTeX auto folder
/auto/
# cask packages
.cask/
dist/
# Flycheck
flycheck_*.el
# server auth directory
/server/
# projectiles files
.projectile
projectile-bookmarks.eld
# directory configuration
.dir-locals.el
# saveplace
places
# url cache
url/cache/
# cedet
ede-projects.el
# smex
smex-items
# company-statistics
company-statistics-cache.el
# anaconda-mode
anaconda-mode/
### Intellij+all ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff:
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.xml
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
# Gradle:
.idea/**/gradle.xml
.idea/**/libraries
# CMake
cmake-build-debug/
# Mongo Explorer plugin:
.idea/**/mongoSettings.xml
## File-based project format:
*.iws
## Plugin-specific files:
# IntelliJ
/out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Ruby plugin and RubyMine
/.rakeTasks
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
### Intellij+all Patch ###
# Ignores the whole idea folder
# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
.idea/
### Java ###
# Compiled class file
*.class
# Log file
*.log
# BlueJ files
*.ctxt
# Mobile Tools for Java (J2ME)
.mtj.tmp/
# Package Files #
*.jar
*.war
*.ear
*.zip
*.tar.gz
*.rar
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
### Linux ###
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
.pytest_cache/
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule.*
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
### Vim ###
# swap
.sw[a-p]
.*.sw[a-p]
# session
Session.vim
# temporary
.netrwhist
# auto-generated tag files
tags
### Windows ###
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db
# Folder config file
Desktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msm
*.msp
# Windows shortcuts
*.lnk
# End of https://www.gitignore.io/api/vim,java,linux,emacs,python,eclipse,windows,intellij+all

arch.md Normal file
@@ -0,0 +1,48 @@
# Architecture #
## Partitioning ##
# Protocol #
| t | Client | Server | Comment |
|---|--------|--------|---------------|
| 2 | <CMD> | | Send command |
where `<CMD> = WRK | RES | REP`
(Work | Result | Report)
## WRK ##
Ask for work package
| t | Client | Server | Comment |
|---|--------|----------|--------------------------------------------------|
| 2 | WRK | | Send work command |
| 3 | | <WRKPKG> | Work package: Start key, end key, number of keys |
| 4 | ACK | | Ack work |
where `<WRKPKG> = <HEX1> <HEX2> <INT>`
`<HEX1> a 160 bit hex number`
`<HEX2> a 160 bit hex number strictly greater than <HEX1>`
`<INT> a 32 bit unsigned integer equalling <HEX2> - <HEX1> in base 10`
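A minimal C sketch of how a client might carry and sanity-check a `<WRKPKG>`
(the struct and names are illustrative only, not part of this protocol):

```c
#include <stdint.h>
#include <string.h>

/* hypothetical carrier for <WRKPKG>; field names are not part of the spec */
typedef struct {
  uint8_t  start[20]; /* <HEX1>: 160 bit start key, big-endian */
  uint8_t  end[20];   /* <HEX2>: 160 bit end key, strictly greater */
  uint32_t count;     /* <INT>: number of keys, <HEX2> - <HEX1> */
} wrkpkg;

/* minimal sanity check: <HEX2> must be strictly greater than <HEX1> */
static int wrkpkg_ok(const wrkpkg *w) {
  return memcmp(w->end, w->start, 20) > 0 && w->count > 0;
}
```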
## RES ##
MUST follow proper WRK
Respond with result for work block.
| t | Client | Server | Comment |
|---|--------------|--------|-----------------------------------------------------|
| 2 | RES <WRKPKG> | | Send result command with corresponding work package |
| 3 | | ACK | Ack receiving |
| 4 | <RES> | | Result type |
| 5 | | ACK | Ack receiving |
where `<RES> = NIL | SUC`
if SUC:
| 6 | <HEX> | | Private key hex if found |
| 7 | | ACK | Ack receiving |
## REP ##
TODO

bf/Makefile Normal file
@@ -0,0 +1,71 @@
HEADERS = bloom.h crack.h hash160.h warpwallet.h
OBJ_MAIN = brainflayer.o hex2blf.o blfchk.o ecmtabgen.o hexln.o filehex.o
OBJ_UTIL = hex.o bloom.o mmapf.o hsearchf.o ec_pubkey_fast.o ripemd160_256.o dldummy.o
OBJ_ALGO = $(patsubst %.c,%.o,$(wildcard algo/*.c))
OBJECTS = $(OBJ_MAIN) $(OBJ_UTIL) $(OBJ_ALGO)
BINARIES = brainflayer hexln hex2blf blfchk ecmtabgen filehex
LIBS = -lssl -lrt -lcrypto -lz -lgmp
CFLAGS = -O3 \
-flto -funsigned-char -falign-functions=16 -falign-loops=16 -falign-jumps=16 \
-Wall -Wextra -Wno-pointer-sign -Wno-sign-compare \
-pedantic -std=gnu11
COMPILE = gcc $(CFLAGS)
all: $(BINARIES)

.git:
	@echo 'This does not look like a cloned git repo. Unable to fetch submodules.'
	@false

secp256k1/.libs/libsecp256k1.a: .git
	git submodule init
	git submodule update
	cd secp256k1; make distclean || true
	cd secp256k1; ./autogen.sh
	cd secp256k1; ./configure
	cd secp256k1; make

secp256k1/include/secp256k1.h: secp256k1/.libs/libsecp256k1.a

scrypt-jane/scrypt-jane.h: .git
	git submodule init
	git submodule update

scrypt-jane/scrypt-jane.o: scrypt-jane/scrypt-jane.h scrypt-jane/scrypt-jane.c
	cd scrypt-jane; gcc -O3 -DSCRYPT_SALSA -DSCRYPT_SHA256 -c scrypt-jane.c -o scrypt-jane.o

brainflayer.o: brainflayer.c secp256k1/include/secp256k1.h
algo/warpwallet.o: algo/warpwallet.c scrypt-jane/scrypt-jane.h
algo/brainwalletio.o: algo/brainwalletio.c scrypt-jane/scrypt-jane.h
algo/brainv2.o: algo/brainv2.c scrypt-jane/scrypt-jane.h

ec_pubkey_fast.o: ec_pubkey_fast.c secp256k1/include/secp256k1.h
	$(COMPILE) -Wno-unused-function -c $< -o $@

%.o: %.c
	$(COMPILE) -c $< -o $@

hexln: hexln.o hex.o
	$(COMPILE) $^ $(LIBS) -o $@

blfchk: blfchk.o hex.o bloom.o mmapf.o hsearchf.o
	$(COMPILE) $^ $(LIBS) -o $@

hex2blf: hex2blf.o hex.o bloom.o mmapf.o
	$(COMPILE) $^ $(LIBS) -lm -o $@

ecmtabgen: ecmtabgen.o mmapf.o ec_pubkey_fast.o
	$(COMPILE) $^ $(LIBS) -o $@

filehex: filehex.o hex.o
	$(COMPILE) $^ $(LIBS) -o $@

brainflayer: brainflayer.o $(OBJ_UTIL) $(OBJ_ALGO) \
             secp256k1/.libs/libsecp256k1.a scrypt-jane/scrypt-jane.o
	$(COMPILE) $^ $(LIBS) -o $@

clean:
	rm -f $(BINARIES) $(OBJECTS)

bf/README.md Normal file
@@ -0,0 +1,149 @@
Brainflayer
===========
Brainflayer is a Proof-of-Concept brainwallet cracking tool that uses
[libsecp256k1](https://github.com/bitcoin/secp256k1) for pubkey generation.
It was originally released as part of my DEFCON talk about cracking brainwallets
([slides](https://rya.nc/dc23), [video](https://rya.nc/b6), [why](https://rya.nc/defcon-brainwallets.html)).
The name is a reference to [Mind Flayers](https://en.wikipedia.org/wiki/Illithid),
a race of monsters from the Dungeons & Dragons role-playing game. They eat
brains, psionically enslave people and look like lovecraftian horrors.
The current release is more than four times faster than the DEFCON release, and
many features have been added.
If brainflayer is useful to you, please get in touch to let me know. I'm very
interested in any research it's being used for, and I'm generally happy to
collaborate with academic groups.
Disclaimer
----------
Just because you *can* steal someone's money doesn't mean you *should*.
Stealing would make you a jerk. Don't be a jerk.
No support will be provided at this time, and I may ignore or close issues
requesting support without responding.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Usage
-----
### Basic
Precompute the bloom filter:
`hex2blf example.hex example.blf`
Run Brainflayer against it:
`brainflayer -v -b example.blf -i phraselist.txt`
or
`your_generator | brainflayer -v -b example.blf`
### Advanced
Brainflayer's design is heavily influenced by [Unix philosophy](https://en.wikipedia.org/wiki/Unix_philosophy).
It (mostly) does one thing: hunt for tasty brainwallets. A major feature it
does *not* have is generating candidate passwords/passphrases. There are plenty
of other great tools that do that, and brainflayer is happy to have you pipe
their output to it.
Unfortunately, brainflayer is not currently multithreaded. If you want to have
it keep multiple cores busy, you'll have to come up with a way to distribute
the work yourself (brainflayer's -n and -k options may help). In my testing,
brainflayer benefits significantly from hyperthreading, so you may want to
run two copies per physical core. Also worth noting is that brainflayer mmaps
its data files in shared memory, so additional brainflayer processes do not
use up that much additional RAM.
While not strictly required, it is *highly* recommended to use the following
options:
* `-m FILE` Load the ecmult table from `FILE` (generated with `ecmtabgen`)
rather than computing it on startup. This will allow multiple
brainflayer processes to share the same table in memory, and
significantly reduce startup time when using a large table.
* `-f FILE` Verify bloom filter matches against `FILE`, a list of all
hash160s generated with
`sort -u example.hex | xxd -r -p > example.bin`
Enough addresses exist on the Bitcoin network to cause false
positives in the bloom filter; this option will suppress them.
Brainflayer supports a few other types of input via the `-t` option:
* `-t keccak` passphrases to be hashed with keccak256 (some ethereum tools)
* `-t priv` raw private keys - this can be used to support arbitrary
deterministic wallet schemes via an external program (see the sketch
after this list). Any trailing data after the hex encoded private key
will be included in brainflayer's output as well, for reference. See
also the `-I` option, which has special speed optimizations, if you
want to crack a bunch of sequential keys.
* `-t warp` salts or passwords/passphrases for WarpWallet
* `-t bwio` salts or passwords/passphrases for brainwallet.io
* `-t bv2` salts or passwords/passphrases for brainv2 - this one is *very* slow
on CPU, however the parameter choices make it a great target for GPUs
and FPGAs.
* `-t rush` passwords for password-protected rushwallets - pass the fragment (the
part of the url after the #) using `-r`. Almost all wrong passwords
will be rejected even without a bloom filter.
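As a concrete (hypothetical, not shipped with brainflayer) example of an
external generator for `-t priv`, the sketch below prints sequential 256-bit
private keys as 64 hex digits, one per line:

```c
/* keygen.c - illustrative only: emit sequential private keys for -t priv */
#include <stdio.h>
#include <stdint.h>

int main(void) {
  uint64_t k;
  for (k = 1; k <= 1000000; k++)
    printf("%064llx\n", (unsigned long long)k); /* zero-padded to 32 bytes */
  return 0;
}
```

Compile and pipe it in with something like
`gcc -O2 keygen.c -o keygen && ./keygen | brainflayer -t priv -b example.blf`
(though for plain sequential keys the built-in `-I` mode is faster).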
Address types can be specified with the `-c` option:
* `-c u` uncompressed addresses
* `-c c` compressed addresses
* `-c e` ethereum addresses
* `-c x` most significant bits of the public point's x coordinate
It's possible to combine two or more of these, e.g. the default is `-c uc`.
An incremental private key brute force mode is available for fans of
[directory.io](http://www.directory.io/), try
`brainflayer -v -I 0000000000000000000000000000000000000000000000000000000000000001 -b example.blf`
See the output of `brainflayer -h` for more detailed usage info.
Also included is `blfchk` - you can pipe it hex encoded hash160s to check
against a bloom filter file. It's very fast - it can easily check millions of
hash160s per second. Not entirely sure what this is good for, but I'm sure
you'll come up with something.
Building
--------
Should compile on Linux with `make` provided you have the required devel libs
installed (at least openssl and gmp are required along with libsecp256k1's
build dependencies). I really need to learn autotools. If you file an issue
about a build failure in libsecp256k1 I will close it.
Authors
-------
The bulk of Brainflayer was written by Ryan Castellucci. Nicolas Courtois and
Guangyan Song contributed the code in `ec_pubkey_fast.c` which more than
doubles the speed of public key computations compared with the stock secp256k1
library from Bitcoin. This code uses a much larger table for ec multiplication
and optimized routines for ec addition and doubling.

bf/algo/brainv2.c Normal file
@@ -0,0 +1,60 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <time.h>
#include <unistd.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <signal.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <openssl/evp.h>
#include <openssl/sha.h>
// crypto.h used for the version
#include <openssl/crypto.h>
#include "../scrypt-jane/scrypt-jane.h"
#include "../hex.h"
#include "brainv2.h"
#define SALT_BITS_PER_THREAD 256
#define SALT_BYTES_PER_THREAD (SALT_BITS_PER_THREAD / 8)
#define THREADS 256
#define KEY_SIZE 128
#define first_scrypt(p, pl, s, ss, k, ks) \
scrypt(p, pl, s, ss, 13, 0, 6, k, ks)
#define middle_scrypt(p, pl, s, ss, k, ks) \
scrypt(p, pl, s, ss, 15, 0, 6, k, ks)
#define last_scrypt(p, pl, s, ss, k, ks) \
scrypt(p, pl, s, ss, 13, 0, 6, k, ks)
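/* Sketch of the flow below (reading from the code, not a spec): first_scrypt
 * stretches the passphrase into 2*THREADS 32-byte blocks; middle_scrypt then
 * mixes each even/odd pair of blocks into one block per "thread"; last_scrypt
 * hashes the passphrase against all of key2, and the 16-byte result is
 * hex-encoded into out (32 chars plus NUL, hence the 33-byte buffer). */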
int brainv2(unsigned char *pass, size_t pass_sz,
unsigned char *salt, size_t salt_sz,
unsigned char *out) {
unsigned char key1[THREADS*SALT_BYTES_PER_THREAD*2];
unsigned char key2[THREADS*SALT_BYTES_PER_THREAD];
unsigned char key3[16];
int key1_sz = sizeof(key1);
int key2_sz = sizeof(key2);
int key3_sz = sizeof(key3);
int t;
first_scrypt(pass, pass_sz, salt, salt_sz, key1, key1_sz);
for (t = 0; t < THREADS; ++t) {
middle_scrypt(key1+((t*2+0)*SALT_BYTES_PER_THREAD), SALT_BYTES_PER_THREAD,
key1+((t*2+1)*SALT_BYTES_PER_THREAD), SALT_BYTES_PER_THREAD,
key2+(t*SALT_BYTES_PER_THREAD), SALT_BYTES_PER_THREAD);
}
last_scrypt(pass, pass_sz, key2, key2_sz, key3, key3_sz);
hex(key3, key3_sz, out, 33);
return 0;
}

bf/algo/brainv2.h Normal file
@@ -0,0 +1,8 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#ifndef __BRAINFLAYER_BRAINV2_H_
#define __BRAINFLAYER_BRAINV2_H_
int brainv2(unsigned char *, size_t, unsigned char *, size_t, unsigned char *);
/* vim: set ts=2 sw=2 et ai si: */
#endif /* __BRAINFLAYER_BRAINV2_H_ */

bf/algo/brainwalletio.c Normal file
@@ -0,0 +1,45 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <time.h>
#include <unistd.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <signal.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <openssl/evp.h>
#include <openssl/sha.h>
// crypto.h used for the version
#include <openssl/crypto.h>
#include "../scrypt-jane/scrypt-jane.h"
#include "../hex.h"
#define _SCRYPT_N (1<<18)
#define _SCRYPT_r 8
#define _SCRYPT_p 1
#define jane_scrypt(p, pl, s, ss, k, ks) \
scrypt(p, pl, s, ss, 17, 3, 0, k, ks)
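/* Derivation as implemented below: seed1 = scrypt(pass, salt), then the key
 * is SHA256 of the 64-char hex encoding of seed1. scrypt-jane is assumed to
 * take log2 factors, so (17, 3, 0) would mean N=2^(17+1), r=2^3, p=2^0,
 * matching the _SCRYPT_* constants above. */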
static SHA256_CTX sha256_ctx;
int brainwalletio(unsigned char *pass, size_t pass_sz,
unsigned char *salt, size_t salt_sz,
unsigned char *out) {
unsigned char seed1[32], seed2[65];
int seed1_sz = sizeof(seed1), seed2_sz = (sizeof(seed2) - 1);
jane_scrypt(pass, pass_sz, salt, salt_sz, seed1, seed1_sz);
hex(seed1, seed1_sz, seed2, seed2_sz);
SHA256_Init(&sha256_ctx);
SHA256_Update(&sha256_ctx, seed2, seed2_sz);
SHA256_Final(out, &sha256_ctx);
return 0;
}

bf/algo/brainwalletio.h Normal file
@@ -0,0 +1,8 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#ifndef __BRAINFLAYER_BRAINWALLETIO_H_
#define __BRAINFLAYER_BRAINWALLETIO_H_
int brainwalletio(unsigned char *, size_t, unsigned char *, size_t, unsigned char *);
/* vim: set ts=2 sw=2 et ai si: */
#endif /* __BRAINFLAYER_BRAINWALLETIO_H_ */

bf/algo/keccak.c Normal file
@@ -0,0 +1,182 @@
/*-
* Copyright (c) 2015 Taylor R. Campbell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#define _POSIX_C_SOURCE 200809L
#include <stdint.h>
#include "keccak.h"
#define secret /* can't use in variable-time operations, should zero */
#define FOR5(X, STMT) do \
{ \
(X) = 0; (STMT); \
(X) = 1; (STMT); \
(X) = 2; (STMT); \
(X) = 3; (STMT); \
(X) = 4; (STMT); \
} while (0)
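/* FOR5 is a manually unrolled loop over the five lanes of a plane; the
 * statement is expanded once per index with no loop overhead. */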
static inline secret uint64_t
rol64(secret uint64_t v, unsigned c)
{
return ((v << c) | (v >> (64 - c)));
}
static inline void
keccakf1600_theta(secret uint64_t A[25])
{
secret uint64_t C0, C1, C2, C3, C4;
unsigned y;
C0 = C1 = C2 = C3 = C4 = 0;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-pedantic"
FOR5(y, {
C0 ^= A[0 + 5*y];
C1 ^= A[1 + 5*y];
C2 ^= A[2 + 5*y];
C3 ^= A[3 + 5*y];
C4 ^= A[4 + 5*y];
});
FOR5(y, {
A[0 + 5*y] ^= C4 ^ rol64(C1, 1);
A[1 + 5*y] ^= C0 ^ rol64(C2, 1);
A[2 + 5*y] ^= C1 ^ rol64(C3, 1);
A[3 + 5*y] ^= C2 ^ rol64(C4, 1);
A[4 + 5*y] ^= C3 ^ rol64(C0, 1);
});
#pragma GCC diagnostic pop
}
static inline void
keccakf1600_rho_pi(secret uint64_t A[25])
{
secret uint64_t T, U;
/*
* Permute by (x,y) |---> (y, 2x + 3y mod 5) starting at (1,0),
* rotate the ith element by (i + 1)(i + 2)/2 mod 64.
*/
U = A[ 1]; T = U;
U = A[10]; A[10] = rol64(T, 1); T = U;
U = A[ 7]; A[ 7] = rol64(T, 3); T = U;
U = A[11]; A[11] = rol64(T, 6); T = U;
U = A[17]; A[17] = rol64(T, 10); T = U;
U = A[18]; A[18] = rol64(T, 15); T = U;
U = A[ 3]; A[ 3] = rol64(T, 21); T = U;
U = A[ 5]; A[ 5] = rol64(T, 28); T = U;
U = A[16]; A[16] = rol64(T, 36); T = U;
U = A[ 8]; A[ 8] = rol64(T, 45); T = U;
U = A[21]; A[21] = rol64(T, 55); T = U;
U = A[24]; A[24] = rol64(T, 2); T = U;
U = A[ 4]; A[ 4] = rol64(T, 14); T = U;
U = A[15]; A[15] = rol64(T, 27); T = U;
U = A[23]; A[23] = rol64(T, 41); T = U;
U = A[19]; A[19] = rol64(T, 56); T = U;
U = A[13]; A[13] = rol64(T, 8); T = U;
U = A[12]; A[12] = rol64(T, 25); T = U;
U = A[ 2]; A[ 2] = rol64(T, 43); T = U;
U = A[20]; A[20] = rol64(T, 62); T = U;
U = A[14]; A[14] = rol64(T, 18); T = U;
U = A[22]; A[22] = rol64(T, 39); T = U;
U = A[ 9]; A[ 9] = rol64(T, 61); T = U;
U = A[ 6]; A[ 6] = rol64(T, 20); T = U;
A[ 1] = rol64(T, 44);
}
static inline void
keccakf1600_chi(secret uint64_t A[25])
{
secret uint64_t B0, B1, B2, B3, B4;
unsigned y;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-pedantic"
FOR5(y, {
B0 = A[0 + 5*y];
B1 = A[1 + 5*y];
B2 = A[2 + 5*y];
B3 = A[3 + 5*y];
B4 = A[4 + 5*y];
A[0 + 5*y] ^= ~B1 & B2;
A[1 + 5*y] ^= ~B2 & B3;
A[2 + 5*y] ^= ~B3 & B4;
A[3 + 5*y] ^= ~B4 & B0;
A[4 + 5*y] ^= ~B0 & B1;
});
#pragma GCC diagnostic pop
}
static void
keccakf1600_round(secret uint64_t A[25])
{
keccakf1600_theta(A);
keccakf1600_rho_pi(A);
keccakf1600_chi(A);
}
void
keccakf1600(secret uint64_t A[25])
{
/*
* RC[i] = \sum_{j = 0,...,6} rc(j + 7i) 2^(2^j - 1),
* rc(t) = (x^t mod x^8 + x^6 + x^5 + x^4 + 1) mod x in GF(2)[x]
*/
static const uint64_t RC[24] = {
0x0000000000000001ULL,
0x0000000000008082ULL,
0x800000000000808aULL,
0x8000000080008000ULL,
0x000000000000808bULL,
0x0000000080000001ULL,
0x8000000080008081ULL,
0x8000000000008009ULL,
0x000000000000008aULL,
0x0000000000000088ULL,
0x0000000080008009ULL,
0x000000008000000aULL,
0x000000008000808bULL,
0x800000000000008bULL,
0x8000000000008089ULL,
0x8000000000008003ULL,
0x8000000000008002ULL,
0x8000000000000080ULL,
0x000000000000800aULL,
0x800000008000000aULL,
0x8000000080008081ULL,
0x8000000000008080ULL,
0x0000000080000001ULL,
0x8000000080008008ULL,
};
unsigned i;
for (i = 0; i < 24; i++) {
keccakf1600_round(A);
A[0] ^= RC[i];
}
}

bf/algo/keccak.h Normal file
@@ -0,0 +1,34 @@
/*-
* Copyright (c) 2015 Taylor R. Campbell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef KECCAK_H
#define KECCAK_H
#include <stdint.h>
void keccakf1600(uint64_t A[25]);
#endif /* KECCAK_H */

bf/algo/sha3.c Normal file
@@ -0,0 +1,674 @@
/*-
* Copyright (c) 2015 Taylor R. Campbell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* SHA-3: FIPS-202, Permutation-Based Hash and Extendable-Output Functions
*/
#define _POSIX_C_SOURCE 200809L
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "keccak.h"
#include "sha3.h"
#define MIN(a,b) ((a) < (b) ? (a) : (b))
void *(*volatile sha3_explicit_memset_impl)(void *, int, size_t) = &memset;
static void *
explicit_memset(void *buf, int c, size_t n)
{
return (*sha3_explicit_memset_impl)(buf, c, n);
}
static inline uint64_t
le64dec(const void *buf)
{
const uint8_t *p = buf;
return (((uint64_t)p[0]) |
((uint64_t)p[1] << 8) |
((uint64_t)p[2] << 16) |
((uint64_t)p[3] << 24) |
((uint64_t)p[4] << 32) |
((uint64_t)p[5] << 40) |
((uint64_t)p[6] << 48) |
((uint64_t)p[7] << 56));
}
static inline void
le64enc(void *buf, uint64_t v)
{
uint8_t *p = buf;
*p++ = v; v >>= 8;
*p++ = v; v >>= 8;
*p++ = v; v >>= 8;
*p++ = v; v >>= 8;
*p++ = v; v >>= 8;
*p++ = v; v >>= 8;
*p++ = v; v >>= 8;
*p++ = v;
}
/*
* Common body. All the SHA-3 functions share code structure. They
* differ only in the size of the chunks they split the message into:
* for digest size d, they are split into chunks of 200 - d bytes.
*/
static inline unsigned
sha3_rate(unsigned d)
{
const unsigned cw = 2*d/8; /* capacity in words */
return 25 - cw;
}
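/* e.g. SHA3-256: d = 32 bytes, capacity = 8 words, rate = 17 words (136
 * bytes); SHA3-512: capacity = 16 words, rate = 9 words (72 bytes). */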
static void
sha3_init(struct sha3 *C, unsigned rw)
{
unsigned iw;
C->nb = 8*rw;
for (iw = 0; iw < 25; iw++)
C->A[iw] = 0;
}
static void
sha3_update(struct sha3 *C, const uint8_t *data, size_t len, unsigned rw)
{
uint64_t T;
unsigned ib, iw; /* index of byte/word */
assert(0 < C->nb);
/* If there's a partial word, try to fill it. */
if ((C->nb % 8) != 0) {
T = 0;
for (ib = 0; ib < MIN(len, C->nb % 8); ib++)
T |= (uint64_t)data[ib] << (8*ib);
C->A[rw - (C->nb + 7)/8] ^= T << (8*(8 - (C->nb % 8)));
C->nb -= ib;
data += ib;
len -= ib;
/* If we filled the buffer, permute now. */
if (C->nb == 0) {
keccakf1600(C->A);
C->nb = 8*rw;
}
/* If that exhausted the input, we're done. */
if (len == 0)
return;
}
/* At a word boundary. Fill any partial buffer. */
assert((C->nb % 8) == 0);
if (C->nb < 8*rw) {
for (iw = 0; iw < MIN(len, C->nb)/8; iw++)
C->A[rw - C->nb/8 + iw] ^= le64dec(data + 8*iw);
C->nb -= 8*iw;
data += 8*iw;
len -= 8*iw;
/* If we filled the buffer, permute now. */
if (C->nb == 0) {
keccakf1600(C->A);
C->nb = 8*rw;
} else {
/* Otherwise, less than a word left. */
assert(len < 8);
goto partial;
}
}
/* At a buffer boundary. Absorb input one buffer at a time. */
assert(C->nb == 8*rw);
while (8*rw <= len) {
for (iw = 0; iw < rw; iw++)
C->A[iw] ^= le64dec(data + 8*iw);
keccakf1600(C->A);
data += 8*rw;
len -= 8*rw;
}
/* Partially fill the buffer with as many words as we can. */
for (iw = 0; iw < len/8; iw++)
C->A[rw - C->nb/8 + iw] ^= le64dec(data + 8*iw);
C->nb -= 8*iw;
data += 8*iw;
len -= 8*iw;
partial:
/* Partially fill the last word with as many bytes as we can. */
assert(len < 8);
assert(0 < C->nb);
assert((C->nb % 8) == 0);
T = 0;
for (ib = 0; ib < len; ib++)
T |= (uint64_t)data[ib] << (8*ib);
C->A[rw - C->nb/8] ^= T;
C->nb -= ib;
assert(0 < C->nb);
}
static inline void
sha3_or_keccak_final(uint8_t *h, unsigned d, struct sha3 *C, unsigned rw, uint64_t padding)
{
unsigned nw, iw;
assert(d <= 8*25);
assert(0 < C->nb);
/* Append the domain bits encoded in `padding` (0x06 for SHA-3, 0x01 for
 * raw Keccak), then pad with 10*1 up to the buffer boundary, LSB first. */
nw = (C->nb + 7)/8;
assert(0 < nw);
assert(nw <= rw);
C->A[rw - nw] ^= padding << (8*(8*nw - C->nb));
C->A[rw - 1] ^= 0x8000000000000000ULL;
/* Permute one last time. */
keccakf1600(C->A);
/* Reveal the first 8d bits of state, forget 1600-8d of them. */
for (iw = 0; iw < d/8; iw++)
le64enc(h + 8*iw, C->A[iw]);
h += 8*iw;
d -= 8*iw;
if (0 < d) {
/* For SHA3-224, we need to expose a partial word. */
uint64_t T = C->A[iw];
do {
*h++ = T & 0xff;
T >>= 8;
} while (--d);
}
(void)explicit_memset(C->A, 0, sizeof C->A);
C->nb = 0;
}
static void
sha3_final(uint8_t *h, unsigned d, struct sha3 *C, unsigned rw)
{
sha3_or_keccak_final(h, d, C, rw, 0x06);
}
static void
keccak_final(uint8_t *h, unsigned d, struct sha3 *C, unsigned rw)
{
sha3_or_keccak_final(h, d, C, rw, 0x01);
}
static void
shake_final(uint8_t *h, unsigned d, struct sha3 *C, unsigned rw)
{
unsigned nw, iw;
assert(0 < C->nb);
/* Append 1111, pad with 10*1 up to buffer boundary, LSB first. */
nw = (C->nb + 7)/8;
assert(0 < nw);
assert(nw <= rw);
C->A[rw - nw] ^= (uint64_t)0x1f << (8*(8*nw - C->nb));
C->A[rw - 1] ^= 0x8000000000000000ULL;
/* Permute, reveal first rw words of state, repeat. */
while (8*rw <= d) {
keccakf1600(C->A);
for (iw = 0; iw < rw; iw++)
le64enc(h + 8*iw, C->A[iw]);
h += 8*iw;
d -= 8*iw;
}
/*
* If 8*rw (the output rate in bytes) does not divide d, more
* words are wanted: permute again and reveal a little more.
*/
if (0 < d) {
keccakf1600(C->A);
for (iw = 0; iw < d/8; iw++)
le64enc(h + 8*iw, C->A[iw]);
h += 8*iw;
d -= 8*iw;
/*
* If 8 does not divide d, more bytes are wanted:
* reveal them.
*/
if (0 < d) {
uint64_t T = C->A[iw];
do {
*h++ = T & 0xff;
T >>= 8;
} while (--d);
}
}
(void)explicit_memset(C->A, 0, sizeof C->A);
C->nb = 0;
}
void
SHA3_224_Init(SHA3_224_CTX *C)
{
sha3_init(&C->C224, sha3_rate(SHA3_224_DIGEST_LENGTH));
}
void
SHA3_224_Update(SHA3_224_CTX *C, const uint8_t *data, size_t len)
{
sha3_update(&C->C224, data, len, sha3_rate(SHA3_224_DIGEST_LENGTH));
}
void
SHA3_224_Final(uint8_t h[SHA3_224_DIGEST_LENGTH], SHA3_224_CTX *C)
{
sha3_final(h, SHA3_224_DIGEST_LENGTH, &C->C224,
sha3_rate(SHA3_224_DIGEST_LENGTH));
}
void
SHA3_256_Init(SHA3_256_CTX *C)
{
sha3_init(&C->C256, sha3_rate(SHA3_256_DIGEST_LENGTH));
}
void
SHA3_256_Update(SHA3_256_CTX *C, const uint8_t *data, size_t len)
{
sha3_update(&C->C256, data, len, sha3_rate(SHA3_256_DIGEST_LENGTH));
}
void
SHA3_256_Final(uint8_t h[SHA3_256_DIGEST_LENGTH], SHA3_256_CTX *C)
{
sha3_final(h, SHA3_256_DIGEST_LENGTH, &C->C256,
sha3_rate(SHA3_256_DIGEST_LENGTH));
}
void
SHA3_384_Init(SHA3_384_CTX *C)
{
sha3_init(&C->C384, sha3_rate(SHA3_384_DIGEST_LENGTH));
}
void
SHA3_384_Update(SHA3_384_CTX *C, const uint8_t *data, size_t len)
{
sha3_update(&C->C384, data, len, sha3_rate(SHA3_384_DIGEST_LENGTH));
}
void
SHA3_384_Final(uint8_t h[SHA3_384_DIGEST_LENGTH], SHA3_384_CTX *C)
{
sha3_final(h, SHA3_384_DIGEST_LENGTH, &C->C384,
sha3_rate(SHA3_384_DIGEST_LENGTH));
}
void
SHA3_512_Init(SHA3_512_CTX *C)
{
sha3_init(&C->C512, sha3_rate(SHA3_512_DIGEST_LENGTH));
}
void
SHA3_512_Update(SHA3_512_CTX *C, const uint8_t *data, size_t len)
{
sha3_update(&C->C512, data, len, sha3_rate(SHA3_512_DIGEST_LENGTH));
}
void
SHA3_512_Final(uint8_t h[SHA3_512_DIGEST_LENGTH], SHA3_512_CTX *C)
{
sha3_final(h, SHA3_512_DIGEST_LENGTH, &C->C512,
sha3_rate(SHA3_512_DIGEST_LENGTH));
}
void
SHAKE128_Init(SHAKE128_CTX *C)
{
sha3_init(&C->C128, sha3_rate(128/8));
}
void
SHAKE128_Update(SHAKE128_CTX *C, const uint8_t *data, size_t len)
{
sha3_update(&C->C128, data, len, sha3_rate(128/8));
}
void
SHAKE128_Final(uint8_t *h, size_t d, SHAKE128_CTX *C)
{
shake_final(h, d, &C->C128, sha3_rate(128/8));
}
void
SHAKE256_Init(SHAKE256_CTX *C)
{
sha3_init(&C->C256, sha3_rate(256/8));
}
void
SHAKE256_Update(SHAKE256_CTX *C, const uint8_t *data, size_t len)
{
sha3_update(&C->C256, data, len, sha3_rate(256/8));
}
void
SHAKE256_Final(uint8_t *h, size_t d, SHAKE256_CTX *C)
{
shake_final(h, d, &C->C256, sha3_rate(256/8));
}
void
KECCAK_256_Final(uint8_t h[SHA3_256_DIGEST_LENGTH], SHA3_256_CTX *C)
{
keccak_final(h, SHA3_256_DIGEST_LENGTH, &C->C256,
sha3_rate(SHA3_256_DIGEST_LENGTH));
}
void
KECCAK_384_Final(uint8_t h[SHA3_384_DIGEST_LENGTH], SHA3_384_CTX *C)
{
keccak_final(h, SHA3_384_DIGEST_LENGTH, &C->C384,
sha3_rate(SHA3_384_DIGEST_LENGTH));
}
void
KECCAK_512_Final(uint8_t h[SHA3_512_DIGEST_LENGTH], SHA3_512_CTX *C)
{
keccak_final(h, SHA3_512_DIGEST_LENGTH, &C->C512,
sha3_rate(SHA3_512_DIGEST_LENGTH));
}
static void
sha3_selftest_prng(void *buf, size_t len, uint32_t seed)
{
uint8_t *p = buf;
size_t n = len;
uint32_t t, a, b;
a = 0xdead4bad * seed;
b = 1;
while (n--) {
t = a + b;
*p++ = t >> 24;
a = b;
b = t;
}
}
int
SHA3_Selftest(void)
{
const uint8_t d224_0[] = { /* SHA3-224(0-bit) */
0x6b,0x4e,0x03,0x42,0x36,0x67,0xdb,0xb7,
0x3b,0x6e,0x15,0x45,0x4f,0x0e,0xb1,0xab,
0xd4,0x59,0x7f,0x9a,0x1b,0x07,0x8e,0x3f,
0x5b,0x5a,0x6b,0xc7,
};
const uint8_t d256_0[] = { /* SHA3-256(0-bit) */
0xa7,0xff,0xc6,0xf8,0xbf,0x1e,0xd7,0x66,
0x51,0xc1,0x47,0x56,0xa0,0x61,0xd6,0x62,
0xf5,0x80,0xff,0x4d,0xe4,0x3b,0x49,0xfa,
0x82,0xd8,0x0a,0x4b,0x80,0xf8,0x43,0x4a,
};
const uint8_t d384_0[] = { /* SHA3-384(0-bit) */
0x0c,0x63,0xa7,0x5b,0x84,0x5e,0x4f,0x7d,
0x01,0x10,0x7d,0x85,0x2e,0x4c,0x24,0x85,
0xc5,0x1a,0x50,0xaa,0xaa,0x94,0xfc,0x61,
0x99,0x5e,0x71,0xbb,0xee,0x98,0x3a,0x2a,
0xc3,0x71,0x38,0x31,0x26,0x4a,0xdb,0x47,
0xfb,0x6b,0xd1,0xe0,0x58,0xd5,0xf0,0x04,
};
const uint8_t d512_0[] = { /* SHA3-512(0-bit) */
0xa6,0x9f,0x73,0xcc,0xa2,0x3a,0x9a,0xc5,
0xc8,0xb5,0x67,0xdc,0x18,0x5a,0x75,0x6e,
0x97,0xc9,0x82,0x16,0x4f,0xe2,0x58,0x59,
0xe0,0xd1,0xdc,0xc1,0x47,0x5c,0x80,0xa6,
0x15,0xb2,0x12,0x3a,0xf1,0xf5,0xf9,0x4c,
0x11,0xe3,0xe9,0x40,0x2c,0x3a,0xc5,0x58,
0xf5,0x00,0x19,0x9d,0x95,0xb6,0xd3,0xe3,
0x01,0x75,0x85,0x86,0x28,0x1d,0xcd,0x26,
};
const uint8_t shake128_0_41[] = { /* SHAKE128(0-bit, 41) */
0x7f,0x9c,0x2b,0xa4,0xe8,0x8f,0x82,0x7d,
0x61,0x60,0x45,0x50,0x76,0x05,0x85,0x3e,
0xd7,0x3b,0x80,0x93,0xf6,0xef,0xbc,0x88,
0xeb,0x1a,0x6e,0xac,0xfa,0x66,0xef,0x26,
0x3c,0xb1,0xee,0xa9,0x88,0x00,0x4b,0x93,0x10,
};
const uint8_t shake256_0_73[] = { /* SHAKE256(0-bit, 73) */
0x46,0xb9,0xdd,0x2b,0x0b,0xa8,0x8d,0x13,
0x23,0x3b,0x3f,0xeb,0x74,0x3e,0xeb,0x24,
0x3f,0xcd,0x52,0xea,0x62,0xb8,0x1b,0x82,
0xb5,0x0c,0x27,0x64,0x6e,0xd5,0x76,0x2f,
0xd7,0x5d,0xc4,0xdd,0xd8,0xc0,0xf2,0x00,
0xcb,0x05,0x01,0x9d,0x67,0xb5,0x92,0xf6,
0xfc,0x82,0x1c,0x49,0x47,0x9a,0xb4,0x86,
0x40,0x29,0x2e,0xac,0xb3,0xb7,0xc4,0xbe,
0x14,0x1e,0x96,0x61,0x6f,0xb1,0x39,0x57,0x69,
};
const uint8_t d224_1600[] = { /* SHA3-224(200 * 0xa3) */
0x93,0x76,0x81,0x6a,0xba,0x50,0x3f,0x72,
0xf9,0x6c,0xe7,0xeb,0x65,0xac,0x09,0x5d,
0xee,0xe3,0xbe,0x4b,0xf9,0xbb,0xc2,0xa1,
0xcb,0x7e,0x11,0xe0,
};
const uint8_t d256_1600[] = { /* SHA3-256(200 * 0xa3) */
0x79,0xf3,0x8a,0xde,0xc5,0xc2,0x03,0x07,
0xa9,0x8e,0xf7,0x6e,0x83,0x24,0xaf,0xbf,
0xd4,0x6c,0xfd,0x81,0xb2,0x2e,0x39,0x73,
0xc6,0x5f,0xa1,0xbd,0x9d,0xe3,0x17,0x87,
};
const uint8_t d384_1600[] = { /* SHA3-384(200 * 0xa3) */
0x18,0x81,0xde,0x2c,0xa7,0xe4,0x1e,0xf9,
0x5d,0xc4,0x73,0x2b,0x8f,0x5f,0x00,0x2b,
0x18,0x9c,0xc1,0xe4,0x2b,0x74,0x16,0x8e,
0xd1,0x73,0x26,0x49,0xce,0x1d,0xbc,0xdd,
0x76,0x19,0x7a,0x31,0xfd,0x55,0xee,0x98,
0x9f,0x2d,0x70,0x50,0xdd,0x47,0x3e,0x8f,
};
const uint8_t d512_1600[] = { /* SHA3-512(200 * 0xa3) */
0xe7,0x6d,0xfa,0xd2,0x20,0x84,0xa8,0xb1,
0x46,0x7f,0xcf,0x2f,0xfa,0x58,0x36,0x1b,
0xec,0x76,0x28,0xed,0xf5,0xf3,0xfd,0xc0,
0xe4,0x80,0x5d,0xc4,0x8c,0xae,0xec,0xa8,
0x1b,0x7c,0x13,0xc3,0x0a,0xdf,0x52,0xa3,
0x65,0x95,0x84,0x73,0x9a,0x2d,0xf4,0x6b,
0xe5,0x89,0xc5,0x1c,0xa1,0xa4,0xa8,0x41,
0x6d,0xf6,0x54,0x5a,0x1c,0xe8,0xba,0x00,
};
const uint8_t shake128_1600_41[] = { /* SHAKE128(200 * 0xa3, 41) */
0x13,0x1a,0xb8,0xd2,0xb5,0x94,0x94,0x6b,
0x9c,0x81,0x33,0x3f,0x9b,0xb6,0xe0,0xce,
0x75,0xc3,0xb9,0x31,0x04,0xfa,0x34,0x69,
0xd3,0x91,0x74,0x57,0x38,0x5d,0xa0,0x37,
0xcf,0x23,0x2e,0xf7,0x16,0x4a,0x6d,0x1e,0xb4,
};
const uint8_t shake256_1600_73[] = { /* SHAKE256(200 * 0xa3, 73) */
0xcd,0x8a,0x92,0x0e,0xd1,0x41,0xaa,0x04,
0x07,0xa2,0x2d,0x59,0x28,0x86,0x52,0xe9,
0xd9,0xf1,0xa7,0xee,0x0c,0x1e,0x7c,0x1c,
0xa6,0x99,0x42,0x4d,0xa8,0x4a,0x90,0x4d,
0x2d,0x70,0x0c,0xaa,0xe7,0x39,0x6e,0xce,
0x96,0x60,0x44,0x40,0x57,0x7d,0xa4,0xf3,
0xaa,0x22,0xae,0xb8,0x85,0x7f,0x96,0x1c,
0x4c,0xd8,0xe0,0x6f,0x0a,0xe6,0x61,0x0b,
0x10,0x48,0xa7,0xf6,0x4e,0x10,0x74,0xcd,0x62,
};
const uint8_t d0[] = {
0x6c,0x02,0x1a,0xc6,0x65,0xaf,0x80,0xfb,
0x52,0xe6,0x2d,0x27,0xe5,0x02,0x88,0x84,
0xec,0x1c,0x0c,0xe7,0x0b,0x94,0x55,0x83,
0x19,0xf2,0xbf,0x09,0x86,0xeb,0x1a,0xbb,
0xc3,0x0d,0x1c,0xef,0x22,0xfe,0xc5,0x4c,
0x45,0x90,0x66,0x14,0x00,0x6e,0xc8,0x79,
0xdf,0x1e,0x02,0xbd,0x75,0xe9,0x60,0xd8,
0x60,0x39,0x85,0xc9,0xc4,0xee,0x33,0xab,
};
const unsigned mlen[6] = { 0, 3, 128, 129, 255, 1024 };
uint8_t m[1024], d[73];
SHA3_224_CTX sha3224;
SHA3_256_CTX sha3256;
SHA3_384_CTX sha3384;
SHA3_512_CTX sha3512;
SHAKE128_CTX shake128;
SHAKE256_CTX shake256;
SHA3_512_CTX ctx;
unsigned mi;
/*
* NIST test vectors from
* <http://csrc.nist.gov/groups/ST/toolkit/examples.html#aHashing>:
* 0-bit, 1600-bit repeated 0xa3 (= 0b10100011).
*/
SHA3_224_Init(&sha3224);
SHA3_224_Final(d, &sha3224);
if (memcmp(d, d224_0, 28) != 0)
return -1;
SHA3_256_Init(&sha3256);
SHA3_256_Final(d, &sha3256);
if (memcmp(d, d256_0, 32) != 0)
return -1;
SHA3_384_Init(&sha3384);
SHA3_384_Final(d, &sha3384);
if (memcmp(d, d384_0, 48) != 0)
return -1;
SHA3_512_Init(&sha3512);
SHA3_512_Final(d, &sha3512);
if (memcmp(d, d512_0, 64) != 0)
return -1;
SHAKE128_Init(&shake128);
SHAKE128_Final(d, 41, &shake128);
if (memcmp(d, shake128_0_41, 41) != 0)
return -1;
SHAKE256_Init(&shake256);
SHAKE256_Final(d, 73, &shake256);
if (memcmp(d, shake256_0_73, 73) != 0)
return -1;
(void)memset(m, 0xa3, 200);
SHA3_224_Init(&sha3224);
SHA3_224_Update(&sha3224, m, 200);
SHA3_224_Final(d, &sha3224);
if (memcmp(d, d224_1600, 28) != 0)
return -1;
SHA3_256_Init(&sha3256);
SHA3_256_Update(&sha3256, m, 200);
SHA3_256_Final(d, &sha3256);
if (memcmp(d, d256_1600, 32) != 0)
return -1;
SHA3_384_Init(&sha3384);
SHA3_384_Update(&sha3384, m, 200);
SHA3_384_Final(d, &sha3384);
if (memcmp(d, d384_1600, 48) != 0)
return -1;
SHA3_512_Init(&sha3512);
SHA3_512_Update(&sha3512, m, 200);
SHA3_512_Final(d, &sha3512);
if (memcmp(d, d512_1600, 64) != 0)
return -1;
SHAKE128_Init(&shake128);
SHAKE128_Update(&shake128, m, 200);
SHAKE128_Final(d, 41, &shake128);
if (memcmp(d, shake128_1600_41, 41) != 0)
return -1;
SHAKE256_Init(&shake256);
SHAKE256_Update(&shake256, m, 200);
SHAKE256_Final(d, 73, &shake256);
if (memcmp(d, shake256_1600_73, 73) != 0)
return -1;
/*
* Hand-crufted test vectors with unaligned message lengths.
*/
SHA3_512_Init(&ctx);
for (mi = 0; mi < 6; mi++) {
sha3_selftest_prng(m, mlen[mi], (224/8)*mlen[mi]);
SHA3_224_Init(&sha3224);
SHA3_224_Update(&sha3224, m, mlen[mi]);
SHA3_224_Final(d, &sha3224);
SHA3_512_Update(&ctx, d, 224/8);
}
for (mi = 0; mi < 6; mi++) {
sha3_selftest_prng(m, mlen[mi], (256/8)*mlen[mi]);
SHA3_256_Init(&sha3256);
SHA3_256_Update(&sha3256, m, mlen[mi]);
SHA3_256_Final(d, &sha3256);
SHA3_512_Update(&ctx, d, 256/8);
}
for (mi = 0; mi < 6; mi++) {
sha3_selftest_prng(m, mlen[mi], (384/8)*mlen[mi]);
SHA3_384_Init(&sha3384);
SHA3_384_Update(&sha3384, m, mlen[mi]);
SHA3_384_Final(d, &sha3384);
SHA3_512_Update(&ctx, d, 384/8);
}
for (mi = 0; mi < 6; mi++) {
sha3_selftest_prng(m, mlen[mi], (512/8)*mlen[mi]);
SHA3_512_Init(&sha3512);
SHA3_512_Update(&sha3512, m, mlen[mi]);
SHA3_512_Final(d, &sha3512);
SHA3_512_Update(&ctx, d, 512/8);
}
SHA3_512_Final(d, &ctx);
if (memcmp(d, d0, 64) != 0)
return -1;
return 0;
}

bf/algo/sha3.h Normal file
@@ -0,0 +1,88 @@
/*-
* Copyright (c) 2015 Taylor R. Campbell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef SHA3_H
#define SHA3_H
#include <stddef.h>
#include <stdint.h>
struct sha3 {
uint64_t A[25];
unsigned nb; /* number of bytes remaining to fill buffer */
};
typedef struct { struct sha3 C224; } SHA3_224_CTX;
typedef struct { struct sha3 C256; } SHA3_256_CTX;
typedef struct { struct sha3 C384; } SHA3_384_CTX;
typedef struct { struct sha3 C512; } SHA3_512_CTX;
typedef struct { struct sha3 C128; } SHAKE128_CTX;
typedef struct { struct sha3 C256; } SHAKE256_CTX;
#define SHA3_224_DIGEST_LENGTH 28
#define SHA3_256_DIGEST_LENGTH 32
#define SHA3_384_DIGEST_LENGTH 48
#define SHA3_512_DIGEST_LENGTH 64
void SHA3_224_Init(SHA3_224_CTX *);
void SHA3_224_Update(SHA3_224_CTX *, const uint8_t *, size_t);
void SHA3_224_Final(uint8_t[SHA3_224_DIGEST_LENGTH], SHA3_224_CTX *);
void SHA3_256_Init(SHA3_256_CTX *);
void SHA3_256_Update(SHA3_256_CTX *, const uint8_t *, size_t);
void SHA3_256_Final(uint8_t[SHA3_256_DIGEST_LENGTH], SHA3_256_CTX *);
void SHA3_384_Init(SHA3_384_CTX *);
void SHA3_384_Update(SHA3_384_CTX *, const uint8_t *, size_t);
void SHA3_384_Final(uint8_t[SHA3_384_DIGEST_LENGTH], SHA3_384_CTX *);
void SHA3_512_Init(SHA3_512_CTX *);
void SHA3_512_Update(SHA3_512_CTX *, const uint8_t *, size_t);
void SHA3_512_Final(uint8_t[SHA3_512_DIGEST_LENGTH], SHA3_512_CTX *);
void SHAKE128_Init(SHAKE128_CTX *);
void SHAKE128_Update(SHAKE128_CTX *, const uint8_t *, size_t);
void SHAKE128_Final(uint8_t *, size_t, SHAKE128_CTX *);
void SHAKE256_Init(SHAKE256_CTX *);
void SHAKE256_Update(SHAKE256_CTX *, const uint8_t *, size_t);
void SHAKE256_Final(uint8_t *, size_t, SHAKE256_CTX *);
#define KECCAK_256_Init SHA3_256_Init
#define KECCAK_256_Update SHA3_256_Update
void KECCAK_256_Final(uint8_t[SHA3_256_DIGEST_LENGTH], SHA3_256_CTX *);
#define KECCAK_384_Init SHA3_384_Init
#define KECCAK_384_Update SHA3_384_Update
void KECCAK_384_Final(uint8_t[SHA3_384_DIGEST_LENGTH], SHA3_384_CTX *);
#define KECCAK_512_Init SHA3_512_Init
#define KECCAK_512_Update SHA3_512_Update
void KECCAK_512_Final(uint8_t[SHA3_512_DIGEST_LENGTH], SHA3_512_CTX *);
int SHA3_Selftest(void);
#endif /* SHA3_H */

bf/algo/warpwallet.c Normal file
@@ -0,0 +1,54 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <time.h>
#include <unistd.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <signal.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <openssl/evp.h>
#include <openssl/sha.h>
// crypto.h used for the version
#include <openssl/crypto.h>
#include "../scrypt-jane/scrypt-jane.h"
#define _PBKDF2_i (1<<16)
#define _SCRYPT_N (1<<18)
#define _SCRYPT_r 8
#define _SCRYPT_p 1
#define openssl_pbkdf2(p, pl, s, ss, k, ks) \
PKCS5_PBKDF2_HMAC(p, pl, s, ss, _PBKDF2_i, EVP_sha256(), ks, k)
/*
#define libscrypt(p, pl, s, ss, k, ks) \
libscrypt_scrypt(p, pl, s, ss, _SCRYPT_N, _SCRYPT_r, _SCRYPT_p, k, ks)
*/
#define jane_scrypt(p, pl, s, ss, k, ks) \
scrypt(p, pl, s, ss, 17, 3, 0, k, ks)
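/* WarpWallet derivation as implemented below:
 *   out = scrypt(pass||0x01, salt||0x01) XOR pbkdf2-sha256(pass||0x02, salt||0x02)
 * Note the suffix byte is written one past pass_sz/salt_sz, so callers must
 * supply buffers with at least one spare byte. scrypt-jane's (17, 3, 0) is
 * assumed to encode N=2^18, r=8, p=1 as log2 factors, matching _SCRYPT_*. */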
int warpwallet(unsigned char *pass, size_t pass_sz,
unsigned char *salt, size_t salt_sz,
unsigned char *out) {
unsigned char seed1[32], seed2[32];
int i, seed_sz = 32;
pass[pass_sz] = salt[salt_sz] = 1;
//if ((ret = libscrypt(pass, pass_sz+1, salt, salt_sz+1, seed1, seed_sz)) != 0) return ret;
jane_scrypt(pass, pass_sz+1, salt, salt_sz+1, seed1, seed_sz);
pass[pass_sz] = salt[salt_sz] = 2;
openssl_pbkdf2(pass, pass_sz+1, salt, salt_sz+1, seed2, seed_sz);
// xor the scrypt and pbkdf2 output together
for (i = 0; i < 32; ++i) { out[i] = seed1[i] ^ seed2[i]; }
return 0;
}

bf/algo/warpwallet.h Normal file
@@ -0,0 +1,8 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#ifndef __BRAINFLAYER_WARPWALLET_H_
#define __BRAINFLAYER_WARPWALLET_H_
int warpwallet(unsigned char *, size_t, unsigned char *, size_t, unsigned char *);
/* vim: set ts=2 sw=2 et ai si: */
#endif /* __BRAINFLAYER_WARPWALLET_H_ */

bf/blfchk.c Normal file
@@ -0,0 +1,72 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <time.h>
#include <unistd.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <signal.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <arpa/inet.h> /* for ntohl/htonl */
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/types.h>
#include "hex.h"
#include "bloom.h"
#include "mmapf.h"
#include "hash160.h"
#include "hsearchf.h"
int main(int argc, char **argv) {
int ret;
hash160_t hash;
char *line = NULL;
size_t line_sz = 0;
unsigned char buf[128];
unsigned char *bloom, *bloomfile, *hashfile;
FILE *ifile = stdin, *ofile = stdout, *hfile = NULL;
mmapf_ctx bloom_mmapf;
if (argc < 2 || argc > 3) {
fprintf(stderr, "Usage: %s BLOOM_FILTER_FILE HASH_FILE\n", argv[0]);
return 1;
}
bloomfile = argv[1];
if ((ret = mmapf(&bloom_mmapf, bloomfile, BLOOM_SIZE, MMAPF_RNDRD)) != MMAPF_OKAY) {
fprintf(stderr, "failed to open bloom filter '%s': %s\n", bloomfile, mmapf_strerror(ret));
return 1;
} else if (bloom_mmapf.mem == NULL) {
fprintf(stderr, "got NULL pointer trying to set up bloom filter\n");
return 1;
}
bloom = bloom_mmapf.mem;
if (argc == 3) {
hashfile = argv[2];
hfile = fopen(hashfile, "r");
}
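/* Read hex-encoded hash160s line by line: echo lines that hit the bloom
 * filter and, when a hash file was given, drop bloom false positives by
 * confirming each hit against it with hsearchf. */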
while (getline(&line, &line_sz, ifile) > 0) {
unhex(line, strlen(line), hash.uc, sizeof(hash.uc));
if (bloom_chk_hash160(bloom, hash.ul)) {
if (hfile && !hsearchf(hfile, &hash)) {
//fprintf(ofile, "%s (false positive)\n", hex(hash.uc, sizeof(hash.uc), buf, sizeof(buf)));
continue;
}
//fprintf(ofile, "%s\n", hex(hash.uc, sizeof(hash.uc), buf, sizeof(buf)));
fprintf(ofile, "%s", line);
}
}
return 0;
}
/* vim: set ts=2 sw=2 et ai si: */

bf/bloom.c Normal file
@@ -0,0 +1,43 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include "bloom.h"
#include "mmapf.h"
void bloom_set_hash160(unsigned char *bloom, uint32_t *h) {
unsigned int t;
t = BH00(h); BLOOM_SET_BIT(t);
t = BH01(h); BLOOM_SET_BIT(t);
t = BH02(h); BLOOM_SET_BIT(t);
t = BH03(h); BLOOM_SET_BIT(t);
t = BH04(h); BLOOM_SET_BIT(t);
t = BH05(h); BLOOM_SET_BIT(t);
t = BH06(h); BLOOM_SET_BIT(t);
t = BH07(h); BLOOM_SET_BIT(t);
t = BH08(h); BLOOM_SET_BIT(t);
t = BH09(h); BLOOM_SET_BIT(t);
t = BH10(h); BLOOM_SET_BIT(t);
t = BH11(h); BLOOM_SET_BIT(t);
t = BH12(h); BLOOM_SET_BIT(t);
t = BH13(h); BLOOM_SET_BIT(t);
t = BH14(h); BLOOM_SET_BIT(t);
t = BH15(h); BLOOM_SET_BIT(t);
t = BH16(h); BLOOM_SET_BIT(t);
t = BH17(h); BLOOM_SET_BIT(t);
t = BH18(h); BLOOM_SET_BIT(t);
t = BH19(h); BLOOM_SET_BIT(t);
}
/*
int bloom_save(unsigned char *filename, unsigned char *bloom);
*/
/* vim: set ts=2 sw=2 et ai si: */

bf/bloom.h Normal file
@@ -0,0 +1,68 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#ifndef __BRAINFLAYER_BLOOM_H_
#define __BRAINFLAYER_BLOOM_H_
#include <stdint.h>
/* 2^32 bits */
#define BLOOM_SIZE (512*1024*1024)
#define BLOOM_SET_BIT(N) (bloom[(N)>>3] = bloom[(N)>>3] | (1<<((N)&7)))
#define BLOOM_GET_BIT(N) ( ( bloom[(N)>>3]>>((N)&7) )&1)
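/* Each BHxx macro derives one 32-bit index into the 2^32-bit filter from the
   160-bit hash, viewed as five 32-bit words: BH00..BH04 use the words
   directly, BH05..BH19 splice bits of adjacent words at 16-, 8- and 24-bit
   offsets, giving 20 probes per hash160. */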
#define BH00(N) (N[0])
#define BH01(N) (N[1])
#define BH02(N) (N[2])
#define BH03(N) (N[3])
#define BH04(N) (N[4])
#define BH05(N) (N[0]<<16|N[1]>>16)
#define BH06(N) (N[1]<<16|N[2]>>16)
#define BH07(N) (N[2]<<16|N[3]>>16)
#define BH08(N) (N[3]<<16|N[4]>>16)
#define BH09(N) (N[4]<<16|N[0]>>16)
#define BH10(N) (N[0]<< 8|N[1]>>24)
#define BH11(N) (N[1]<< 8|N[2]>>24)
#define BH12(N) (N[2]<< 8|N[3]>>24)
#define BH13(N) (N[3]<< 8|N[4]>>24)
#define BH14(N) (N[4]<< 8|N[0]>>24)
#define BH15(N) (N[0]<<24|N[1]>> 8)
#define BH16(N) (N[1]<<24|N[2]>> 8)
#define BH17(N) (N[2]<<24|N[3]>> 8)
#define BH18(N) (N[3]<<24|N[4]>> 8)
#define BH19(N) (N[4]<<24|N[0]>> 8)
void bloom_set_hash160(unsigned char *, uint32_t *);
inline unsigned int bloom_chk_hash160(unsigned char *bloom, uint32_t *h) {
unsigned int t;
t = BH00(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH01(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH02(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH03(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH04(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH05(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH06(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH07(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH08(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH09(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH10(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH11(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH12(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH13(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH14(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH15(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH16(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH17(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH18(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
t = BH19(h); if (BLOOM_GET_BIT(t) == 0) { return 0; }
return 1;
}
//#define bloom_new() malloc(536870912)
//unsigned char * bloom_load(unsigned char *);
//int bloom_save(unsigned char *, unsigned char *);
/* vim: set ts=2 sw=2 et ai si: */
#endif /* __BRAINFLAYER_BLOOM_H_ */

bf/brainflayer.c Normal file
@@ -0,0 +1,880 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <time.h>
#include <unistd.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <signal.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <openssl/sha.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/sysinfo.h>
#include "ripemd160_256.h"
#include "ec_pubkey_fast.h"
#include "hex.h"
#include "bloom.h"
#include "mmapf.h"
#include "hash160.h"
#include "hsearchf.h"
#include "algo/brainv2.h"
#include "algo/warpwallet.h"
#include "algo/brainwalletio.h"
#include "algo/sha3.h"
// raise this if you really want, but quickly diminishing returns
#define BATCH_MAX 4096
static int brainflayer_is_init = 0;
typedef struct pubhashfn_s {
void (*fn)(hash160_t *, const unsigned char *);
char id;
} pubhashfn_t;
static unsigned char *mem;
static mmapf_ctx bloom_mmapf;
static unsigned char *bloom = NULL;
static unsigned char *unhexed = NULL;
static size_t unhexed_sz = 4096;
#define bail(code, ...) \
do { \
fprintf(stderr, __VA_ARGS__); \
exit(code); \
} while (0)
#define chkmalloc(S) _chkmalloc(S, __FILE__, __LINE__)
static void * _chkmalloc(size_t size, unsigned char *file, unsigned int line) {
void *ptr = malloc(size);
if (ptr == NULL) {
bail(1, "malloc(%zu) failed at %s:%u: %s\n", size, file, line, strerror(errno));
}
return ptr;
}
#define chkrealloc(P, S) _chkrealloc(P, S, __FILE__, __LINE__);
static void * _chkrealloc(void *ptr, size_t size, unsigned char *file, unsigned int line) {
void *ptr2 = realloc(ptr, size);
if (ptr2 == NULL) {
bail(1, "realloc(%p, %zu) failed at %s:%u: %s\n", ptr, size, file, line, strerror(errno));
}
return ptr2;
}
uint64_t getns() {
uint64_t ns;
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
ns = ts.tv_nsec;
ns += ts.tv_sec * 1000000000ULL;
return ns;
}
static inline void brainflayer_init_globals() {
/* only initialize stuff once */
if (!brainflayer_is_init) {
/* initialize buffers */
mem = chkmalloc(4096);
unhexed = chkmalloc(unhexed_sz);
/* set the flag */
brainflayer_is_init = 1;
}
}
// function pointers
static int (*input2priv)(unsigned char *, unsigned char *, size_t);
/* bitcoin uncompressed address */
static void uhash160(hash160_t *h, const unsigned char *upub) {
SHA256_CTX ctx;
unsigned char hash[SHA256_DIGEST_LENGTH];
SHA256_Init(&ctx);
SHA256_Update(&ctx, upub, 65);
SHA256_Final(hash, &ctx);
ripemd160_256(hash, h->uc);
}
/* bitcoin compressed address */
static void chash160(hash160_t *h, const unsigned char *upub) {
SHA256_CTX ctx;
unsigned char cpub[33];
unsigned char hash[SHA256_DIGEST_LENGTH];
/* quick and dirty public key compression */
cpub[0] = 0x02 | (upub[64] & 0x01);
memcpy(cpub + 1, upub + 1, 32);
SHA256_Init(&ctx);
SHA256_Update(&ctx, cpub, 33);
SHA256_Final(hash, &ctx);
ripemd160_256(hash, h->uc);
}
/* ethereum address */
static void ehash160(hash160_t *h, const unsigned char *upub) {
SHA3_256_CTX ctx;
unsigned char hash[SHA256_DIGEST_LENGTH];
/* compute hash160 for uncompressed public key */
/* keccak_256_last160(pub) */
KECCAK_256_Init(&ctx);
KECCAK_256_Update(&ctx, upub+1, 64);
KECCAK_256_Final(hash, &ctx);
memcpy(h->uc, hash+12, 20);
}
/* msb of x coordinate of public key */
static void xhash160(hash160_t *h, const unsigned char *upub) {
memcpy(h->uc, upub+1, 20);
}
static int pass2priv(unsigned char *priv, unsigned char *pass, size_t pass_sz) {
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, pass, pass_sz);
SHA256_Final(priv, &ctx);
return 0;
}
static int keccak2priv(unsigned char *priv, unsigned char *pass, size_t pass_sz) {
SHA3_256_CTX ctx;
KECCAK_256_Init(&ctx);
KECCAK_256_Update(&ctx, pass, pass_sz);
KECCAK_256_Final(priv, &ctx);
return 0;
}
/* ether.camp "2031 passes of SHA-3 (Keccak)" */
static int camp2priv(unsigned char *priv, unsigned char *pass, size_t pass_sz) {
SHA3_256_CTX ctx;
int i;
KECCAK_256_Init(&ctx);
KECCAK_256_Update(&ctx, pass, pass_sz);
KECCAK_256_Final(priv, &ctx);
for (i = 1; i < 2031; ++i) {
KECCAK_256_Init(&ctx);
KECCAK_256_Update(&ctx, priv, 32);
KECCAK_256_Final(priv, &ctx);
}
return 0;
}
static int sha32priv(unsigned char *priv, unsigned char *pass, size_t pass_sz) {
SHA3_256_CTX ctx;
SHA3_256_Init(&ctx);
SHA3_256_Update(&ctx, pass, pass_sz);
SHA3_256_Final(priv, &ctx);
return 0;
}
/*
static int dicap2hash160(unsigned char *pass, size_t pass_sz) {
SHA3_256_CTX ctx;
int i, ret;
KECCAK_256_Init(&ctx);
KECCAK_256_Update(&ctx, pass, pass_sz);
KECCAK_256_Final(priv256, &ctx);
for (i = 0; i < 16384; ++i) {
KECCAK_256_Init(&ctx);
KECCAK_256_Update(&ctx, priv256, 32);
KECCAK_256_Final(priv256, &ctx);
}
for (;;) {
ret = priv2hash160(priv256);
if (hash160_uncmp.uc[0] == 0) { break; }
KECCAK_256_Init(&ctx);
KECCAK_256_Update(&ctx, priv256, 32);
KECCAK_256_Final(priv256, &ctx);
}
return ret;
}
*/
static int rawpriv2priv(unsigned char *priv, unsigned char *rawpriv, size_t rawpriv_sz) {
memcpy(priv, rawpriv, rawpriv_sz);
return 0;
}
static unsigned char *kdfsalt;
static size_t kdfsalt_sz;
static int warppass2priv(unsigned char *priv, unsigned char *pass, size_t pass_sz) {
int ret;
if ((ret = warpwallet(pass, pass_sz, kdfsalt, kdfsalt_sz, priv)) != 0) return ret;
pass[pass_sz] = 0;
return 0;
}
static int bwiopass2priv(unsigned char *priv, unsigned char *pass, size_t pass_sz) {
int ret;
if ((ret = brainwalletio(pass, pass_sz, kdfsalt, kdfsalt_sz, priv)) != 0) return ret;
pass[pass_sz] = 0;
return 0;
}
static int brainv2pass2priv(unsigned char *priv, unsigned char *pass, size_t pass_sz) {
unsigned char hexout[33];
int ret;
if ((ret = brainv2(pass, pass_sz, kdfsalt, kdfsalt_sz, hexout)) != 0) return ret;
pass[pass_sz] = 0;
return pass2priv(priv, hexout, sizeof(hexout)-1);
}
static unsigned char *kdfpass;
static size_t kdfpass_sz;
static int warpsalt2priv(unsigned char *priv, unsigned char *salt, size_t salt_sz) {
int ret;
if ((ret = warpwallet(kdfpass, kdfpass_sz, salt, salt_sz, priv)) != 0) return ret;
salt[salt_sz] = 0;
return 0;
}
static int bwiosalt2priv(unsigned char *priv, unsigned char *salt, size_t salt_sz) {
int ret;
if ((ret = brainwalletio(kdfpass, kdfpass_sz, salt, salt_sz, priv)) != 0) return ret;
salt[salt_sz] = 0;
return 0;
}
static int brainv2salt2priv(unsigned char *priv, unsigned char *salt, size_t salt_sz) {
unsigned char hexout[33];
int ret;
if ((ret = brainv2(kdfpass, kdfpass_sz, salt, salt_sz, hexout)) != 0) return ret;
salt[salt_sz] = 0;
return pass2priv(priv, hexout, sizeof(hexout)-1);
}
static unsigned char rushchk[5];
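/* rushwallet key derivation, as reconstructed from the code below:
 * priv = SHA256(fragment || hex(SHA256(password))), where the fragment is
 * the url fragment up to and including the '!', and the ten hex digits
 * after the '!' are a checksum that must match the first five bytes of the
 * derived key */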
static int rush2priv(unsigned char *priv, unsigned char *pass, size_t pass_sz) {
SHA256_CTX ctx;
unsigned char hash[SHA256_DIGEST_LENGTH];
unsigned char userpasshash[SHA256_DIGEST_LENGTH*2+1];
SHA256_Init(&ctx);
SHA256_Update(&ctx, pass, pass_sz);
SHA256_Final(hash, &ctx);
hex(hash, sizeof(hash), userpasshash, sizeof(userpasshash));
SHA256_Init(&ctx);
// kdfsalt should be the fragment up to the !
SHA256_Update(&ctx, kdfsalt, kdfsalt_sz);
SHA256_Update(&ctx, userpasshash, 64);
SHA256_Final(priv, &ctx);
// early exit if the checksum doesn't match
if (memcmp(priv, rushchk, sizeof(rushchk)) != 0) { return -1; }
return 0;
}
inline static int priv_incr(unsigned char *upub, unsigned char *priv) {
int sz;
secp256k1_ec_pubkey_incr(upub, &sz, priv);
return 0;
}
inline static void priv2pub(unsigned char *upub, const unsigned char *priv) {
int sz;
secp256k1_ec_pubkey_create_precomp(upub, &sz, priv);
}
inline static void fprintresult(FILE *f, hash160_t *hash,
unsigned char compressed,
unsigned char *type,
unsigned char *input) {
unsigned char hexed0[41];
fprintf(f, "%s:%c:%s:%s\n",
hex(hash->uc, 20, hexed0, sizeof(hexed0)),
compressed,
type,
input);
}
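/* output line format: "hash160hex:type_id:input_mode:input" - e.g. (values
 * purely illustrative) "ffffffffffffffffffffffffffffffffffffffff:c:sha256:hunter2" */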
struct {
float alpha, ilines_rate, ilines_rate_avg;
uint64_t report_mask;
uint64_t time_last, time_curr, time_delta;
uint64_t time_start, time_elapsed;
uint64_t ilines_last, ilines_curr, ilines_delta;
uint64_t olines;
int batch_stopped;
} stats;
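/* report_mask is kept as a contiguous run of low one-bits; a status update
 * fires when (ilines_curr & report_mask) == 0, so widening or narrowing the
 * mask halves or doubles the reporting frequency */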
void gen_stats(void) {
stats.time_curr = getns();
stats.time_delta = stats.time_curr - stats.time_last;
stats.time_elapsed = stats.time_curr - stats.time_start;
stats.time_last = stats.time_curr;
stats.ilines_delta = stats.ilines_curr - stats.ilines_last;
stats.ilines_last = stats.ilines_curr;
stats.ilines_rate = (stats.ilines_delta * 1.0e9) / (stats.time_delta * 1.0);
if (stats.batch_stopped < BATCH_MAX) {
/* report overall average on last status update */
stats.ilines_rate_avg = (stats.ilines_curr * 1.0e9) / (stats.time_elapsed * 1.0);
} else if (stats.ilines_rate_avg < 0) {
stats.ilines_rate_avg = stats.ilines_rate;
/* target reporting frequency to about once every five seconds */
} else if (stats.time_delta < 2500000000) {
stats.report_mask = (stats.report_mask << 1) | 1;
stats.ilines_rate_avg = stats.ilines_rate; /* reset EMA */
} else if (stats.time_delta > 10000000000) {
stats.report_mask >>= 1;
stats.ilines_rate_avg = stats.ilines_rate; /* reset EMA */
} else {
/* exponential moving average */
stats.ilines_rate_avg = stats.alpha
* stats.ilines_rate
+ (1 - stats.alpha)
* stats.ilines_rate_avg;
}
}
void print_stats(void) {
fprintf(stderr,
        "rate: %9.2f p/s"
        " found: %5ju/%-10ju"
        " elapsed: %8.3f s",
        stats.ilines_rate_avg,
        (uintmax_t)stats.olines,   /* cast so the format is correct even where size_t != 64 bits */
        (uintmax_t)stats.ilines_curr,
        stats.time_elapsed / 1.0e9
);
fflush(stderr);
}
void init_stats(void) {
stats.time_start = stats.time_last = getns();
stats.olines = stats.ilines_last = stats.ilines_curr = 0;
stats.ilines_rate_avg = -1;
stats.alpha = 0.500;
stats.report_mask = 0;
stats.batch_stopped = -1;
}
void sig_stats(int signo){
if(signo == SIGUSR1) print_stats();
}
void usage(unsigned char *name) {
printf("Usage: %s [OPTION]...\n\n\
-a open output file in append mode\n\
-b FILE check for matches against bloom filter FILE\n\
-f FILE verify matches against sorted hash160s in FILE\n\
-i FILE read from FILE instead of stdin\n\
-o FILE write to FILE instead of stdout\n\
-c TYPES use TYPES for public key to hash160 computation\n\
multiple can be specified, for example the default\n\
is 'uc', which will check for both uncompressed\n\
and compressed addresses using Bitcoin's algorithm\n\
u - uncompressed address\n\
c - compressed address\n\
e - ethereum address\n\
x - most significant bits of x coordinate\n\
-t TYPE inputs are TYPE - supported types:\n\
sha256 (default) - classic brainwallet\n\
sha3 - sha3-256\n\
priv - raw private keys (requires -x)\n\
warp - WarpWallet (supports -s or -p)\n\
bwio - brainwallet.io (supports -s or -p)\n\
bv2 - brainv2 (supports -s or -p) VERY SLOW\n\
rush - rushwallet (requires -r) FAST\n\
keccak - keccak256 (ethercamp/old ethaddress)\n\
camp2 - keccak256 * 2031 (new ethercamp)\n\
-x treat input as hex encoded\n\
-s SALT use SALT for salted input types (default: none)\n\
-p PASSPHRASE use PASSPHRASE for salted input types, inputs\n\
will be treated as salts\n\
-r FRAGMENT use FRAGMENT for cracking rushwallet passphrase\n\
-I HEXPRIVKEY incremental private key cracking mode, starting\n\
at HEXPRIVKEY (supports -n) FAST\n\
-k K skip the first K lines of input\n\
-n K/N use only the Kth of every N input lines\n\
-B batch size for affine transformations\n\
must be a power of 2 (default/max: %d)\n\
-w WINDOW_SIZE window size for ecmult table (default: 16)\n\
uses about 3 * 2^w KiB memory on startup, but\n\
only about 2^w KiB once the table is built\n\
-m FILE load ecmult table from FILE\n\
the ecmtabgen tool can build such a table\n\
-v verbose - display cracking progress\n\
-h show this help\n", name, BATCH_MAX);
//q, --quiet suppress non-error messages
exit(1);
}
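/* example invocations (illustrative only - the .blf/.bin/.txt file names
 * below are hypothetical):
 *   brainflayer -v -b btc.blf -f btc.bin -i phrases.txt -o found.txt
 *   brainflayer -v -t warp -s mysalt -i phrases.txt
 *   brainflayer -v -I 0000000000000000000000000000000000000000000000000000000000000001 -b btc.blf
 */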
int main(int argc, char **argv) {
FILE *ifile = stdin;
FILE *ofile = stdout;
FILE *ffile = NULL;
int ret, c, i, j;
int64_t raw_lines = -1;
int skipping = 0, tty = 0;
unsigned char modestr[64];
int spok = 0, aopt = 0, vopt = 0, wopt = 16, xopt = 0;
int nopt_mod = 0, nopt_rem = 0, Bopt = 0;
uint64_t kopt = 0;
unsigned char *bopt = NULL, *iopt = NULL, *oopt = NULL;
unsigned char *topt = NULL, *sopt = NULL, *popt = NULL;
unsigned char *mopt = NULL, *fopt = NULL, *ropt = NULL;
unsigned char *Iopt = NULL, *copt = NULL;
unsigned char priv[64];
hash160_t hash160;
pubhashfn_t pubhashfn[8];
memset(pubhashfn, 0, sizeof(pubhashfn));
int batch_stopped = -1;
char *batch_line[BATCH_MAX];
size_t batch_line_sz[BATCH_MAX];
int batch_line_read[BATCH_MAX];
unsigned char batch_priv[BATCH_MAX][32];
unsigned char batch_upub[BATCH_MAX][65];
while ((c = getopt(argc, argv, "avxb:hi:k:f:m:n:o:p:s:r:c:t:w:I:B:")) != -1) {
switch (c) {
case 'a':
aopt = 1; // open output file in append mode
break;
case 'k':
kopt = strtoull(optarg, NULL, 10); // skip first k lines of input
skipping = 1;
break;
case 'n':
// only try the rem'th of every mod lines (one indexed)
nopt_rem = atoi(optarg) - 1;
optarg = strchr(optarg, '/');
if (optarg != NULL) { nopt_mod = atoi(optarg+1); }
skipping = 1;
break;
case 'B':
Bopt = atoi(optarg);
break;
case 'w':
  if (wopt > 1) wopt = atoi(optarg); // skipped if '-m' already pinned wopt to 1
  break;
case 'm':
mopt = optarg; // table file
wopt = 1; // auto
break;
case 'v':
vopt = 1; // verbose
break;
case 'b':
bopt = optarg; // bloom filter file
break;
case 'f':
fopt = optarg; // full filter file
break;
case 'i':
iopt = optarg; // input file
break;
case 'o':
oopt = optarg; // output file
break;
case 'x':
xopt = 1; // input is hex encoded
break;
case 's':
sopt = optarg; // salt
break;
case 'p':
popt = optarg; // passphrase
break;
case 'r':
ropt = optarg; // rushwallet
break;
case 'c':
copt = optarg; // type of hash160
break;
case 't':
topt = optarg; // type of input
break;
case 'I':
Iopt = optarg; // start key for incremental
xopt = 1; // input is hex encoded
break;
case 'h':
// show help
usage(argv[0]);
return 0;
case '?':
// show error
return 1;
default:
// should never be reached...
printf("got option '%c' (%d)\n", c, c);
return 1;
}
}
if (optind < argc) {
if (optind == 1 && argc == 2) {
// older versions of brainflayer had the bloom filter file as a
// single optional argument, this keeps compatibility with that
bopt = argv[1];
} else {
fprintf(stderr, "Invalid arguments:\n");
while (optind < argc) {
fprintf(stderr, " '%s'\n", argv[optind++]);
}
exit(1);
}
}
if (nopt_rem != 0 || nopt_mod != 0) {
// note that nopt_rem has had one subtracted at option parsing
if (nopt_rem >= nopt_mod) {
bail(1, "Invalid '-n' argument, remainder '%d' must be <= modulus '%d'\n", nopt_rem+1, nopt_mod);
} else if (nopt_rem < 0) {
bail(1, "Invalid '-n' argument, remainder '%d' must be > 0\n", nopt_rem+1);
} else if (nopt_mod < 1) {
bail(1, "Invalid '-n' argument, modulus '%d' must be > 0\n", nopt_mod);
}
}
if (wopt < 1 || wopt > 28) {
bail(1, "Invalid window size '%d' - must be >= 1 and <= 28\n", wopt);
} else {
// very rough sanity check of window size
struct sysinfo info;
sysinfo(&info);
uint64_t sysram = info.mem_unit * info.totalram;
if (3584LLU*(1<<wopt) > sysram) {
bail(1, "Not enough ram for requested window size '%d'\n", wopt);
}
}
if (Bopt) { // if unset, will be set later
if (Bopt < 1 || Bopt > BATCH_MAX) {
bail(1, "Invalid '-B' argument, batch size '%d' - must be >= 1 and <= %d\n", Bopt, BATCH_MAX);
} else if (Bopt & (Bopt - 1)) { // https://graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2
bail(1, "Invalid '-B' argument, batch size '%d' is not a power of 2\n", Bopt);
}
}
if (Iopt) {
if (strlen(Iopt) != 64) {
bail(1, "The starting key passed to the '-I' must be 64 hex digits exactly\n");
}
if (topt) {
bail(1, "Cannot specify input type in incremental mode\n");
}
topt = "priv";
// normally, getline would allocate the batch_line entries, but we need to
// do this to give the processing loop somewhere to write to in incr mode
for (i = 0; i < BATCH_MAX; ++i) {
batch_line[i] = Iopt;
}
unhex(Iopt, sizeof(priv)*2, priv, sizeof(priv));
skipping = 1;
if (!nopt_mod) { nopt_mod = 1; }
}
/* handle copt */
if (copt == NULL) { copt = "uc"; }
i = 0;
while (copt[i]) {
switch (copt[i]) {
case 'u':
pubhashfn[i].fn = &uhash160;
break;
case 'c':
pubhashfn[i].fn = &chash160;
break;
case 'e':
pubhashfn[i].fn = &ehash160;
break;
case 'x':
pubhashfn[i].fn = &xhash160;
break;
default:
bail(1, "Unknown hash160 type '%c'.\n", copt[i]);
}
if (strchr(copt + i + 1, copt[i])) {
bail(1, "Duplicate hash160 type '%c'.\n", copt[i]);
}
pubhashfn[i].id = copt[i];
++i;
}
/* handle topt */
if (topt == NULL) { topt = "sha256"; }
if (strcmp(topt, "sha256") == 0) {
input2priv = &pass2priv;
} else if (strcmp(topt, "priv") == 0) {
if (!xopt) {
bail(1, "raw private key input requires -x");
}
input2priv = &rawpriv2priv;
} else if (strcmp(topt, "warp") == 0) {
if (!Bopt) { Bopt = 1; } // don't batch transform for slow input hashes by default
spok = 1;
input2priv = popt ? &warpsalt2priv : &warppass2priv;
} else if (strcmp(topt, "bwio") == 0) {
if (!Bopt) { Bopt = 1; } // don't batch transform for slow input hashes by default
spok = 1;
input2priv = popt ? &bwiosalt2priv : &bwiopass2priv;
} else if (strcmp(topt, "bv2") == 0) {
if (!Bopt) { Bopt = 1; } // don't batch transform for slow input hashes by default
spok = 1;
input2priv = popt ? &brainv2salt2priv : &brainv2pass2priv;
} else if (strcmp(topt, "rush") == 0) {
input2priv = &rush2priv;
} else if (strcmp(topt, "camp2") == 0) {
input2priv = &camp2priv;
} else if (strcmp(topt, "keccak") == 0) {
input2priv = &keccak2priv;
} else if (strcmp(topt, "sha3") == 0) {
input2priv = &sha32priv;
// } else if (strcmp(topt, "dicap") == 0) {
// input2priv = &dicap2priv;
} else {
bail(1, "Unknown input type '%s'.\n", topt);
}
if (spok) {
if (sopt && popt) {
bail(1, "Cannot specify both a salt and a passphrase\n");
}
if (popt) {
kdfpass = popt;
kdfpass_sz = strlen(popt);
} else {
if (sopt) {
kdfsalt = sopt;
kdfsalt_sz = strlen(kdfsalt);
} else {
kdfsalt = chkmalloc(0);
kdfsalt_sz = 0;
}
}
} else {
if (popt) {
  bail(1, "Specifying a passphrase is not supported with input type '%s'\n", topt);
} else if (sopt) {
  bail(1, "Specifying a salt is not supported with input type '%s'\n", topt);
}
}
if (ropt) {
if (input2priv != &rush2priv) {
bail(1, "Specifying a url fragment only supported with input type 'rush'\n");
}
kdfsalt = ropt;
kdfsalt_sz = strlen(kdfsalt) - sizeof(rushchk)*2;
if (kdfsalt[kdfsalt_sz-1] != '!') {
bail(1, "Invalid rushwallet url fragment '%s'\n", kdfsalt);
}
unhex(kdfsalt+kdfsalt_sz, sizeof(rushchk)*2, rushchk, sizeof(rushchk));
kdfsalt[kdfsalt_sz] = '\0';
} else if (input2priv == &rush2priv) {
bail(1, "The '-r' option is required for rushwallet.\n");
}
snprintf(modestr, sizeof(modestr), xopt ? "(hex)%s" : "%s", topt);
if (bopt) {
if ((ret = mmapf(&bloom_mmapf, bopt, BLOOM_SIZE, MMAPF_RNDRD)) != MMAPF_OKAY) {
bail(1, "failed to open bloom filter '%s': %s\n", bopt, mmapf_strerror(ret));
} else if (bloom_mmapf.mem == NULL) {
bail(1, "got NULL pointer trying to set up bloom filter\n");
}
bloom = bloom_mmapf.mem;
}
if (fopt) {
if (!bopt) {
bail(1, "The '-f' option must be used with a bloom filter\n");
}
if ((ffile = fopen(fopt, "r")) == NULL) {
bail(1, "failed to open '%s' for reading: %s\n", fopt, strerror(errno));
}
}
if (iopt) {
if ((ifile = fopen(iopt, "r")) == NULL) {
bail(1, "failed to open '%s' for reading: %s\n", iopt, strerror(errno));
}
// increases readahead window, don't really care if it fails
posix_fadvise(fileno(ifile), 0, 0, POSIX_FADV_SEQUENTIAL);
}
if (oopt && (ofile = fopen(oopt, (aopt ? "a" : "w"))) == NULL) {
bail(1, "failed to open '%s' for writing: %s\n", oopt, strerror(errno));
}
/* line buffer output */
setvbuf(ofile, NULL, _IOLBF, 0);
/* line buffer stderr */
setvbuf(stderr, NULL, _IOLBF, 0);
if (vopt && ofile == stdout && isatty(fileno(stdout))) { tty = 1; }
brainflayer_init_globals();
if (secp256k1_ec_pubkey_precomp_table(wopt, mopt) != 0) {
bail(1, "failed to initialize precomputed table\n");
}
if (secp256k1_ec_pubkey_batch_init(BATCH_MAX) != 0) {
bail(1, "failed to initialize batch point conversion structures\n");
}
if (vopt) {
init_stats();
if(signal(SIGUSR1, sig_stats) == SIG_ERR){
fprintf(stderr, "Can't set sig_stat");
fflush(stderr);
}
}
// set default batch size
if (!Bopt) { Bopt = BATCH_MAX; }
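// each pass through this loop fills batch_priv/batch_upub with up to Bopt
// keys, either by stepping the incremental key (-I) or by reading input
// lines and hashing them into private keys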
for (;;) {
if (Iopt) {
if (skipping) {
priv_add_uint32(priv, nopt_rem + kopt);
skipping = 0;
}
secp256k1_ec_pubkey_batch_incr(Bopt, nopt_mod, batch_upub, batch_priv, priv);
memcpy(priv, batch_priv[Bopt-1], 32);
priv_add_uint32(priv, nopt_mod);
batch_stopped = Bopt;
stats.batch_stopped = batch_stopped;
} else {
for (i = 0; i < Bopt; ++i) {
if ((batch_line_read[i] = getline(&batch_line[i], &batch_line_sz[i], ifile)-1) > -1) {
if (skipping) {
++raw_lines;
if (kopt && raw_lines < kopt) { --i; continue; }
if (nopt_mod && raw_lines % nopt_mod != nopt_rem) { --i; continue; }
}
} else {
break;
}
batch_line[i][batch_line_read[i]] = 0;
if (xopt) {
if (batch_line_read[i] / 2 > unhexed_sz) {
unhexed_sz = batch_line_read[i];
unhexed = chkrealloc(unhexed, unhexed_sz);
}
// rewrite the input line from hex
unhex(batch_line[i], batch_line_read[i], unhexed, unhexed_sz);
if (input2priv(batch_priv[i], unhexed, batch_line_read[i]/2) != 0) {
fprintf(stderr, "input2priv failed! continuing...\n");
}
} else {
if (input2priv(batch_priv[i], batch_line[i], batch_line_read[i]) != 0) {
fprintf(stderr, "input2priv failed! continuing...\n");
}
}
}
// batch compute the public keys
secp256k1_ec_pubkey_batch_create(Bopt, batch_upub, batch_priv);
// save ending value from read loop
batch_stopped = i;
stats.batch_stopped = i;
}
// loop over the public keys
for (i = 0; i < batch_stopped; ++i) {
j = 0;
if (bloom) { /* crack mode */
// loop over pubkey hash functions
while (pubhashfn[j].fn != NULL) {
pubhashfn[j].fn(&hash160, batch_upub[i]);
if (bloom_chk_hash160(bloom, hash160.ul)) {
if (!fopt || hsearchf(ffile, &hash160)) {
if (tty) { fprintf(ofile, "\033[0K"); }
// reformat/populate the line if required
if (Iopt) {
hex(batch_priv[i], 32, batch_line[i], 65);
}
fprintresult(ofile, &hash160, pubhashfn[j].id, modestr, batch_line[i]);
++stats.olines;
}
}
++j;
}
} else { /* generate mode */
// reformat/populate the line if required
if (Iopt) {
hex(batch_priv[i], 32, batch_line[i], 65);
}
while (pubhashfn[j].fn != NULL) {
pubhashfn[j].fn(&hash160, batch_upub[i]);
fprintresult(ofile, &hash160, pubhashfn[j].id, modestr, batch_line[i]);
++j;
}
}
}
// end public key processing loop
// start stats
if (vopt) {
stats.ilines_curr += stats.batch_stopped;
if (stats.batch_stopped < BATCH_MAX || (stats.ilines_curr & stats.report_mask) == 0) {
gen_stats();
}
}
// end stats
// main loop exit condition
if (batch_stopped < Bopt) {
if (vopt) { fprintf(stderr, "\n"); }
break;
}
}
return 0;
}
/* vim: set ts=2 sw=2 et ai si: */

24
bf/crack.h Normal file
View file

@ -0,0 +1,24 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#ifndef __BRAINFLAYER_CRACK_H_
#define __BRAINFLAYER_CRACK_H_
#include <stdint.h>
#include <openssl/sha.h>
#include <openssl/ripemd.h>
#define PWF_BUF_SZ 1024
typedef union hash160_u {
unsigned char uc[RIPEMD160_DIGEST_LENGTH];
uint32_t ul[RIPEMD160_DIGEST_LENGTH>>2];
} hash160_t;
typedef struct keydata_u {
int state;
unsigned char password[PWF_BUF_SZ];
unsigned char priv[SHA256_DIGEST_LENGTH];
unsigned char uaddr[RIPEMD160_DIGEST_LENGTH];
unsigned char caddr[RIPEMD160_DIGEST_LENGTH];
} keydata_t;
/* vim: set ts=2 sw=2 et ai si: */
#endif /* __BRAINFLAYER_CRACK_H_ */

13
bf/dldummy.c Normal file
View file

@ -0,0 +1,13 @@
/* gets rid of warnings about openssl using dlopen when statically linked */
#include <stdlib.h>
static char *dlerrstr = "dynamic loader not available (using dldummy)";
//void * dlopen(const char *filename, int flags) { return NULL; }
void * dlopen(void) { return NULL; }
char * dlerror(void) { return dlerrstr; }
void * dlsym(void) { return NULL; }
int dlclose(void) { return -1; }
/* this is supposed to set some null pointers on error, but we don't bother */
int dladdr(void) { return -1; }

539
bf/ec_pubkey_fast.c Normal file
View file

@ -0,0 +1,539 @@
/* Copyright (c) 2015 Nicolas Courtois, Guangyan Song, Ryan Castellucci, All Rights Reserved */
#include "ec_pubkey_fast.h"
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include "secp256k1/src/libsecp256k1-config.h"
#include "secp256k1/include/secp256k1.h"
#include "secp256k1/src/util.h"
#include "secp256k1/src/num_impl.h"
#include "secp256k1/src/field_impl.h"
#include "secp256k1/src/field_10x26_impl.h"
#include "secp256k1/src/scalar_impl.h"
#include "secp256k1/src/group_impl.h"
#include "secp256k1/src/ecmult_gen_impl.h"
#include "secp256k1/src/ecmult.h"
#include "secp256k1/src/eckey_impl.h"
static int secp256k1_eckey_pubkey_parse(secp256k1_ge_t *elem, const unsigned char *pub, int size);
#include "mmapf.h"
#undef ASSERT
#define READBIT(A, B) ((A >> (B & 7)) & 1)
#define SETBIT(T, B, V) (T = V ? T | (1<<B) : T & ~(1<<B))
int n_windows = 0;
int n_values;
secp256k1_gej_t nums_gej;
secp256k1_ge_t *prec;
int remaining = 0;
int WINDOW_SIZE = 0;
size_t MMAP_SIZE;
mmapf_ctx prec_mmapf;
int secp256k1_ec_pubkey_precomp_table_save(int window_size, unsigned char *filename) {
int fd, ret;
size_t records;
FILE *dest;
if ((ret = secp256k1_ec_pubkey_precomp_table(window_size, NULL)) < 0)
return ret;
if ((fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0660)) < 0)
return fd;
records = n_windows*n_values;
dest = fdopen(fd, "w");
if (fwrite(prec, sizeof(secp256k1_ge_t), records, dest) != records) {
  fclose(dest);
  return -1;
}
fclose(dest); /* flush the buffered table data before reporting success */
return 0;
}
int secp256k1_ec_pubkey_precomp_table(int window_size, unsigned char *filename) {
int ret;
struct stat sb;
size_t prec_sz;
secp256k1_gej_t gj; // base point in jacobian coordinates
secp256k1_gej_t *table;
if (filename) {
if (stat(filename, &sb) == 0) {
if (!S_ISREG(sb.st_mode))
return -100;
} else {
return -101;
}
}
// try to find a window size that matches the file size
for (;;) {
WINDOW_SIZE = window_size;
n_values = 1 << window_size;
if (256 % window_size == 0) {
n_windows = (256 / window_size);
} else {
n_windows = (256 / window_size) + 1;
}
remaining = 256 % window_size;
prec_sz = n_windows*n_values*sizeof(secp256k1_ge_t);
if (!filename || sb.st_size <= prec_sz)
break;
++window_size;
}
if ((ret = mmapf(&prec_mmapf, filename, prec_sz, MMAPF_RNDRD)) != MMAPF_OKAY) {
fprintf(stderr, "failed to open ecmult table '%s': %s\n", filename, mmapf_strerror(ret));
exit(1);
} else if (prec_mmapf.mem == NULL) {
fprintf(stderr, "got NULL pointer from mmapf\n");
exit(1);
}
prec = prec_mmapf.mem;
if (filename) { return 0; }
table = malloc(n_windows*n_values*sizeof(secp256k1_gej_t));
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
//fprintf(stderr, "%d %d %d %d %zu\n", window_size, n_windows, n_values, remaining, prec_sz);
static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
secp256k1_fe_t nums_x;
secp256k1_ge_t nums_ge;
VERIFY_CHECK(secp256k1_fe_set_b32(&nums_x, nums_b32));
VERIFY_CHECK(secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0));
secp256k1_gej_set_ge(&nums_gej, &nums_ge);
/* Add G to make the bits in x uniformly distributed. */
secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL);
secp256k1_gej_t gbase;
secp256k1_gej_t numsbase;
gbase = gj; /* (2^w_size)^num_of_windows * G */
numsbase = nums_gej; /* 2^num_of_windows * nums. */
for (int j = 0; j < n_windows; j++) {
//[number of windows][each value from 0 - (2^window_size - 1)]
table[j*n_values] = numsbase;
for (int i = 1; i < n_values; i++) {
secp256k1_gej_add_var(&table[j*n_values + i], &table[j*n_values + i - 1], &gbase, NULL);
}
for (int i = 0; i < window_size; i++) {
secp256k1_gej_double_var(&gbase, &gbase, NULL);
}
/* Multiply numbase by 2. */
secp256k1_gej_double_var(&numsbase, &numsbase, NULL);
if (j == n_windows-2) {
/* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
secp256k1_gej_neg(&numsbase, &numsbase);
secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
}
}
secp256k1_ge_set_all_gej_var(n_windows*n_values, prec, table, 0);
free(table);
return 0;
}
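/* fixed-window scalar multiplication: the 256-bit private key is split into
 * n_windows chunks of WINDOW_SIZE bits, each chunk indexes one precomputed
 * point in its row of the table, and the selected points are summed; the
 * NUMS blinding offsets baked into the rows cancel out over the full sum,
 * so no correction step is needed here */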
static void secp256k1_ecmult_gen2(secp256k1_gej_t *r, const unsigned char *seckey){
unsigned char a[256];
for (int j = 0; j < 32; j++) {
for (int i = 0; i < 8; i++) {
a[i+j*8] = READBIT(seckey[31-j], i);
}
}
r->infinity = 1;
int bits;
for (int j = 0; j < n_windows; j++) {
if (j == n_windows - 1 && remaining != 0) {
bits = 0;
for (int i = 0; i < remaining; i++) {
SETBIT(bits,i,a[i + j * WINDOW_SIZE]);
}
} else {
bits = 0;
for (int i = 0; i < WINDOW_SIZE; i++) {
SETBIT(bits,i,a[i + j * WINDOW_SIZE]);
}
}
#if 1
secp256k1_gej_add_ge_var(r, r, &prec[j*n_values + bits], NULL);
#else
secp256k1_gej_add_ge(r, r, &prec[j*n_values + bits]);
#endif
}
}
#ifdef USE_BL_ARITHMETIC
static void secp256k1_gej_add_ge_bl(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b, secp256k1_fe_t *rzr) {
secp256k1_fe_t z1z1, /*z1,*/ u2, x1, y1, t0, s2, h, hh, i, j, t1, rr, v, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
// 7M + 4S + 2 normalize + 22 mul_int/add/negate
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
secp256k1_gej_set_ge(r, b);
return;
}
if (b->infinity) {
if (rzr) {
secp256k1_fe_set_int(rzr, 1);
}
*r = *a;
return;
}
r->infinity = 0;
x1 = a->x; secp256k1_fe_normalize_weak(&x1);
y1 = a->y; secp256k1_fe_normalize_weak(&y1);
secp256k1_fe_sqr(&z1z1, &a->z); // z1z1 = z1^2
secp256k1_fe_mul(&u2, &b->x, &z1z1); // u2 = x2*z1z1
secp256k1_fe_mul(&t0, &a->z, &z1z1); // t0 = z1*z1z1
secp256k1_fe_mul(&s2, &b->y, &t0); // s2 = y2 * t0
secp256k1_fe_negate(&h, &x1, 1); secp256k1_fe_add(&h, &u2); // h = u2-x1 (3)
secp256k1_fe_sqr(&hh,&h); // hh = h^2
i = hh; secp256k1_fe_mul_int(&i,4); // i = 4*hh
if (secp256k1_fe_normalizes_to_zero_var(&h)) {
if (secp256k1_fe_normalizes_to_zero_var(&i)) {
secp256k1_gej_double_var(r, a, rzr);
} else {
if (rzr) {
secp256k1_fe_set_int(rzr, 0);
}
r->infinity = 1;
}
return;
}
secp256k1_fe_mul(&j,&h,&i); // j = h*i
secp256k1_fe_negate(&t1, &y1, 1); secp256k1_fe_add(&t1, &s2); // t1 = s2-y1
rr = t1; secp256k1_fe_mul_int(&rr, 2); // rr = 2 * t1;
secp256k1_fe_mul(&v, &x1, &i); // v = x1 * i
secp256k1_fe_sqr(&t2, &rr); // t2 = rr^2
t3 = v; secp256k1_fe_mul_int(&t3, 2); // t3 = 2*v
secp256k1_fe_negate(&t4, &j, 1); secp256k1_fe_add(&t4, &t2); // t4 = t2 - j
secp256k1_fe_negate(&r->x, &t3, 2); secp256k1_fe_add(&r->x, &t4); // x3 = t4 - t3;
//secp256k1_fe_normalize_weak(&r->x);
secp256k1_fe_negate(&t5, &r->x, 6); secp256k1_fe_add(&t5, &v); // t5 = v - x3
secp256k1_fe_mul(&t6,&y1,&j); // t6 = y1 * j
t7 = t6; secp256k1_fe_mul_int(&t7,2); // t7 = 2*t6;
secp256k1_fe_mul(&t8,&rr,&t5); // t8 = rr* t5;
secp256k1_fe_negate(&r->y, &t7, 2); secp256k1_fe_add(&r->y,&t8); // y3 = t8-t7
//secp256k1_fe_normalize_weak(&r->y);
t9 = h; secp256k1_fe_add(&t9, &a->z); // t9 = z1 + h
secp256k1_fe_sqr(&t10, &t9); // t10 = t9^2
secp256k1_fe_negate(&t11, &z1z1, 1); secp256k1_fe_add(&t11, &t10); // t11 = t10-z1z1
secp256k1_fe_negate(&r->z, &hh, 1); secp256k1_fe_add(&r->z, &t11); // z3 = t11 - hh
}
static void secp256k1_ecmult_gen_bl(secp256k1_gej_t *r, const unsigned char *seckey){
unsigned char a[256];
for (int j = 0; j < 32; j++){
for (int i = 0; i < 8; i++){
a[i+j*8] = READBIT(seckey[31-j], i);
}
}
r->infinity = 1;
int bits;
for (int j = 0; j < n_windows; j++) {
if (j == n_windows - 1 && remaining != 0) {
bits = 0;
for (int i = 0; i < remaining; i++) {
SETBIT(bits,i,a[i + j * WINDOW_SIZE]);
}
//bits = secp256k1_scalar_get_bits2(a, j * WINDOW_SIZE, remaining);
} else {
bits = 0;
for (int i = 0; i < WINDOW_SIZE; i++) {
SETBIT(bits,i,a[i + j * WINDOW_SIZE]);
}
//bits = secp256k1_scalar_get_bits2(a, j * WINDOW_SIZE, WINDOW_SIZE);
}
secp256k1_gej_add_ge_bl(r, r, &prec[j*n_values + bits], NULL);
}
}
#endif
int secp256k1_ec_pubkey_create_precomp(unsigned char *pub_chr, int *pub_chr_sz, const unsigned char *seckey) {
secp256k1_gej_t pj;
secp256k1_ge_t p;
#ifdef USE_BL_ARITHMETIC
secp256k1_ecmult_gen_bl(&pj, seckey);
#else
secp256k1_ecmult_gen2(&pj, seckey);
#endif
secp256k1_ge_set_gej(&p, &pj);
*pub_chr_sz = 65;
pub_chr[0] = 4;
secp256k1_fe_normalize_var(&p.x);
secp256k1_fe_normalize_var(&p.y);
secp256k1_fe_get_b32(pub_chr + 1, &p.x);
secp256k1_fe_get_b32(pub_chr + 33, &p.y);
return 0;
}
static secp256k1_gej_t *batchpj;
static secp256k1_ge_t *batchpa;
static secp256k1_fe_t *batchaz;
static secp256k1_fe_t *batchai;
int secp256k1_ec_pubkey_batch_init(unsigned int num) {
if (!batchpj) { batchpj = malloc(sizeof(secp256k1_gej_t)*num); }
if (!batchpa) { batchpa = malloc(sizeof(secp256k1_ge_t)*num); }
if (!batchaz) { batchaz = malloc(sizeof(secp256k1_fe_t)*num); }
if (!batchai) { batchai = malloc(sizeof(secp256k1_fe_t)*num); }
if (batchpj == NULL || batchpa == NULL || batchaz == NULL || batchai == NULL) {
return 1;
} else {
return 0;
}
}
void secp256k1_ge_set_all_gej_static(int num, secp256k1_ge_t *batchpa, secp256k1_gej_t *batchpj) {
size_t i;
for (i = 0; i < num; i++) {
batchaz[i] = batchpj[i].z;
}
secp256k1_fe_inv_all_var(num, batchai, batchaz);
for (i = 0; i < num; i++) {
secp256k1_ge_set_gej_zinv(&batchpa[i], &batchpj[i], &batchai[i]);
}
}
// call secp256k1_ec_pubkey_batch_init first or you get segfaults
int secp256k1_ec_pubkey_batch_incr(unsigned int num, unsigned int skip, unsigned char (*pub)[65], unsigned char (*sec)[32], unsigned char start[32]) {
// some of the values could be reused between calls, but dealing with the data
// structures is a pain, and with a reasonable batch size, the perf difference
// is tiny
int i;
unsigned char b32[32];
secp256k1_scalar_t priv, incr_s;
secp256k1_gej_t temp;
secp256k1_ge_t incr_a;
/* load starting private key */
secp256k1_scalar_set_b32(&priv, start, NULL);
/* fill first private */
secp256k1_scalar_get_b32(sec[0], &priv);
/* set up increments */
secp256k1_scalar_set_int(&incr_s, skip);
secp256k1_scalar_get_b32(b32, &incr_s);
#ifdef USE_BL_ARITHMETIC
secp256k1_ecmult_gen_bl(&temp, b32);
secp256k1_ecmult_gen_bl(&batchpj[0], start);
#else
secp256k1_ecmult_gen2(&temp, b32);
secp256k1_ecmult_gen2(&batchpj[0], start);
#endif
/* get affine public point for incrementing */
secp256k1_ge_set_gej_var(&incr_a, &temp);
for (i = 1; i < num; ++i) {
/* increment and write private key */
secp256k1_scalar_add(&priv, &priv, &incr_s);
secp256k1_scalar_get_b32(sec[i], &priv);
/* increment public key */
secp256k1_gej_add_ge_var(&batchpj[i], &batchpj[i-1], &incr_a, NULL);
}
/* convert all jacobian coordinates to affine */
secp256k1_ge_set_all_gej_static(num, batchpa, batchpj);
/* write out formatted public key */
for (i = 0; i < num; ++i) {
secp256k1_fe_normalize_var(&batchpa[i].x);
secp256k1_fe_normalize_var(&batchpa[i].y);
pub[i][0] = 0x04;
secp256k1_fe_get_b32(pub[i] + 1, &batchpa[i].x);
secp256k1_fe_get_b32(pub[i] + 33, &batchpa[i].y);
}
return 0;
}
// call secp256k1_ec_pubkey_batch_init first or you get segfaults
int secp256k1_ec_pubkey_batch_create(unsigned int num, unsigned char (*pub)[65], unsigned char (*sec)[32]) {
int i;
/* generate jacobian coordinates */
for (i = 0; i < num; ++i) {
#ifdef USE_BL_ARITHMETIC
secp256k1_ecmult_gen_bl(&batchpj[i], sec[i]);
#else
secp256k1_ecmult_gen2(&batchpj[i], sec[i]);
#endif
}
/* convert all jacobian coordinates to affine */
secp256k1_ge_set_all_gej_static(num, batchpa, batchpj);
/* write out formatted public key */
for (i = 0; i < num; ++i) {
secp256k1_fe_normalize_var(&batchpa[i].x);
secp256k1_fe_normalize_var(&batchpa[i].y);
pub[i][0] = 0x04;
secp256k1_fe_get_b32(pub[i] + 1, &batchpa[i].x);
secp256k1_fe_get_b32(pub[i] + 33, &batchpa[i].y);
}
return 0;
}
int secp256k1_scalar_add_b32(void * out, void * a, void *b) {
secp256k1_scalar_t tmp_a, tmp_b;
secp256k1_scalar_set_b32(&tmp_a, a, NULL);
secp256k1_scalar_set_b32(&tmp_b, b, NULL);
secp256k1_scalar_add(&tmp_a, &tmp_a, &tmp_b);
secp256k1_scalar_get_b32(out, &tmp_a);
return 0;
}
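/* byte-wise addition with carry propagation over a big-endian 32-byte
 * private key; p is the index of the byte to add into (31 = least
 * significant) */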
inline static void _priv_add(unsigned char *priv, unsigned char add, int p) {
priv[p] += add;
if (priv[p] < add) {
priv[--p] += 1;
while (p) {
if (priv[p] == 0) {
priv[--p] += 1;
} else {
break;
}
}
}
}
void priv_add_uint8(unsigned char *priv, unsigned char add) {
_priv_add(priv, add, 31);
}
void priv_add_uint32(unsigned char *priv, unsigned int add) {
int p = 31;
while (add) {
_priv_add(priv, add & 255, p--);
add >>= 8;
}
}
typedef struct {
secp256k1_gej_t pubj;
secp256k1_ge_t inc;
secp256k1_gej_t incj;
unsigned int n;
} pubkey_incr_t;
pubkey_incr_t pubkey_incr_ctx;
int secp256k1_ec_pubkey_incr_init(unsigned char *seckey, unsigned int add) {
unsigned char incr_priv[32];
memset(incr_priv, 0, sizeof(incr_priv));
memset(&pubkey_incr_ctx, 0, sizeof(pubkey_incr_ctx));
priv_add_uint32(incr_priv, add);
pubkey_incr_ctx.n = add;
#ifdef USE_BL_ARITHMETIC
secp256k1_ecmult_gen_bl(&pubkey_incr_ctx.pubj, seckey);
secp256k1_ecmult_gen_bl(&pubkey_incr_ctx.incj, incr_priv);
#else
secp256k1_ecmult_gen2(&pubkey_incr_ctx.pubj, seckey);
secp256k1_ecmult_gen2(&pubkey_incr_ctx.incj, incr_priv);
#endif
secp256k1_ge_set_gej(&pubkey_incr_ctx.inc, &pubkey_incr_ctx.incj);
return 0;
}
int secp256k1_ec_pubkey_incr(unsigned char *pub_chr, int *pub_chr_sz, unsigned char *seckey) {
secp256k1_ge_t p;
priv_add_uint32(seckey, pubkey_incr_ctx.n);
#ifdef USE_BL_ARITHMETIC
secp256k1_gej_add_ge_bl(&pubkey_incr_ctx.pubj, &pubkey_incr_ctx.pubj, &pubkey_incr_ctx.inc, NULL);
#else
secp256k1_gej_add_ge_var(&pubkey_incr_ctx.pubj, &pubkey_incr_ctx.pubj, &pubkey_incr_ctx.inc, NULL);
#endif
secp256k1_ge_set_gej(&p, &pubkey_incr_ctx.pubj);
*pub_chr_sz = 65;
pub_chr[0] = 4;
secp256k1_fe_normalize_var(&p.x);
secp256k1_fe_normalize_var(&p.y);
secp256k1_fe_get_b32(pub_chr + 1, &p.x);
secp256k1_fe_get_b32(pub_chr + 33, &p.y);
return 0;
}
void * secp256k1_ec_priv_to_gej(unsigned char *priv) {
secp256k1_gej_t *gej = malloc(sizeof(secp256k1_gej_t));
#ifdef USE_BL_ARITHMETIC
secp256k1_ecmult_gen_bl(gej, priv);
#else
secp256k1_ecmult_gen2(gej, priv);
#endif
return gej;
}
int secp256k1_ec_pubkey_add_gej(unsigned char *pub_chr, int *pub_chr_sz, void *add) {
secp256k1_ge_t in;
secp256k1_ge_t p;
secp256k1_gej_t out;
secp256k1_eckey_pubkey_parse(&in, pub_chr, *pub_chr_sz);
#ifdef USE_BL_ARITHMETIC
secp256k1_gej_add_ge_bl(&out, (secp256k1_gej_t *)add, &in, NULL);
#else
secp256k1_gej_add_ge_var(&out, (secp256k1_gej_t *)add, &in, NULL);
#endif
secp256k1_ge_set_gej(&p, &out);
*pub_chr_sz = 65;
pub_chr[0] = 4;
secp256k1_fe_normalize_var(&p.x);
secp256k1_fe_normalize_var(&p.y);
secp256k1_fe_get_b32(pub_chr + 1, &p.x);
secp256k1_fe_get_b32(pub_chr + 33, &p.y);
return 0;
}
/* vim: set ts=2 sw=2 et ai si: */

22
bf/ec_pubkey_fast.h Normal file
View file

@ -0,0 +1,22 @@
/* Copyright (c) 2015 Nicolas Courtois, Guangyan Song, Ryan Castellucci, All Rights Reserved */
#ifndef __EC_PUBKEY_FAST_H_
#define __EC_PUBKEY_FAST_H_
int secp256k1_ec_pubkey_precomp_table_save(int, unsigned char *);
int secp256k1_ec_pubkey_precomp_table(int, unsigned char *);
int secp256k1_ec_pubkey_create_precomp(unsigned char *, int *, const unsigned char *);
int secp256k1_ec_pubkey_incr_init(unsigned char *, unsigned int);
int secp256k1_ec_pubkey_incr(unsigned char *, int *, unsigned char *);
int secp256k1_scalar_add_b32(void *, void *, void *);
void priv_add_uint8(unsigned char *, unsigned char);
void priv_add_uint32(unsigned char *, unsigned int);
void * secp256k1_ec_priv_to_gej(unsigned char *);
int secp256k1_ec_pubkey_add_gej(unsigned char *, int *, void *);
int secp256k1_ec_pubkey_batch_init(unsigned int);
int secp256k1_ec_pubkey_batch_create(unsigned int, unsigned char (*)[65], unsigned char (*)[32]);
int secp256k1_ec_pubkey_batch_incr(unsigned int, unsigned int, unsigned char (*)[65], unsigned char (*)[32], unsigned char[32]);
#endif//__EC_PUBKEY_FAST_H_

29
bf/ecmtabgen.c Normal file
View file

@ -0,0 +1,29 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <unistd.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <signal.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "ec_pubkey_fast.h"
int main(int argc, char **argv) {
int ret;
if (argc != 3) {
fprintf(stderr, "[!] Usage: %s window_size tablefile.tab\n", argv[0]);
exit(1);
}
if ((ret = secp256k1_ec_pubkey_precomp_table_save(atoi(argv[1]), argv[2])) < 0)
fprintf(stderr, "[!] Failed to write tablefile '%s'\n", argv[2]);
return ret;
}
/* vim: set ts=2 sw=2 et ai si: */

49
bf/filehex.c Normal file
View file

@ -0,0 +1,49 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include "hex.h"
void filehex(FILE *ifile, const unsigned char *filename) {
unsigned char hexed[65];
unsigned char buf[65536];
size_t offset;
int r, i, buf_pos, i_max;
offset = buf_pos = 0;
while ((r = fread(buf + buf_pos, 1, 256, ifile)) > 0) {
i_max = r + buf_pos - 31;
for (i = 0; i < i_max; ++i, ++offset) {
printf("%s:%s,%zu\n", hex(buf+i, 32, hexed, 65), filename, offset);
}
memcpy(buf, buf+i, buf_pos = 31);
}
}
int main(int argc, char **argv) {
int i;
FILE *ifile;
/*
if (argc > 1) {
fprintf(stderr, "Usage: %s\n", argv[0]);
return 1;
}*/
if (argc == 1) {
filehex(stdin, "STDIN");
} else {
for (i = 1; i < argc; ++i) {
if ((ifile = fopen(argv[i], "r")) != NULL) {
filehex(ifile, argv[i]);
fclose(ifile);
}
}
}
return 0;
}
/* vim: set ts=2 sw=2 et ai si: */

14
bf/hash160.h Normal file
View file

@ -0,0 +1,14 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#ifndef __BRAINFLAYER_HASH160_H_
#define __BRAINFLAYER_HASH160_H_
#include <stdint.h>
#include <openssl/sha.h>
#include <openssl/ripemd.h>
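/* two views of the same 20 bytes: raw octets for hashing and I/O, 32-bit
 * words for cheap bloom filter indexing */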
typedef union hash160_u {
unsigned char uc[RIPEMD160_DIGEST_LENGTH];
uint32_t ul[RIPEMD160_DIGEST_LENGTH>>2];
} hash160_t;
/* vim: set ts=2 sw=2 et ai si: */
#endif /* __BRAINFLAYER_HASH160_H_ */

20
bf/hex.c Normal file
View file

@ -0,0 +1,20 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include "hex.h"
unsigned char *
hex(unsigned char *buf, size_t buf_sz,
unsigned char *hexed, size_t hexed_sz) {
int i, j;
--hexed_sz;
for (i = j = 0; i < buf_sz && j < hexed_sz; ++i, j += 2) {
snprintf(hexed+j, 3, "%02x", buf[i]);
}
hexed[j] = 0; // null terminate
return hexed;
}
/* vim: set ts=2 sw=2 et ai si: */

30
bf/hex.h Normal file
View file

@ -0,0 +1,30 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#ifndef __BRAINFLAYER_HEX_H_
#define __BRAINFLAYER_HEX_H_
static const unsigned char unhex_tab[80] = {
0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee,
0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
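/* masking an ASCII character with 0x4f folds '0'-'9', 'A'-'F' and 'a'-'f'
 * onto distinct indices of the table above; each table byte carries the
 * nibble value in both halves so the caller can mask out either half */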
static inline unsigned char *
unhex(unsigned char *str, size_t str_sz,
unsigned char *unhexed, size_t unhexed_sz) {
int i, j;
for (i = j = 0; i < str_sz && j < unhexed_sz; i += 2, ++j) {
unhexed[j] = (unhex_tab[str[i+0]&0x4f] & 0xf0)|
(unhex_tab[str[i+1]&0x4f] & 0x0f);
}
return unhexed;
}
unsigned char * hex(unsigned char *, size_t, unsigned char *, size_t);
#endif /* __BRAINFLAYER_HEX_H_ */
/* vim: set ts=2 sw=2 et ai si: */

114
bf/hex2blf.c Normal file
View file

@ -0,0 +1,114 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <unistd.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <signal.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <arpa/inet.h> /* for ntohl/htonl */
#include <math.h> /* pow/exp */
#include "hex.h"
#include "bloom.h"
#include "hash160.h"
const double k_hashes = 20;
const double m_bits = 4294967296;
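/* the filter is m = 2^32 bits (512 MiB) with k = 20 hash functions; after n
 * insertions the expected false positive rate is (1 - e^(-kn/m))^k, which is
 * what the err_rate computation below evaluates */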
int main(int argc, char **argv) {
hash160_t hash;
int i;
double pct;
struct stat sb;
unsigned char *bloom, *hashfile, *bloomfile;
FILE *f, *b;
size_t line_sz = 1024, line_ct = 0;
char *line;
double err_rate;
if (argc != 3) {
fprintf(stderr, "[!] Usage: %s hashfile.hex bloomfile.blf\n", argv[0]);
exit(1);
}
hashfile = argv[1];
bloomfile = argv[2];
if ((f = fopen(hashfile, "r")) == NULL) {
fprintf(stderr, "[!] Failed to open hash160 file '%s'\n", hashfile);
exit(1);
}
if ((bloom = malloc(BLOOM_SIZE)) == NULL) {
fprintf(stderr, "[!] malloc failed (bloom filter)\n");
exit(1);
}
if (stat(bloomfile, &sb) == 0) {
if (!S_ISREG(sb.st_mode) || sb.st_size != BLOOM_SIZE) {
fprintf(stderr, "[!] Bloom filter file '%s' is not the correct size (%ju != %d)\n", bloomfile, sb.st_size, BLOOM_SIZE);
exit(1);
}
if ((b = fopen(bloomfile, "r+")) == NULL) {
fprintf(stderr, "[!] Failed to open bloom filter file '%s' for read/write\n", bloomfile);
exit(1);
}
fprintf(stderr, "[*] Reading existing bloom filter from '%s'...\n", bloomfile);
if ((fread(bloom, BLOOM_SIZE, 1, b)) != 1 || (fseek(b, 0, SEEK_SET)) != 0) {
fprintf(stderr, "[!] Failed to read existing boom filter from '%s'\n", bloomfile);
exit(1);
}
} else {
/* Assume the file didn't exist - yes there is a race condition */
if ((b = fopen(bloomfile, "w+")) == NULL) {
fprintf(stderr, "[!] Failed to create bloom filter file '%s'\n", bloomfile);
exit(1);
}
// start it empty
fprintf(stderr, "[*] Initializing bloom filter...\n");
memset(bloom, 0, BLOOM_SIZE);
}
if ((line = malloc(line_sz+1)) == NULL) {
fprintf(stderr, "[!] malloc failed (line buffer)\n");
exit(1);
}
i = 0;
stat(hashfile, &sb);
fprintf(stderr, "[*] Loading hash160s from '%s' \033[s 0.0%%", hashfile);
while (getline(&line, &line_sz, f) > 0) {
++line_ct;
unhex(line, strlen(line), hash.uc, sizeof(hash.uc));
bloom_set_hash160(bloom, hash.ul);
if ((++i & 0x3ffff) == 0) {
pct = 100.0 * ftell(f) / sb.st_size;
fprintf(stderr, "\033[u%5.1f%%", pct);
fflush(stderr);
}
}
fprintf(stderr, "\033[u 100.0%%\n");
err_rate = pow(1 - exp(-k_hashes * line_ct / m_bits), k_hashes);
fprintf(stderr, "[*] Loaded %zu hashes, false positive rate: ~%.3e (1 in ~%.3e)\n", line_ct, err_rate, 1/err_rate);
fprintf(stderr, "[*] Writing bloom filter to '%s'...\n", bloomfile);
if ((fwrite(bloom, BLOOM_SIZE, 1, b)) != 1) {
fprintf(stderr, "[!] Failed to write bloom filter file '%s'\n", bloomfile);
exit(1);
}
fprintf(stderr, "[+] Success!\n");
return 0;
}
/* vim: set ts=2 sw=2 et ai si: */

33
bf/hexln.c Normal file
View file

@ -0,0 +1,33 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <stdlib.h>
#include <stdio.h>
#include "hex.h"
int main(int argc, char **argv) {
char *line = NULL;
size_t line_sz = 0, buf_sz = 2;
ssize_t line_read;
unsigned char *buf = malloc(buf_sz);
if (argc > 1) {
fprintf(stderr, "Usage: %s\n", argv[0]);
return 1;
}
while ((line_read = getline(&line, &line_sz, stdin)) > 0) {
while (line_sz * 2 > buf_sz) {
buf_sz *= 2;
buf = realloc(buf, buf_sz);
}
if (buf == NULL) {
fprintf(stderr, "memory error\n");
return 1;
}
printf("%s\n", hex(line, line_read - 1, buf, buf_sz));
}
return 0;
}
/* vim: set ts=2 sw=2 et ai si: */

97
bf/hsearchf.c Normal file
View file

@ -0,0 +1,97 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h> /* for ntohl/htonl */
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include "hex.h"
#include "hash160.h"
#include "hsearchf.h"
#define HSEARCHF_DEBUG 0
#define HASHLEN RIPEMD160_DIGEST_LENGTH
#define RESULT(R) do { \
res = R; \
goto hsearchf_result; \
} while (0)
#define DO_MEMCMP(H) memcmp(H.uc, hash->uc, HASHLEN)
// use fadvise to do a readbehind
#define READ_AT(X, H) do { \
posix_fadvise(fileno(f), ((X*HASHLEN)&0xfffff000)-4096, 8192, POSIX_FADV_WILLNEED); \
if ((ret = fseek(f, X * HASHLEN, 0)) != 0) { return -1; } \
if ((ret = fread(H.uc, HASHLEN, 1, f)) != 1) { return -1; } \
++i; \
} while (0)
// interpolation search
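// the file holds sorted, roughly uniformly distributed hash160s, so the
// next probe position can be estimated linearly from the top 32 bits of
// the target relative to the current low/high bounds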
int hsearchf(FILE *f, hash160_t *hash) {
int ret, res = 0, i = 0;
size_t file_sz;
struct stat sb;
hash160_t low_h, mid_h, high_h;
int64_t low_e, mid_e, high_e, entries;
int64_t vlow, vhigh, vtarget;
#if HSEARCHF_DEBUG > 0
unsigned char hexed[64];
#endif
if ((ret = fstat(fileno(f), &sb)) != 0) { return -1; }
file_sz = sb.st_size;
entries = file_sz / HASHLEN;
low_e = 0;
high_e = entries - 1;
vtarget = ntohl(hash->ul[0]);
memset(low_h.uc, 0x00, HASHLEN);
memset(high_h.uc, 0xff, HASHLEN);
// this tries to minimize reads, but does a few extra comparisons
while (low_e != high_e &&
memcmp(hash->uc, low_h.uc, HASHLEN) > 0 &&
memcmp(hash->uc, high_h.uc, HASHLEN) < 0) {
vlow = ntohl(low_h.ul[0]); vhigh = ntohl(high_h.ul[0]);
mid_e = low_e + (vtarget - vlow) * (high_e - low_e) / (vhigh - vlow);
READ_AT(mid_e, mid_h);
ret = DO_MEMCMP(mid_h);
#if HSEARCHF_DEBUG > 1
fprintf(stderr, "target %s checking %9jd %9jd %9jd",
hex(hash->uc, HASHLEN, hexed, sizeof(hexed)), low_e, mid_e, high_e);
fprintf(stderr, " got %s %11d %2d\n",
hex(mid_h.uc, HASHLEN, hexed, sizeof(hexed)), ret, i);
#endif
if (ret == 0) {
RESULT(1);
} else if (ret < 0) {
low_e = mid_e + 1;
READ_AT(low_e, low_h);
if (DO_MEMCMP(low_h) == 0) { RESULT(1); }
} else { // ret > 0
high_e = mid_e - 1;
READ_AT(high_e, high_h);
if (DO_MEMCMP(high_h) == 0) { RESULT(1); }
}
}
hsearchf_result:
#if HSEARCHF_DEBUG > 0
fprintf(stderr, "target: %s reads: %3d result: %d\n", hex(hash->uc, HASHLEN, hexed, sizeof(hexed)), i, res);
#endif
return res;
}
/* vim: set ts=2 sw=2 et ai si: */

8
bf/hsearchf.h Normal file
View file

@ -0,0 +1,8 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#ifndef __BRAINFLAYER_HSEARCHF_H_
#define __BRAINFLAYER_HSEARCHF_H_
int hsearchf(FILE *, hash160_t *);
/* vim: set ts=2 sw=2 et ai si: */
#endif /* __BRAINFLAYER_HSEARCHF_H_ */

139
bf/mmapf.c Normal file
View file

@ -0,0 +1,139 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#define _LARGEFILE_SOURCE
#define _LARGEFILE64_SOURCE
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include "mmapf.h"
static char *errstr[] = {
"Unknown error",
"Not a regular file",
"Incorrect file size",
""
};
char * mmapf_strerror(int errnum) {
if (errnum < MMAPF_EXFIRST) {
return strerror(errnum);
} else if (errnum < MMAPF_EXLAST) {
return errstr[errnum-MMAPF_EXFIRST];
} else {
return errstr[0];
}
}
int munmapf(mmapf_ctx *ctx) {
// TODO error checking
if (ctx->fd >= 0) {
msync(ctx->mem, ctx->file_sz, MS_SYNC);
fsync(ctx->fd);
close(ctx->fd);
}
if (ctx->mem != NULL) {
munmap(ctx->mem, ctx->mmap_sz);
}
ctx->file_sz = 0;
ctx->mmap_sz = 0;
ctx->mem = NULL;
ctx->fd = -1;
return 0;
}
int mmapf(mmapf_ctx *ctx, const unsigned char *filename, size_t size, int flags) {
size_t page_sz = sysconf(_SC_PAGESIZE);
struct stat sb;
int mmode = 0, mflags = 0, madv = 0;
int fmode = 0, fadv = 0;
int ret, fd;
// initialize
ctx->mem = NULL;
ctx->fd = -1;
ctx->file_sz = size;
// round up to the next multiple of the page size
ctx->mmap_sz = size % page_sz ? (size/page_sz+1)*page_sz : size;
mflags |= flags & MMAPF_COW ? MAP_PRIVATE : MAP_SHARED;
if (flags & MMAPF_RW) {
mmode |= PROT_READ|PROT_WRITE;
fmode |= O_RDWR;
} else if (flags & MMAPF_RD) {
mflags |= MAP_NORESERVE;
mmode |= PROT_READ;
fmode |= O_RDONLY;
} else if (flags & MMAPF_WR) {
mmode |= PROT_WRITE;
fmode |= O_WRONLY;
}
if (flags & MMAPF_CR) { fmode |= O_CREAT; }
if (flags & MMAPF_EX) { fmode |= O_EXCL; }
if (flags & MMAPF_PRE) { mflags |= MAP_POPULATE; }
if (flags & MMAPF_NOREUSE) { fadv |= POSIX_FADV_NOREUSE; }
if (flags & MMAPF_RND) { fadv |= POSIX_FADV_RANDOM; madv |= POSIX_MADV_RANDOM; }
if (flags & MMAPF_SEQ) { fadv |= POSIX_FADV_SEQUENTIAL; madv |= POSIX_MADV_SEQUENTIAL; }
if (flags & MMAPF_DONTNEED) { fadv |= POSIX_FADV_DONTNEED; madv |= POSIX_MADV_DONTNEED; }
if (flags & MMAPF_WILLNEED) {
fadv |= POSIX_FADV_WILLNEED;
// seems to fail on anonymous maps
if (filename) { madv |= POSIX_MADV_WILLNEED; }
}
if (!filename) {
ctx->mem = mmap(NULL, ctx->mmap_sz, mmode, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
} else {
if (stat(filename, &sb) == 0) { // file exists
if (!S_ISREG(sb.st_mode)) { return MMAPF_ENREG; } // not a regular file
if (sb.st_size != size) { return MMAPF_ESIZE; } // wrong size
if ((fd = open64(filename, fmode)) < 0) { return errno; } // open failed
} else if (flags & MMAPF_CR) { // file missing, but creation requested
if ((fd = open64(filename, fmode)) < 0) { return errno; } // open failed
if ((ret = posix_fallocate(fd, 0, size)) != 0) {
// EBADF is returned on an unsupported filesystem, ignore it
if (ret != EBADF) { return ret; }
}
} else { // file missing, creation *not* requested
return ENOENT;
}
//if ((ret = posix_fadvise(fd, 0, size, fadv)) != 0) { return ret; }
posix_fadvise(fd, 0, size, fadv); // ignore result
ctx->mem = mmap(NULL, ctx->mmap_sz, mmode, mflags, fd, 0);
}
if (ctx->mem == MAP_FAILED) {
return errno;
} else if (ctx->mem == NULL) {
return ENOMEM;
}
if ((ret = posix_madvise(ctx->mem, ctx->mmap_sz, madv)) != 0) {
munmap(ctx->mem, ctx->mmap_sz);
ctx->mem = NULL;
return ret;
}
#ifdef MADV_HUGEPAGE
// reduce overhead for large mappings
if (size > (1<<26)) { madvise(ctx->mem, ctx->mmap_sz, MADV_HUGEPAGE); }
#endif
#ifdef MADV_DONTDUMP
// don't include in a core dump
madvise(ctx->mem, ctx->mmap_sz, MADV_DONTDUMP);
#endif
return MMAPF_OKAY;
}

45
bf/mmapf.h Normal file
View file

@ -0,0 +1,45 @@
/* Copyright (c) 2015 Ryan Castellucci, All Rights Reserved */
#ifndef __MMAPF_H_
#define __MMAPF_H_
typedef struct {
void *mem;
size_t mmap_sz;
size_t file_sz;
int fd;
} mmapf_ctx;
// file flags
#define MMAPF_RD 0x0001
#define MMAPF_WR 0x0002
#define MMAPF_RW (MMAPF_RD|MMAPF_WR)
#define MMAPF_CR 0x0004
#define MMAPF_EX 0x0008
// advise flags
#define MMAPF_RND 0x0100
#define MMAPF_SEQ 0x0200
#define MMAPF_PRE 0x0400
#define MMAPF_COW 0x0800
#define MMAPF_NOREUSE 0x1000
#define MMAPF_WILLNEED 0x2000
#define MMAPF_DONTNEED 0x4000
// convenience
#define MMAPF_RNDRD (MMAPF_RD|MMAPF_RND|MMAPF_PRE|MMAPF_WILLNEED)
#define MMAPF_RNDUP (MMAPF_RW|MMAPF_RND|MMAPF_PRE|MMAPF_WILLNEED)
#define MMAPF_SEQCR (MMAPF_WR|MMAPF_SEQ|MMAPF_CR|MMAPF_EX|MMAPF_NOREUSE|MMAPF_DONTNEED)
// returns
#define MMAPF_OKAY 0
#define MMAPF_EXFIRST 1000
#define MMAPF_ENREG 1001
#define MMAPF_ESIZE 1002
#define MMAPF_EXLAST 1003
char * mmapf_strerror(int);
int mmapf(mmapf_ctx *, const unsigned char *, size_t, int);
int munmapf(mmapf_ctx *);
/* vim: set ts=2 sw=2 et ai si: */
#endif /* __MMAPF_H_ */

286
bf/ripemd160_256.c Normal file
View file

@ -0,0 +1,286 @@
#define _RIPEMD160_C_ 1
#include "ripemd160_256.h"
// adapted by Pieter Wuille in 2012; all changes are in the public domain
// modified by Ryan Castellucci in 2015; all changes are in the public domain
/*
*
* RIPEMD160.c : RIPEMD-160 implementation
*
* Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
*
* ===================================================================
* The contents of this file are dedicated to the public domain. To
* the extent that dedication to the public domain is not available,
* everyone is granted a worldwide, perpetual, royalty-free,
* non-exclusive license to exercise all rights associated with the
* contents of this file for any purpose whatsoever.
* No rights are reserved.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* ===================================================================
*
* Country of origin: Canada
*
* This implementation (written in C) is based on an implementation the author
* wrote in Python.
*
* This implementation was written with reference to the RIPEMD-160
* specification, which is available at:
* http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/
*
* It is also documented in the _Handbook of Applied Cryptography_, as
* Algorithm 9.55. It's on page 30 of the following PDF file:
* http://www.cacr.math.uwaterloo.ca/hac/about/chap9.pdf
*
* The RIPEMD-160 specification doesn't really tell us how to do padding, but
* since RIPEMD-160 is inspired by MD4, you can use the padding algorithm from
* RFC 1320.
*
* According to http://www.users.zetnet.co.uk/hopwood/crypto/scan/md.html:
* "RIPEMD-160 is big-bit-endian, little-byte-endian, and left-justified."
*/
#include <stdint.h>
#include <string.h>
#define RIPEMD160_DIGEST_SIZE 20
#define BLOCK_SIZE 64
/* cyclic left-shift the 32-bit word n left by s bits */
#define ROL(s, n) (((n) << (s)) | ((n) >> (32-(s))))
/* Initial values for the chaining variables.
* This is just 0123456789ABCDEFFEDCBA9876543210F0E1D2C3 in little-endian. */
static const uint32_t initial_h[5] = { 0x67452301u, 0xEFCDAB89u, 0x98BADCFEu, 0x10325476u, 0xC3D2E1F0u };
/* Ordering of message words. Based on the permutations rho(i) and pi(i), defined as follows:
*
* rho(i) := { 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8 }[i] 0 <= i <= 15
*
* pi(i) := 9*i + 5 (mod 16)
*
* Line | Round 1 | Round 2 | Round 3 | Round 4 | Round 5
* -------+-----------+-----------+-----------+-----------+-----------
* left | id | rho | rho^2 | rho^3 | rho^4
* right | pi | rho pi | rho^2 pi | rho^3 pi | rho^4 pi
*/
/* Left line */
static const uint8_t RL[5][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, /* Round 1: id */
{ 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8 }, /* Round 2: rho */
{ 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12 }, /* Round 3: rho^2 */
{ 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2 }, /* Round 4: rho^3 */
{ 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13 } /* Round 5: rho^4 */
};
/* Right line */
static const uint8_t RR[5][16] = {
{ 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12 }, /* Round 1: pi */
{ 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2 }, /* Round 2: rho pi */
{ 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13 }, /* Round 3: rho^2 pi */
{ 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14 }, /* Round 4: rho^3 pi */
{ 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11 } /* Round 5: rho^4 pi */
};
/*
* Shifts - Since we don't actually re-order the message words according to
* the permutations above (we could, but it would be slower), these tables
* come with the permutations pre-applied.
*/
/* Shifts, left line */
static const uint8_t SL[5][16] = {
{ 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8 }, /* Round 1 */
{ 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12 }, /* Round 2 */
{ 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5 }, /* Round 3 */
{ 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12 }, /* Round 4 */
{ 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6 } /* Round 5 */
};
/* Shifts, right line */
static const uint8_t SR[5][16] = {
{ 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6 }, /* Round 1 */
{ 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11 }, /* Round 2 */
{ 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5 }, /* Round 3 */
{ 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8 }, /* Round 4 */
{ 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11 } /* Round 5 */
};
/* static padding for 256 bit input */
static const uint8_t pad256[32] = {
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* length 256 bits, little endian uint64_t */
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/* Boolean functions */
#define F1(x, y, z) ((x) ^ (y) ^ (z))
#define F2(x, y, z) (((x) & (y)) | (~(x) & (z)))
#define F3(x, y, z) (((x) | ~(y)) ^ (z))
#define F4(x, y, z) (((x) & (z)) | ((y) & ~(z)))
#define F5(x, y, z) ((x) ^ ((y) | ~(z)))
/* Round constants, left line */
static const uint32_t KL[5] = {
0x00000000u, /* Round 1: 0 */
0x5A827999u, /* Round 2: floor(2**30 * sqrt(2)) */
0x6ED9EBA1u, /* Round 3: floor(2**30 * sqrt(3)) */
0x8F1BBCDCu, /* Round 4: floor(2**30 * sqrt(5)) */
0xA953FD4Eu /* Round 5: floor(2**30 * sqrt(7)) */
};
/* Round constants, right line */
static const uint32_t KR[5] = {
0x50A28BE6u, /* Round 1: floor(2**30 * cubert(2)) */
0x5C4DD124u, /* Round 2: floor(2**30 * cubert(3)) */
0x6D703EF3u, /* Round 3: floor(2**30 * cubert(5)) */
0x7A6D76E9u, /* Round 4: floor(2**30 * cubert(7)) */
0x00000000u /* Round 5: 0 */
};
static inline void byteswap32(uint32_t *v)
{
union { uint32_t w; uint8_t b[4]; } x, y;
x.w = *v;
y.b[0] = x.b[3];
y.b[1] = x.b[2];
y.b[2] = x.b[1];
y.b[3] = x.b[0];
*v = y.w;
/* Wipe temporary variables */
x.w = y.w = 0;
}
static inline void byteswap_digest(uint32_t *p)
{
unsigned int i;
for (i = 0; i < 4; i++) {
byteswap32(p++);
byteswap32(p++);
byteswap32(p++);
byteswap32(p++);
}
}
/* The RIPEMD160 compression function. */
static inline void ripemd160_rawcompress(void *pbuf, void *ph)
{
uint8_t w, round;
uint32_t T;
uint32_t AL, BL, CL, DL, EL; /* left line */
uint32_t AR, BR, CR, DR, ER; /* right line */
uint32_t *buf = pbuf;
uint32_t *h = ph;
/* Byte-swap the buffer if we're on a big-endian machine */
#ifdef PCT_BIG_ENDIAN
byteswap_digest(buf);
#endif
/* initialize state */
memcpy(h, initial_h, RIPEMD160_DIGEST_SIZE);
/* Load the left and right lines with the initial state */
AL = AR = h[0];
BL = BR = h[1];
CL = CR = h[2];
DL = DR = h[3];
EL = ER = h[4];
/* Round 1 */
round = 0;
for (w = 0; w < 16; w++) { /* left line */
T = ROL(SL[round][w], AL + F1(BL, CL, DL) + buf[RL[round][w]] + KL[round]) + EL;
AL = EL; EL = DL; DL = ROL(10, CL); CL = BL; BL = T;
}
for (w = 0; w < 16; w++) { /* right line */
T = ROL(SR[round][w], AR + F5(BR, CR, DR) + buf[RR[round][w]] + KR[round]) + ER;
AR = ER; ER = DR; DR = ROL(10, CR); CR = BR; BR = T;
}
/* Round 2 */
round++;
for (w = 0; w < 16; w++) { /* left line */
T = ROL(SL[round][w], AL + F2(BL, CL, DL) + buf[RL[round][w]] + KL[round]) + EL;
AL = EL; EL = DL; DL = ROL(10, CL); CL = BL; BL = T;
}
for (w = 0; w < 16; w++) { /* right line */
T = ROL(SR[round][w], AR + F4(BR, CR, DR) + buf[RR[round][w]] + KR[round]) + ER;
AR = ER; ER = DR; DR = ROL(10, CR); CR = BR; BR = T;
}
/* Round 3 */
round++;
for (w = 0; w < 16; w++) { /* left line */
T = ROL(SL[round][w], AL + F3(BL, CL, DL) + buf[RL[round][w]] + KL[round]) + EL;
AL = EL; EL = DL; DL = ROL(10, CL); CL = BL; BL = T;
}
for (w = 0; w < 16; w++) { /* right line */
T = ROL(SR[round][w], AR + F3(BR, CR, DR) + buf[RR[round][w]] + KR[round]) + ER;
AR = ER; ER = DR; DR = ROL(10, CR); CR = BR; BR = T;
}
/* Round 4 */
round++;
for (w = 0; w < 16; w++) { /* left line */
T = ROL(SL[round][w], AL + F4(BL, CL, DL) + buf[RL[round][w]] + KL[round]) + EL;
AL = EL; EL = DL; DL = ROL(10, CL); CL = BL; BL = T;
}
for (w = 0; w < 16; w++) { /* right line */
T = ROL(SR[round][w], AR + F2(BR, CR, DR) + buf[RR[round][w]] + KR[round]) + ER;
AR = ER; ER = DR; DR = ROL(10, CR); CR = BR; BR = T;
}
/* Round 5 */
round++;
for (w = 0; w < 16; w++) { /* left line */
T = ROL(SL[round][w], AL + F5(BL, CL, DL) + buf[RL[round][w]] + KL[round]) + EL;
AL = EL; EL = DL; DL = ROL(10, CL); CL = BL; BL = T;
}
for (w = 0; w < 16; w++) { /* right line */
T = ROL(SR[round][w], AR + F1(BR, CR, DR) + buf[RR[round][w]] + KR[round]) + ER;
AR = ER; ER = DR; DR = ROL(10, CR); CR = BR; BR = T;
}
/* Final mixing stage */
T = h[1] + CL + DR;
h[1] = h[2] + DL + ER;
h[2] = h[3] + EL + AR;
h[3] = h[4] + AL + BR;
h[4] = h[0] + BL + CR;
h[0] = T;
/* Byte-swap the output if we're on a big-endian machine */
#ifdef PCT_BIG_ENDIAN
/* swap only the five 32-bit digest words here; byteswap_digest() swaps
   sixteen words and would write past the 20-byte output buffer */
byteswap32(&h[0]); byteswap32(&h[1]); byteswap32(&h[2]);
byteswap32(&h[3]); byteswap32(&h[4]);
#endif
}
void ripemd160_256(const void *in, void *out) {
unsigned char buf[64];
/* copy input data */
memcpy(buf + 0, in, 32);
/* append fixed padding */
memcpy(buf + 32, pad256, 32);
/* compute and output hash */
ripemd160_rawcompress(buf, out);
}
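/* Usage sketch (illustrative; the RIPEMD160_256_DEMO guard is hypothetical):
   hash a fixed 32-byte input, e.g. a SHA-256 digest, into a 20-byte
   RIPEMD-160 digest -- the "hash160" step. */
#ifdef RIPEMD160_256_DEMO
#include <stdio.h>
#include "ripemd160_256.h"
int main(void) {
    uint8_t in[32] = {0}; /* e.g. a SHA-256 digest */
    uint8_t out[20];
    unsigned int i;
    ripemd160_256(in, out);
    for (i = 0; i < 20; i++)
        printf("%02x", out[i]);
    printf("\n");
    return 0;
}
#endif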

8
bf/ripemd160_256.h Normal file
View file

@ -0,0 +1,8 @@
#ifndef __RIPEMD160_256_H_
#define __RIPEMD160_256_H_
#include <stdint.h>
void ripemd160_256(const void *in, void *out);
#endif//__RIPEMD160_256_H_

3
bf/scrypt-jane/.gitignore vendored Normal file
View file

@ -0,0 +1,3 @@
scrypt_test
scrypt_speed
*.o

163
bf/scrypt-jane/README.md Normal file
View file

@ -0,0 +1,163 @@
This project provides performant, flexible implementations of Colin Percival's [scrypt](http://www.tarsnap.com/scrypt.html).
# Features
## Modular Design
The code uses a modular (compile-time, not runtime) layout to allow new mixing & hash functions to be added easily. The base components (HMAC, PBKDF2, and scrypt) are static and will immediately work with any conforming mix or hash function.
## Supported Mix Functions
* [Salsa20/8](http://cr.yp.to/salsa20.html)
* [ChaCha20/8](http://cr.yp.to/chacha.html)
* Salsa6420/8
I am not actually aware of any other candidates for a decent mix function. Salsa20/8 was nearly perfect, but its successor, ChaCha20/8, has better diffusion and is thus stronger, is potentially faster given advanced SIMD support (byte level shuffles, or a 32bit rotate), and is slightly cleaner to implement given that it requires no pre/post processing of data for SIMD implementations.
64-byte blocks are no longer assumed! Salsa6420/8 is a 'proof of concept' 64-bit version of Salsa20/8 with a 128 byte block, and rotation constants chosen to allow 32-bit word shuffles instead of rotations for two of the rotations which put it on par with ChaCha in terms of SSE implementation shortcuts.
## Supported Hash Functions
* SHA256/512
* [BLAKE256/512](https://www.131002.net/blake/)
* [Skein512](http://www.skein-hash.info/)
* [Keccak256/512](http://keccak.noekeon.org/) (SHA-3)
Hash function implementations, unlike mix functions, are not optimized. The PBKDF2 computations are relatively minor in the scrypt algorithm, so including CPU specific versions, or vastly unrolling loops, would serve little purpose while bloating the code, both source and binary, and making it more confusing to implement correctly.
Most (now only two!) of the SHA-3 candidates fall into the "annoying to read/implement" category and have not been included yet. This will of course be moot once ~~BLAKE is chosen as SHA-3~~ Keccak is chosen as SHA-3. Well shit.
## CPU Adaptation
The mixing function specialization is selected at runtime based on what the CPU supports (well, x86/x86-64 for now, but theoretically any). On platforms where this is not needed, e.g. where packages are usually compiled from source, it can also select the most suitable implementation at compile time, cutting down on binary size.
For those who are familiar with the scrypt spec, the code specializes at the ROMix level, allowing all copy, and xor calls to be inlined efficiently. ***Update***: This is actually not as important as I switched from specializing at the mix() level and letting the compiler somewhat inefficiently inline block_copy and block_xor to specializing at ChunkMix(), where they can be inlined properly. I thought about specializing at ROMix(), but it would increase the complexity per mix function even more and would not present many more opportunities than what is generated by the compiler presently.
MSVC uses SSE intrinsics as opposed to inline assembly for the mix functions to allow the compiler to fully inline properly. Also, Visual Studio is not smart enough to allow inline assembly in 64-bit code.
## Self Testing
On first use, scrypt() runs a small series of tests to make sure the hash function, mix functions, and scrypt() itself, are generating correct results. It will exit() (or call a user defined fatal error function) should any of these tests fail.
Test vectors for individual mix and hash functions are generated from reference implementations. The only "official" test vectors for the full scrypt() are for SHA256 + Salsa20/8 of course; other combinations are generated from this code (once it works with all reference test vectors) and subject to change if any implementation errors are discovered.
# Performance (on an E5200 2.5GHz)
Benchmarks are run _without_ allocating memory, i.e., enough memory is allocated once before the trials are run. Different allocators can have different costs and non-deterministic effects, which is not the point of comparing implementations. The only hash function compared will be SHA-256, to be comparable to Colin's reference implementation; the hash function will generally be a fraction of a percent of noise in the overall result.
Three different scrypt settings are tested (the last two are from the scrypt paper):
* 'High Volume': N=4096, r=8, p=1, 4mb memory
* 'Interactive': N=16384, r=8, p=1, 16mb memory
* 'Non-Interactive': N=1048576, r=8, p=1, 1gb memory
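(The memory figures follow from scrypt's 128 * r * N bytes: 128 * 8 * 4096 = 4mb, 128 * 8 * 16384 = 16mb, and 128 * 8 * 1048576 = 1gb.)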
__Note__: Benchmark settings are adjusted based on the underlying block size to keep memory usage consistent with default scrypt. This means Salsa64 has r=4 due to having a 128 byte block size. A 256 byte block size would have r=2, 512 byte block would have r=1, etc. Additionally, this means Salsa6420/8 is doing half the rounds/byte of default scrypt, but has 64 bit word mixing vs 32 bit, and thus does somewhat less overall mixing. Salsa6420/~10-12 would be needed to maintain equivalent overall mixing.
Cycle counts are in millions of cycles. All versions compiled with gcc 4.6.3, -O3. Sorted from fastest to slowest.
Scaling refers to how much more expensive 'Non-Interactive' is to compute than 'High Volume', normalized to "ideal" scaling (256x difficulty). Under 100% means it becomes easier to process as N grows; over 100% means it becomes more difficult to process as N grows.
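For example, taking the first row of the table below: 5120.0m / 18.2m ≈ 281.3x the work for a 256x increase in difficulty, and 281.3 / 256 ≈ 110.0%.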
<table>
<thead><tr><th>Implementation</th><th>Algo</th><th>High Volume</th><th>Interactive</th><th>Non-Interactive</th><th>Scaling</th></tr></thead>
<tbody>
<tr><td>scrypt-jane SSSE3 64bit</td><td>Salsa6420/8 </td><td>18.2m</td><td> 75.6m</td><td>5120.0m</td><td>110.0%</td></tr>
<tr><td>scrypt-jane SSSE3 64bit</td><td>ChaCha20/8 </td><td>19.6m</td><td> 79.6m</td><td>5296.7m</td><td>105.6%</td></tr>
<tr><td>scrypt-jane SSSE3 32bit</td><td>ChaCha20/8 </td><td>19.8m</td><td> 80.3m</td><td>5346.1m</td><td>105.5%</td></tr>
<tr><td>scrypt-jane SSE2 64bit </td><td>Salsa6420/8 </td><td>19.8m</td><td> 82.1m</td><td>5529.2m</td><td>109.1%</td></tr>
<tr><td>scrypt-jane SSE2 64bit </td><td>Salsa20/8 </td><td>22.1m</td><td> 89.7m</td><td>5938.8m</td><td>105.0%</td></tr>
<tr><td>scrypt-jane SSE2 32bit </td><td>Salsa20/8 </td><td>22.3m</td><td> 90.6m</td><td>6011.0m</td><td>105.3%</td></tr>
<tr><td>scrypt-jane SSE2 64bit </td><td>ChaCha20/8 </td><td>23.9m</td><td> 96.8m</td><td>6399.7m</td><td>104.6%</td></tr>
<tr><td>scrypt-jane SSE2 32bit </td><td>ChaCha20/8 </td><td>24.2m</td><td> 98.3m</td><td>6500.7m</td><td>104.9%</td></tr>
<tr><td>*Reference SSE2 64bit* </td><td>Salsa20/8 </td><td>32.9m</td><td>135.2m</td><td>8881.6m</td><td>105.5%</td></tr>
<tr><td>*Reference SSE2 32bit* </td><td>Salsa20/8 </td><td>33.0m</td><td>134.4m</td><td>8885.2m</td><td>105.2%</td></tr>
</tbody>
</table>
* scrypt-jane Salsa6420/8-SSSE3 is ~1.80x faster than reference Salsa20/8-SSE2 for High Volume, but drops to 1.73x faster for 'Non-Interactive' instead of remaining constant
* scrypt-jane ChaCha20/8-SSSE3 is ~1.67x faster than reference Salsa20/8-SSE2
* scrypt-jane Salsa20/8-SSE2 is ~1.48x faster than reference Salsa20/8-SSE2
# Performance (on a slightly noisy E3-1270 3.4GHz)
All versions compiled with gcc 4.4.7, -O3. Sorted from fastest to slowest.
<table>
<thead><tr><th>Implementation</th><th>Algo</th><th>High Volume</th><th>Interactive</th><th>Non-Interactive</th><th>Scaling</th></tr></thead>
<tbody>
<tr><td>scrypt-jane AVX 64bit </td><td>Salsa6420/8 </td><td>11.8m</td><td> 52.5m</td><td>3848.6m</td><td>127.4%</td></tr>
<tr><td>scrypt-jane SSSE3 64bit </td><td>Salsa6420/8 </td><td>13.3m</td><td> 57.9m</td><td>4176.6m</td><td>122.7%</td></tr>
<tr><td>scrypt-jane SSE2 64bit </td><td>Salsa6420/8 </td><td>14.2m</td><td> 61.1m</td><td>4382.4m</td><td>120.6%</td></tr>
<tr><td>scrypt-jane AVX 64bit </td><td>ChaCha20/8 </td><td>18.0m</td><td> 77.4m</td><td>5396.8m</td><td>117.1%</td></tr>
<tr><td>scrypt-jane AVX 32bit </td><td>ChaCha20/8 </td><td>18.3m</td><td> 82.1m</td><td>5421.8m</td><td>115.7%</td></tr>
<tr><td>scrypt-jane SSSE3 64bit </td><td>ChaCha20/8 </td><td>19.0m</td><td> 81.3m</td><td>5600.7m</td><td>115.1%</td></tr>
<tr><td>scrypt-jane AVX 64bit </td><td>Salsa20/8 </td><td>19.0m</td><td> 81.2m</td><td>5610.6m</td><td>115.3%</td></tr>
<tr><td>scrypt-jane AVX 32bit </td><td>Salsa20/8 </td><td>19.0m</td><td> 81.3m</td><td>5621.6m</td><td>115.6%</td></tr>
<tr><td>scrypt-jane SSSE3 32bit </td><td>ChaCha20/8 </td><td>19.1m</td><td> 81.8m</td><td>5621.6m</td><td>115.0%</td></tr>
<tr><td>scrypt-jane SSE2 64bit </td><td>Salsa20/8 </td><td>19.5m</td><td> 83.8m</td><td>5772.9m</td><td>115.6%</td></tr>
<tr><td>scrypt-jane SSE2 32bit </td><td>Salsa20/8 </td><td>19.6m</td><td> 84.0m</td><td>5793.9m</td><td>115.5%</td></tr>
<tr><td>*Reference SSE2/AVX 64bit* </td><td>Salsa20/8 </td><td>21.5m</td><td> 90.4m</td><td>6147.1m</td><td>111.7%</td></tr>
<tr><td>*Reference SSE2/AVX 32bit* </td><td>Salsa20/8 </td><td>22.3m</td><td> 94.0m</td><td>6267.7m</td><td>110.0%</td></tr>
<tr><td>scrypt-jane SSE2 64bit </td><td>ChaCha20/8 </td><td>23.1m</td><td> 97.7m</td><td>6670.0m</td><td>112.8%</td></tr>
<tr><td>scrypt-jane SSE2 32bit </td><td>ChaCha20/8 </td><td>23.3m</td><td> 98.4m</td><td>6728.7m</td><td>112.8%</td></tr>
<tr><td>*Reference SSE2 64bit* </td><td>Salsa20/8 </td><td>30.4m</td><td>125.6m</td><td>8139.4m</td><td>104.6%</td></tr>
<tr><td>*Reference SSE2 32bit* </td><td>Salsa20/8 </td><td>30.0m</td><td>124.5m</td><td>8469.3m</td><td>110.3%</td></tr>
</tbody>
</table>
* scrypt-jane Salsa6420/8-AVX is 1.60x - 1.82x faster than reference Salsa20/8-SSE2/AVX
* scrypt-jane ChaCha20/8-AVX is 1.13x - 1.19x faster than reference Salsa20/8-SSE2/AVX
* scrypt-jane Salsa20/8-AVX is 1.09x - 1.13x faster than reference Salsa20/8-SSE2/AVX
# Building
[gcc,icc,clang] scrypt-jane.c -O3 -[m32,m64] -DSCRYPT_MIX -DSCRYPT_HASH -c
where SCRYPT_MIX is one of
* SCRYPT_SALSA
* SCRYPT_SALSA64 (no optimized 32-bit implementation)
* SCRYPT_CHACHA
and SCRYPT_HASH is one of
* SCRYPT_SHA256
* SCRYPT_SHA512
* SCRYPT_BLAKE256
* SCRYPT_BLAKE512
* SCRYPT_SKEIN512
* SCRYPT_KECCAK256
* SCRYPT_KECCAK512
e.g.
gcc scrypt-jane.c -O3 -DSCRYPT_CHACHA -DSCRYPT_BLAKE512 -c
gcc example.c scrypt-jane.o -o example
clang *may* need "-no-integrated-as", as some versions don't support ".intel_syntax"
# Using
#include "scrypt-jane.h"
scrypt(password, password_len, salt, salt_len, Nfactor, rfactor, pfactor, out, want_bytes);
## scrypt parameters
* Nfactor: Increases CPU & Memory Hardness
* rfactor: Increases Memory Hardness
* pfactor: Increases CPU Hardness
In scrypt terms
* N = (1 << (Nfactor + 1)), which controls how many times to mix each chunk, and how many temporary chunks are used. Increasing N increases both CPU time and memory used.
* r = (1 << rfactor), which controls how many blocks are in a chunk (i.e., 2 * r blocks are in a chunk). Increasing r increases how much memory is used.
* p = (1 << pfactor), which controls how many passes to perform over the set of N chunks. Increasing p increases CPU time used.
I chose to use the log2 of each parameter as it is the common way to communicate settings (e.g. 2^20, not 1048576).
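As a minimal sketch (the password, salt, and 64-byte output length are illustrative, and the call assumes the byte-pointer prototype from scrypt-jane.h with the factor order listed above):

#include <string.h>
#include "scrypt-jane.h"

void derive_key(unsigned char key[64]) {
    const char *pw = "correct horse battery staple"; /* illustrative */
    const char *salt = "per-user random salt";       /* illustrative */
    /* Nfactor=13 -> N=16384, rfactor=3 -> r=8, pfactor=0 -> p=1:
       the 'Interactive' setting, 128 * r * N = 16mb of memory */
    scrypt((const unsigned char *)pw, strlen(pw),
           (const unsigned char *)salt, strlen(salt),
           13, 3, 0, key, 64);
}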
# License
Public Domain, or MIT

View file

@ -0,0 +1,28 @@
/*
pick the best algo at runtime or compile time?
----------------------------------------------
SCRYPT_CHOOSE_COMPILETIME (gcc only!)
SCRYPT_CHOOSE_RUNTIME
*/
#define SCRYPT_CHOOSE_RUNTIME
/*
hash function to use
-------------------------------
SCRYPT_BLAKE256
SCRYPT_BLAKE512
SCRYPT_SHA256
SCRYPT_SHA512
SCRYPT_SKEIN512
*/
//#define SCRYPT_SHA256
/*
block mixer to use
-----------------------------
SCRYPT_CHACHA
SCRYPT_SALSA
*/
//#define SCRYPT_SALSA

View file

@ -0,0 +1,162 @@
#define SCRYPT_MIX_BASE "ChaCha20/8"
typedef uint32_t scrypt_mix_word_t;
#define SCRYPT_WORDTO8_LE U32TO8_LE
#define SCRYPT_WORD_ENDIAN_SWAP U32_SWAP
#define SCRYPT_BLOCK_BYTES 64
#define SCRYPT_BLOCK_WORDS (SCRYPT_BLOCK_BYTES / sizeof(scrypt_mix_word_t))
/* must have these here in case block bytes is ever != 64 */
#include "scrypt-jane-romix-basic.h"
#include "scrypt-jane-mix_chacha-xop.h"
#include "scrypt-jane-mix_chacha-avx.h"
#include "scrypt-jane-mix_chacha-ssse3.h"
#include "scrypt-jane-mix_chacha-sse2.h"
#include "scrypt-jane-mix_chacha.h"
#if defined(SCRYPT_CHACHA_XOP)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_xop
#define SCRYPT_ROMIX_FN scrypt_ROMix_xop
#define SCRYPT_MIX_FN chacha_core_xop
#define SCRYPT_ROMIX_TANGLE_FN scrypt_romix_nop
#define SCRYPT_ROMIX_UNTANGLE_FN scrypt_romix_nop
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_CHACHA_AVX)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_avx
#define SCRYPT_ROMIX_FN scrypt_ROMix_avx
#define SCRYPT_MIX_FN chacha_core_avx
#define SCRYPT_ROMIX_TANGLE_FN scrypt_romix_nop
#define SCRYPT_ROMIX_UNTANGLE_FN scrypt_romix_nop
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_CHACHA_SSSE3)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_ssse3
#define SCRYPT_ROMIX_FN scrypt_ROMix_ssse3
#define SCRYPT_MIX_FN chacha_core_ssse3
#define SCRYPT_ROMIX_TANGLE_FN scrypt_romix_nop
#define SCRYPT_ROMIX_UNTANGLE_FN scrypt_romix_nop
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_CHACHA_SSE2)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_sse2
#define SCRYPT_ROMIX_FN scrypt_ROMix_sse2
#define SCRYPT_MIX_FN chacha_core_sse2
#define SCRYPT_ROMIX_TANGLE_FN scrypt_romix_nop
#define SCRYPT_ROMIX_UNTANGLE_FN scrypt_romix_nop
#include "scrypt-jane-romix-template.h"
#endif
/* cpu agnostic */
#define SCRYPT_ROMIX_FN scrypt_ROMix_basic
#define SCRYPT_MIX_FN chacha_core_basic
#define SCRYPT_ROMIX_TANGLE_FN scrypt_romix_convert_endian
#define SCRYPT_ROMIX_UNTANGLE_FN scrypt_romix_convert_endian
#include "scrypt-jane-romix-template.h"
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
static scrypt_ROMixfn
scrypt_getROMix(void) {
size_t cpuflags = detect_cpu();
#if defined(SCRYPT_CHACHA_XOP)
if (cpuflags & cpu_xop)
return scrypt_ROMix_xop;
else
#endif
#if defined(SCRYPT_CHACHA_AVX)
if (cpuflags & cpu_avx)
return scrypt_ROMix_avx;
else
#endif
#if defined(SCRYPT_CHACHA_SSSE3)
if (cpuflags & cpu_ssse3)
return scrypt_ROMix_ssse3;
else
#endif
#if defined(SCRYPT_CHACHA_SSE2)
if (cpuflags & cpu_sse2)
return scrypt_ROMix_sse2;
else
#endif
return scrypt_ROMix_basic;
}
#endif
#if defined(SCRYPT_TEST_SPEED)
static size_t
available_implementations(void) {
size_t cpuflags = detect_cpu();
size_t flags = 0;
#if defined(SCRYPT_CHACHA_XOP)
if (cpuflags & cpu_xop)
flags |= cpu_xop;
#endif
#if defined(SCRYPT_CHACHA_AVX)
if (cpuflags & cpu_avx)
flags |= cpu_avx;
#endif
#if defined(SCRYPT_CHACHA_SSSE3)
if (cpuflags & cpu_ssse3)
flags |= cpu_ssse3;
#endif
#if defined(SCRYPT_CHACHA_SSE2)
if (cpuflags & cpu_sse2)
flags |= cpu_sse2;
#endif
return flags;
}
#endif
static int
scrypt_test_mix(void) {
static const uint8_t expected[16] = {
0x48,0x2b,0x2d,0xb8,0xa1,0x33,0x22,0x73,0xcd,0x16,0xc4,0xb4,0xb0,0x7f,0xb1,0x8a,
};
int ret = 1;
size_t cpuflags = detect_cpu();
#if defined(SCRYPT_CHACHA_XOP)
if (cpuflags & cpu_xop)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_xop, scrypt_romix_nop, scrypt_romix_nop, expected);
#endif
#if defined(SCRYPT_CHACHA_AVX)
if (cpuflags & cpu_avx)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_avx, scrypt_romix_nop, scrypt_romix_nop, expected);
#endif
#if defined(SCRYPT_CHACHA_SSSE3)
if (cpuflags & cpu_ssse3)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_ssse3, scrypt_romix_nop, scrypt_romix_nop, expected);
#endif
#if defined(SCRYPT_CHACHA_SSE2)
if (cpuflags & cpu_sse2)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_sse2, scrypt_romix_nop, scrypt_romix_nop, expected);
#endif
#if defined(SCRYPT_CHACHA_BASIC)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_basic, scrypt_romix_convert_endian, scrypt_romix_convert_endian, expected);
#endif
return ret;
}

View file

@ -0,0 +1,48 @@
#if defined(SCRYPT_BLAKE512)
#include "scrypt-jane-hash_blake512.h"
#elif defined(SCRYPT_BLAKE256)
#include "scrypt-jane-hash_blake256.h"
#elif defined(SCRYPT_SHA512)
#include "scrypt-jane-hash_sha512.h"
#elif defined(SCRYPT_SHA256)
#include "scrypt-jane-hash_sha256.h"
#elif defined(SCRYPT_SKEIN512)
#include "scrypt-jane-hash_skein512.h"
#elif defined(SCRYPT_KECCAK512) || defined(SCRYPT_KECCAK256)
#include "scrypt-jane-hash_keccak.h"
#else
#define SCRYPT_HASH "ERROR"
#define SCRYPT_HASH_BLOCK_SIZE 64
#define SCRYPT_HASH_DIGEST_SIZE 64
typedef struct scrypt_hash_state_t { size_t dummy; } scrypt_hash_state;
typedef uint8_t scrypt_hash_digest[SCRYPT_HASH_DIGEST_SIZE];
static void scrypt_hash_init(scrypt_hash_state *S) {}
static void scrypt_hash_update(scrypt_hash_state *S, const uint8_t *in, size_t inlen) {}
static void scrypt_hash_finish(scrypt_hash_state *S, uint8_t *hash) {}
static const uint8_t scrypt_test_hash_expected[SCRYPT_HASH_DIGEST_SIZE] = {0};
#error must define a hash function!
#endif
#include "scrypt-jane-pbkdf2.h"
#define SCRYPT_TEST_HASH_LEN 257 /* (2 * largest block size) + 1 */
static int
scrypt_test_hash(void) {
scrypt_hash_state st;
scrypt_hash_digest hash, final;
uint8_t msg[SCRYPT_TEST_HASH_LEN];
size_t i;
for (i = 0; i < SCRYPT_TEST_HASH_LEN; i++)
msg[i] = (uint8_t)i;
scrypt_hash_init(&st);
for (i = 0; i < SCRYPT_TEST_HASH_LEN + 1; i++) {
scrypt_hash(hash, msg, i);
scrypt_hash_update(&st, hash, sizeof(hash));
}
scrypt_hash_finish(&st, final);
return scrypt_verify(final, scrypt_test_hash_expected, SCRYPT_HASH_DIGEST_SIZE);
}

View file

@ -0,0 +1,177 @@
#define SCRYPT_HASH "BLAKE-256"
#define SCRYPT_HASH_BLOCK_SIZE 64
#define SCRYPT_HASH_DIGEST_SIZE 32
typedef uint8_t scrypt_hash_digest[SCRYPT_HASH_DIGEST_SIZE];
const uint8_t blake256_sigma[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,
14,10, 4, 8, 9,15,13, 6, 1,12, 0, 2,11, 7, 5, 3,
11, 8,12, 0, 5, 2,15,13,10,14, 3, 6, 7, 1, 9, 4,
7, 9, 3, 1,13,12,11,14, 2, 6, 5,10, 4, 0,15, 8,
9, 0, 5, 7, 2, 4,10,15,14, 1,11,12, 6, 8, 3,13,
2,12, 6,10, 0,11, 8, 3, 4,13, 7, 5,15,14, 1, 9,
12, 5, 1,15,14,13, 4,10, 0, 7, 6, 3, 9, 2, 8,11,
13,11, 7,14,12, 1, 3, 9, 5, 0,15, 4, 8, 6, 2,10,
6,15,14, 9,11, 3, 0, 8,12, 2,13, 7, 1, 4,10, 5,
10, 2, 8, 4, 7, 6, 1, 5,15,11, 9,14, 3,12,13, 0,
};
const uint32_t blake256_constants[16] = {
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917
};
typedef struct scrypt_hash_state_t {
uint32_t H[8], T[2];
uint32_t leftover;
uint8_t buffer[SCRYPT_HASH_BLOCK_SIZE];
} scrypt_hash_state;
static void
blake256_blocks(scrypt_hash_state *S, const uint8_t *in, size_t blocks) {
const uint8_t *sigma, *sigma_end = blake256_sigma + (10 * 16);
uint32_t m[16], v[16], h[8], t[2];
uint32_t i;
for (i = 0; i < 8; i++) h[i] = S->H[i];
for (i = 0; i < 2; i++) t[i] = S->T[i];
while (blocks--) {
t[0] += 512;
t[1] += (t[0] < 512) ? 1 : 0;
for (i = 0; i < 8; i++) v[i ] = h[i];
for (i = 0; i < 4; i++) v[i + 8] = blake256_constants[i];
for (i = 0; i < 2; i++) v[i + 12] = blake256_constants[i+4] ^ t[0];
for (i = 0; i < 2; i++) v[i + 14] = blake256_constants[i+6] ^ t[1];
for (i = 0; i < 16; i++) m[i] = U8TO32_BE(&in[i * 4]);
in += 64;
#define G(a,b,c,d,e) \
v[a] += (m[sigma[e+0]] ^ blake256_constants[sigma[e+1]]) + v[b]; \
v[d] = ROTR32(v[d] ^ v[a],16); \
v[c] += v[d]; \
v[b] = ROTR32(v[b] ^ v[c],12); \
v[a] += (m[sigma[e+1]] ^ blake256_constants[sigma[e+0]]) + v[b]; \
v[d] = ROTR32(v[d] ^ v[a], 8); \
v[c] += v[d]; \
v[b] = ROTR32(v[b] ^ v[c], 7);
for (i = 0, sigma = blake256_sigma; i < 14; i++) {
G(0, 4, 8,12, 0);
G(1, 5, 9,13, 2);
G(2, 6,10,14, 4);
G(3, 7,11,15, 6);
G(0, 5,10,15, 8);
G(1, 6,11,12,10);
G(2, 7, 8,13,12);
G(3, 4, 9,14,14);
sigma += 16;
if (sigma == sigma_end)
sigma = blake256_sigma;
}
#undef G
for (i = 0; i < 8; i++) h[i] ^= (v[i] ^ v[i + 8]);
}
for (i = 0; i < 8; i++) S->H[i] = h[i];
for (i = 0; i < 2; i++) S->T[i] = t[i];
}
static void
scrypt_hash_init(scrypt_hash_state *S) {
S->H[0] = 0x6a09e667ULL;
S->H[1] = 0xbb67ae85ULL;
S->H[2] = 0x3c6ef372ULL;
S->H[3] = 0xa54ff53aULL;
S->H[4] = 0x510e527fULL;
S->H[5] = 0x9b05688cULL;
S->H[6] = 0x1f83d9abULL;
S->H[7] = 0x5be0cd19ULL;
S->T[0] = 0;
S->T[1] = 0;
S->leftover = 0;
}
static void
scrypt_hash_update(scrypt_hash_state *S, const uint8_t *in, size_t inlen) {
size_t blocks, want;
/* handle the previous data */
if (S->leftover) {
want = (SCRYPT_HASH_BLOCK_SIZE - S->leftover);
want = (want < inlen) ? want : inlen;
memcpy(S->buffer + S->leftover, in, want);
S->leftover += (uint32_t)want;
if (S->leftover < SCRYPT_HASH_BLOCK_SIZE)
return;
in += want;
inlen -= want;
blake256_blocks(S, S->buffer, 1);
}
/* handle the current data */
blocks = (inlen & ~(SCRYPT_HASH_BLOCK_SIZE - 1));
S->leftover = (uint32_t)(inlen - blocks);
if (blocks) {
blake256_blocks(S, in, blocks / SCRYPT_HASH_BLOCK_SIZE);
in += blocks;
}
/* handle leftover data */
if (S->leftover)
memcpy(S->buffer, in, S->leftover);
}
static void
scrypt_hash_finish(scrypt_hash_state *S, uint8_t *hash) {
uint32_t th, tl, bits;
bits = (S->leftover << 3);
tl = S->T[0] + bits;
th = S->T[1];
if (S->leftover == 0) {
S->T[0] = (uint32_t)0 - (uint32_t)512;
S->T[1] = (uint32_t)0 - (uint32_t)1;
} else if (S->T[0] == 0) {
S->T[0] = ((uint32_t)0 - (uint32_t)512) + bits;
S->T[1] = S->T[1] - 1;
} else {
S->T[0] -= (512 - bits);
}
S->buffer[S->leftover] = 0x80;
if (S->leftover <= 55) {
memset(S->buffer + S->leftover + 1, 0, 55 - S->leftover);
} else {
memset(S->buffer + S->leftover + 1, 0, 63 - S->leftover);
blake256_blocks(S, S->buffer, 1);
S->T[0] = (uint32_t)0 - (uint32_t)512;
S->T[1] = (uint32_t)0 - (uint32_t)1;
memset(S->buffer, 0, 56);
}
S->buffer[55] |= 1;
U32TO8_BE(S->buffer + 56, th);
U32TO8_BE(S->buffer + 60, tl);
blake256_blocks(S, S->buffer, 1);
U32TO8_BE(&hash[ 0], S->H[0]);
U32TO8_BE(&hash[ 4], S->H[1]);
U32TO8_BE(&hash[ 8], S->H[2]);
U32TO8_BE(&hash[12], S->H[3]);
U32TO8_BE(&hash[16], S->H[4]);
U32TO8_BE(&hash[20], S->H[5]);
U32TO8_BE(&hash[24], S->H[6]);
U32TO8_BE(&hash[28], S->H[7]);
}
static const uint8_t scrypt_test_hash_expected[SCRYPT_HASH_DIGEST_SIZE] = {
0xcc,0xa9,0x1e,0xa9,0x20,0x97,0x37,0x40,0x17,0xc0,0xa0,0x52,0x87,0xfc,0x08,0x20,
0x40,0xf5,0x81,0x86,0x62,0x75,0x78,0xb2,0x79,0xce,0xde,0x27,0x3c,0x7f,0x85,0xd8,
};

View file

@ -0,0 +1,181 @@
#define SCRYPT_HASH "BLAKE-512"
#define SCRYPT_HASH_BLOCK_SIZE 128
#define SCRYPT_HASH_DIGEST_SIZE 64
typedef uint8_t scrypt_hash_digest[SCRYPT_HASH_DIGEST_SIZE];
const uint8_t blake512_sigma[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,
14,10, 4, 8, 9,15,13, 6, 1,12, 0, 2,11, 7, 5, 3,
11, 8,12, 0, 5, 2,15,13,10,14, 3, 6, 7, 1, 9, 4,
7, 9, 3, 1,13,12,11,14, 2, 6, 5,10, 4, 0,15, 8,
9, 0, 5, 7, 2, 4,10,15,14, 1,11,12, 6, 8, 3,13,
2,12, 6,10, 0,11, 8, 3, 4,13, 7, 5,15,14, 1, 9,
12, 5, 1,15,14,13, 4,10, 0, 7, 6, 3, 9, 2, 8,11,
13,11, 7,14,12, 1, 3, 9, 5, 0,15, 4, 8, 6, 2,10,
6,15,14, 9,11, 3, 0, 8,12, 2,13, 7, 1, 4,10, 5,
10, 2, 8, 4, 7, 6, 1, 5,15,11, 9,14, 3,12,13, 0,
};
const uint64_t blake512_constants[16] = {
0x243f6a8885a308d3ULL, 0x13198a2e03707344ULL, 0xa4093822299f31d0ULL, 0x082efa98ec4e6c89ULL,
0x452821e638d01377ULL, 0xbe5466cf34e90c6cULL, 0xc0ac29b7c97c50ddULL, 0x3f84d5b5b5470917ULL,
0x9216d5d98979fb1bULL, 0xd1310ba698dfb5acULL, 0x2ffd72dbd01adfb7ULL, 0xb8e1afed6a267e96ULL,
0xba7c9045f12c7f99ULL, 0x24a19947b3916cf7ULL, 0x0801f2e2858efc16ULL, 0x636920d871574e69ULL
};
typedef struct scrypt_hash_state_t {
uint64_t H[8], T[2];
uint32_t leftover;
uint8_t buffer[SCRYPT_HASH_BLOCK_SIZE];
} scrypt_hash_state;
static void
blake512_blocks(scrypt_hash_state *S, const uint8_t *in, size_t blocks) {
const uint8_t *sigma, *sigma_end = blake512_sigma + (10 * 16);
uint64_t m[16], v[16], h[8], t[2];
uint32_t i;
for (i = 0; i < 8; i++) h[i] = S->H[i];
for (i = 0; i < 2; i++) t[i] = S->T[i];
while (blocks--) {
t[0] += 1024;
t[1] += (t[0] < 1024) ? 1 : 0;
for (i = 0; i < 8; i++) v[i ] = h[i];
for (i = 0; i < 4; i++) v[i + 8] = blake512_constants[i];
for (i = 0; i < 2; i++) v[i + 12] = blake512_constants[i+4] ^ t[0];
for (i = 0; i < 2; i++) v[i + 14] = blake512_constants[i+6] ^ t[1];
for (i = 0; i < 16; i++) m[i] = U8TO64_BE(&in[i * 8]);
in += 128;
#define G(a,b,c,d,e) \
v[a] += (m[sigma[e+0]] ^ blake512_constants[sigma[e+1]]) + v[b]; \
v[d] = ROTR64(v[d] ^ v[a],32); \
v[c] += v[d]; \
v[b] = ROTR64(v[b] ^ v[c],25); \
v[a] += (m[sigma[e+1]] ^ blake512_constants[sigma[e+0]]) + v[b]; \
v[d] = ROTR64(v[d] ^ v[a],16); \
v[c] += v[d]; \
v[b] = ROTR64(v[b] ^ v[c],11);
for (i = 0, sigma = blake512_sigma; i < 16; i++) {
G(0, 4, 8,12, 0);
G(1, 5, 9,13, 2);
G(2, 6,10,14, 4);
G(3, 7,11,15, 6);
G(0, 5,10,15, 8);
G(1, 6,11,12,10);
G(2, 7, 8,13,12);
G(3, 4, 9,14,14);
sigma += 16;
if (sigma == sigma_end)
sigma = blake512_sigma;
}
#undef G
for (i = 0; i < 8; i++) h[i] ^= (v[i] ^ v[i + 8]);
}
for (i = 0; i < 8; i++) S->H[i] = h[i];
for (i = 0; i < 2; i++) S->T[i] = t[i];
}
static void
scrypt_hash_init(scrypt_hash_state *S) {
S->H[0] = 0x6a09e667f3bcc908ULL;
S->H[1] = 0xbb67ae8584caa73bULL;
S->H[2] = 0x3c6ef372fe94f82bULL;
S->H[3] = 0xa54ff53a5f1d36f1ULL;
S->H[4] = 0x510e527fade682d1ULL;
S->H[5] = 0x9b05688c2b3e6c1fULL;
S->H[6] = 0x1f83d9abfb41bd6bULL;
S->H[7] = 0x5be0cd19137e2179ULL;
S->T[0] = 0;
S->T[1] = 0;
S->leftover = 0;
}
static void
scrypt_hash_update(scrypt_hash_state *S, const uint8_t *in, size_t inlen) {
size_t blocks, want;
/* handle the previous data */
if (S->leftover) {
want = (SCRYPT_HASH_BLOCK_SIZE - S->leftover);
want = (want < inlen) ? want : inlen;
memcpy(S->buffer + S->leftover, in, want);
S->leftover += (uint32_t)want;
if (S->leftover < SCRYPT_HASH_BLOCK_SIZE)
return;
in += want;
inlen -= want;
blake512_blocks(S, S->buffer, 1);
}
/* handle the current data */
blocks = (inlen & ~(SCRYPT_HASH_BLOCK_SIZE - 1));
S->leftover = (uint32_t)(inlen - blocks);
if (blocks) {
blake512_blocks(S, in, blocks / SCRYPT_HASH_BLOCK_SIZE);
in += blocks;
}
/* handle leftover data */
if (S->leftover)
memcpy(S->buffer, in, S->leftover);
}
static void
scrypt_hash_finish(scrypt_hash_state *S, uint8_t *hash) {
uint64_t th, tl;
size_t bits;
bits = (S->leftover << 3);
tl = S->T[0] + bits;
th = S->T[1];
if (S->leftover == 0) {
S->T[0] = (uint64_t)0 - (uint64_t)1024;
S->T[1] = (uint64_t)0 - (uint64_t)1;
} else if (S->T[0] == 0) {
S->T[0] = ((uint64_t)0 - (uint64_t)1024) + bits;
S->T[1] = S->T[1] - 1;
} else {
S->T[0] -= (1024 - bits);
}
S->buffer[S->leftover] = 0x80;
if (S->leftover <= 111) {
memset(S->buffer + S->leftover + 1, 0, 111 - S->leftover);
} else {
memset(S->buffer + S->leftover + 1, 0, 127 - S->leftover);
blake512_blocks(S, S->buffer, 1);
S->T[0] = (uint64_t)0 - (uint64_t)1024;
S->T[1] = (uint64_t)0 - (uint64_t)1;
memset(S->buffer, 0, 112);
}
S->buffer[111] |= 1;
U64TO8_BE(S->buffer + 112, th);
U64TO8_BE(S->buffer + 120, tl);
blake512_blocks(S, S->buffer, 1);
U64TO8_BE(&hash[ 0], S->H[0]);
U64TO8_BE(&hash[ 8], S->H[1]);
U64TO8_BE(&hash[16], S->H[2]);
U64TO8_BE(&hash[24], S->H[3]);
U64TO8_BE(&hash[32], S->H[4]);
U64TO8_BE(&hash[40], S->H[5]);
U64TO8_BE(&hash[48], S->H[6]);
U64TO8_BE(&hash[56], S->H[7]);
}
static const uint8_t scrypt_test_hash_expected[SCRYPT_HASH_DIGEST_SIZE] = {
0x2f,0x9d,0x5b,0xbe,0x24,0x0d,0x63,0xd3,0xa0,0xac,0x4f,0xd3,0x01,0xc0,0x23,0x6f,
0x6d,0xdf,0x6e,0xfb,0x60,0x6f,0xa0,0x74,0xdf,0x9f,0x25,0x65,0xb6,0x11,0x0a,0x83,
0x23,0x96,0xba,0x91,0x68,0x4b,0x85,0x15,0x13,0x54,0xba,0x19,0xf3,0x2c,0x5a,0x4a,
0x1f,0x78,0x31,0x02,0xc9,0x1e,0x56,0xc4,0x54,0xca,0xf9,0x8f,0x2c,0x7f,0x85,0xac
};

View file

@ -0,0 +1,168 @@
#if defined(SCRYPT_KECCAK256)
#define SCRYPT_HASH "Keccak-256"
#define SCRYPT_HASH_DIGEST_SIZE 32
#else
#define SCRYPT_HASH "Keccak-512"
#define SCRYPT_HASH_DIGEST_SIZE 64
#endif
#define SCRYPT_KECCAK_F 1600
#define SCRYPT_KECCAK_C (SCRYPT_HASH_DIGEST_SIZE * 8 * 2) /* 256=512, 512=1024 */
#define SCRYPT_KECCAK_R (SCRYPT_KECCAK_F - SCRYPT_KECCAK_C) /* 256=1088, 512=576 */
#define SCRYPT_HASH_BLOCK_SIZE (SCRYPT_KECCAK_R / 8)
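/* e.g. Keccak-512: C = 64*8*2 = 1024, R = 1600-1024 = 576, block = 72 bytes;
   Keccak-256: C = 512, R = 1088, block = 136 bytes */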
typedef uint8_t scrypt_hash_digest[SCRYPT_HASH_DIGEST_SIZE];
typedef struct scrypt_hash_state_t {
uint64_t state[SCRYPT_KECCAK_F / 64];
uint32_t leftover;
uint8_t buffer[SCRYPT_HASH_BLOCK_SIZE];
} scrypt_hash_state;
static const uint64_t keccak_round_constants[24] = {
0x0000000000000001ull, 0x0000000000008082ull,
0x800000000000808aull, 0x8000000080008000ull,
0x000000000000808bull, 0x0000000080000001ull,
0x8000000080008081ull, 0x8000000000008009ull,
0x000000000000008aull, 0x0000000000000088ull,
0x0000000080008009ull, 0x000000008000000aull,
0x000000008000808bull, 0x800000000000008bull,
0x8000000000008089ull, 0x8000000000008003ull,
0x8000000000008002ull, 0x8000000000000080ull,
0x000000000000800aull, 0x800000008000000aull,
0x8000000080008081ull, 0x8000000000008080ull,
0x0000000080000001ull, 0x8000000080008008ull
};
static void
keccak_block(scrypt_hash_state *S, const uint8_t *in) {
size_t i;
uint64_t *s = S->state, t[5], u[5], v, w;
/* absorb input */
for (i = 0; i < SCRYPT_HASH_BLOCK_SIZE / 8; i++, in += 8)
s[i] ^= U8TO64_LE(in);
for (i = 0; i < 24; i++) {
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = s[0] ^ s[5] ^ s[10] ^ s[15] ^ s[20];
t[1] = s[1] ^ s[6] ^ s[11] ^ s[16] ^ s[21];
t[2] = s[2] ^ s[7] ^ s[12] ^ s[17] ^ s[22];
t[3] = s[3] ^ s[8] ^ s[13] ^ s[18] ^ s[23];
t[4] = s[4] ^ s[9] ^ s[14] ^ s[19] ^ s[24];
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
u[0] = t[4] ^ ROTL64(t[1], 1);
u[1] = t[0] ^ ROTL64(t[2], 1);
u[2] = t[1] ^ ROTL64(t[3], 1);
u[3] = t[2] ^ ROTL64(t[4], 1);
u[4] = t[3] ^ ROTL64(t[0], 1);
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
s[0] ^= u[0]; s[5] ^= u[0]; s[10] ^= u[0]; s[15] ^= u[0]; s[20] ^= u[0];
s[1] ^= u[1]; s[6] ^= u[1]; s[11] ^= u[1]; s[16] ^= u[1]; s[21] ^= u[1];
s[2] ^= u[2]; s[7] ^= u[2]; s[12] ^= u[2]; s[17] ^= u[2]; s[22] ^= u[2];
s[3] ^= u[3]; s[8] ^= u[3]; s[13] ^= u[3]; s[18] ^= u[3]; s[23] ^= u[3];
s[4] ^= u[4]; s[9] ^= u[4]; s[14] ^= u[4]; s[19] ^= u[4]; s[24] ^= u[4];
/* rho pi: b[..] = rotl(a[..], ..) */
v = s[ 1];
s[ 1] = ROTL64(s[ 6], 44);
s[ 6] = ROTL64(s[ 9], 20);
s[ 9] = ROTL64(s[22], 61);
s[22] = ROTL64(s[14], 39);
s[14] = ROTL64(s[20], 18);
s[20] = ROTL64(s[ 2], 62);
s[ 2] = ROTL64(s[12], 43);
s[12] = ROTL64(s[13], 25);
s[13] = ROTL64(s[19], 8);
s[19] = ROTL64(s[23], 56);
s[23] = ROTL64(s[15], 41);
s[15] = ROTL64(s[ 4], 27);
s[ 4] = ROTL64(s[24], 14);
s[24] = ROTL64(s[21], 2);
s[21] = ROTL64(s[ 8], 55);
s[ 8] = ROTL64(s[16], 45);
s[16] = ROTL64(s[ 5], 36);
s[ 5] = ROTL64(s[ 3], 28);
s[ 3] = ROTL64(s[18], 21);
s[18] = ROTL64(s[17], 15);
s[17] = ROTL64(s[11], 10);
s[11] = ROTL64(s[ 7], 6);
s[ 7] = ROTL64(s[10], 3);
s[10] = ROTL64( v, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
v = s[ 0]; w = s[ 1]; s[ 0] ^= (~w) & s[ 2]; s[ 1] ^= (~s[ 2]) & s[ 3]; s[ 2] ^= (~s[ 3]) & s[ 4]; s[ 3] ^= (~s[ 4]) & v; s[ 4] ^= (~v) & w;
v = s[ 5]; w = s[ 6]; s[ 5] ^= (~w) & s[ 7]; s[ 6] ^= (~s[ 7]) & s[ 8]; s[ 7] ^= (~s[ 8]) & s[ 9]; s[ 8] ^= (~s[ 9]) & v; s[ 9] ^= (~v) & w;
v = s[10]; w = s[11]; s[10] ^= (~w) & s[12]; s[11] ^= (~s[12]) & s[13]; s[12] ^= (~s[13]) & s[14]; s[13] ^= (~s[14]) & v; s[14] ^= (~v) & w;
v = s[15]; w = s[16]; s[15] ^= (~w) & s[17]; s[16] ^= (~s[17]) & s[18]; s[17] ^= (~s[18]) & s[19]; s[18] ^= (~s[19]) & v; s[19] ^= (~v) & w;
v = s[20]; w = s[21]; s[20] ^= (~w) & s[22]; s[21] ^= (~s[22]) & s[23]; s[22] ^= (~s[23]) & s[24]; s[23] ^= (~s[24]) & v; s[24] ^= (~v) & w;
/* iota: a[0,0] ^= round constant */
s[0] ^= keccak_round_constants[i];
}
}
static void
scrypt_hash_init(scrypt_hash_state *S) {
memset(S, 0, sizeof(*S));
}
static void
scrypt_hash_update(scrypt_hash_state *S, const uint8_t *in, size_t inlen) {
size_t want;
/* handle the previous data */
if (S->leftover) {
want = (SCRYPT_HASH_BLOCK_SIZE - S->leftover);
want = (want < inlen) ? want : inlen;
memcpy(S->buffer + S->leftover, in, want);
S->leftover += (uint32_t)want;
if (S->leftover < SCRYPT_HASH_BLOCK_SIZE)
return;
in += want;
inlen -= want;
keccak_block(S, S->buffer);
}
/* handle the current data */
while (inlen >= SCRYPT_HASH_BLOCK_SIZE) {
keccak_block(S, in);
in += SCRYPT_HASH_BLOCK_SIZE;
inlen -= SCRYPT_HASH_BLOCK_SIZE;
}
/* handle leftover data */
S->leftover = (uint32_t)inlen;
if (S->leftover)
memcpy(S->buffer, in, S->leftover);
}
static void
scrypt_hash_finish(scrypt_hash_state *S, uint8_t *hash) {
size_t i;
S->buffer[S->leftover] = 0x01;
memset(S->buffer + (S->leftover + 1), 0, SCRYPT_HASH_BLOCK_SIZE - (S->leftover + 1));
S->buffer[SCRYPT_HASH_BLOCK_SIZE - 1] |= 0x80;
keccak_block(S, S->buffer);
for (i = 0; i < SCRYPT_HASH_DIGEST_SIZE; i += 8) {
U64TO8_LE(&hash[i], S->state[i / 8]);
}
}
#if defined(SCRYPT_KECCAK256)
static const uint8_t scrypt_test_hash_expected[SCRYPT_HASH_DIGEST_SIZE] = {
0x26,0xb7,0x10,0xb3,0x66,0xb1,0xd1,0xb1,0x25,0xfc,0x3e,0xe3,0x1e,0x33,0x1d,0x19,
0x94,0xaa,0x63,0x7a,0xd5,0x77,0x29,0xb4,0x27,0xe9,0xe0,0xf4,0x19,0xba,0x68,0xea,
};
#else
static const uint8_t scrypt_test_hash_expected[SCRYPT_HASH_DIGEST_SIZE] = {
0x17,0xc7,0x8c,0xa0,0xd9,0x08,0x1d,0xba,0x8a,0xc8,0x3e,0x07,0x90,0xda,0x91,0x88,
0x25,0xbd,0xd3,0xf8,0x78,0x4a,0x8d,0x5e,0xe4,0x96,0x9c,0x01,0xf3,0xeb,0xdc,0x12,
0xea,0x35,0x57,0xba,0x94,0xb8,0xe9,0xb9,0x27,0x45,0x0a,0x48,0x5c,0x3d,0x69,0xf0,
0xdb,0x22,0x38,0xb5,0x52,0x22,0x29,0xea,0x7a,0xb2,0xe6,0x07,0xaa,0x37,0x4d,0xe6,
};
#endif

View file

@ -0,0 +1,135 @@
#define SCRYPT_HASH "SHA-2-256"
#define SCRYPT_HASH_BLOCK_SIZE 64
#define SCRYPT_HASH_DIGEST_SIZE 32
typedef uint8_t scrypt_hash_digest[SCRYPT_HASH_DIGEST_SIZE];
typedef struct scrypt_hash_state_t {
uint32_t H[8];
uint64_t T;
uint32_t leftover;
uint8_t buffer[SCRYPT_HASH_BLOCK_SIZE];
} scrypt_hash_state;
static const uint32_t sha256_constants[64] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
#define Ch(x,y,z) (z ^ (x & (y ^ z)))
#define Maj(x,y,z) (((x | y) & z) | (x & y))
#define S0(x) (ROTR32(x, 2) ^ ROTR32(x, 13) ^ ROTR32(x, 22))
#define S1(x) (ROTR32(x, 6) ^ ROTR32(x, 11) ^ ROTR32(x, 25))
#define G0(x) (ROTR32(x, 7) ^ ROTR32(x, 18) ^ (x >> 3))
#define G1(x) (ROTR32(x, 17) ^ ROTR32(x, 19) ^ (x >> 10))
#define W0(in,i) (U8TO32_BE(&in[i * 4]))
#define W1(i) (G1(w[i - 2]) + w[i - 7] + G0(w[i - 15]) + w[i - 16])
#define STEP(i) \
t1 = S0(r[0]) + Maj(r[0], r[1], r[2]); \
t0 = r[7] + S1(r[4]) + Ch(r[4], r[5], r[6]) + sha256_constants[i] + w[i]; \
r[7] = r[6]; \
r[6] = r[5]; \
r[5] = r[4]; \
r[4] = r[3] + t0; \
r[3] = r[2]; \
r[2] = r[1]; \
r[1] = r[0]; \
r[0] = t0 + t1;
static void
sha256_blocks(scrypt_hash_state *S, const uint8_t *in, size_t blocks) {
uint32_t r[8], w[64], t0, t1;
size_t i;
for (i = 0; i < 8; i++) r[i] = S->H[i];
while (blocks--) {
for (i = 0; i < 16; i++) { w[i] = W0(in, i); }
for (i = 16; i < 64; i++) { w[i] = W1(i); }
for (i = 0; i < 64; i++) { STEP(i); }
for (i = 0; i < 8; i++) { r[i] += S->H[i]; S->H[i] = r[i]; }
S->T += SCRYPT_HASH_BLOCK_SIZE * 8;
in += SCRYPT_HASH_BLOCK_SIZE;
}
}
static void
scrypt_hash_init(scrypt_hash_state *S) {
S->H[0] = 0x6a09e667;
S->H[1] = 0xbb67ae85;
S->H[2] = 0x3c6ef372;
S->H[3] = 0xa54ff53a;
S->H[4] = 0x510e527f;
S->H[5] = 0x9b05688c;
S->H[6] = 0x1f83d9ab;
S->H[7] = 0x5be0cd19;
S->T = 0;
S->leftover = 0;
}
static void
scrypt_hash_update(scrypt_hash_state *S, const uint8_t *in, size_t inlen) {
size_t blocks, want;
/* handle the previous data */
if (S->leftover) {
want = (SCRYPT_HASH_BLOCK_SIZE - S->leftover);
want = (want < inlen) ? want : inlen;
memcpy(S->buffer + S->leftover, in, want);
S->leftover += (uint32_t)want;
if (S->leftover < SCRYPT_HASH_BLOCK_SIZE)
return;
in += want;
inlen -= want;
sha256_blocks(S, S->buffer, 1);
}
/* handle the current data */
blocks = (inlen & ~(SCRYPT_HASH_BLOCK_SIZE - 1));
S->leftover = (uint32_t)(inlen - blocks);
if (blocks) {
sha256_blocks(S, in, blocks / SCRYPT_HASH_BLOCK_SIZE);
in += blocks;
}
/* handle leftover data */
if (S->leftover)
memcpy(S->buffer, in, S->leftover);
}
static void
scrypt_hash_finish(scrypt_hash_state *S, uint8_t *hash) {
uint64_t t = S->T + (S->leftover * 8);
S->buffer[S->leftover] = 0x80;
if (S->leftover <= 55) {
memset(S->buffer + S->leftover + 1, 0, 55 - S->leftover);
} else {
memset(S->buffer + S->leftover + 1, 0, 63 - S->leftover);
sha256_blocks(S, S->buffer, 1);
memset(S->buffer, 0, 56);
}
U64TO8_BE(S->buffer + 56, t);
sha256_blocks(S, S->buffer, 1);
U32TO8_BE(&hash[ 0], S->H[0]);
U32TO8_BE(&hash[ 4], S->H[1]);
U32TO8_BE(&hash[ 8], S->H[2]);
U32TO8_BE(&hash[12], S->H[3]);
U32TO8_BE(&hash[16], S->H[4]);
U32TO8_BE(&hash[20], S->H[5]);
U32TO8_BE(&hash[24], S->H[6]);
U32TO8_BE(&hash[28], S->H[7]);
}
static const uint8_t scrypt_test_hash_expected[SCRYPT_HASH_DIGEST_SIZE] = {
0xee,0x36,0xae,0xa6,0x65,0xf0,0x28,0x7d,0xc9,0xde,0xd8,0xad,0x48,0x33,0x7d,0xbf,
0xcb,0xc0,0x48,0xfa,0x5f,0x92,0xfd,0x0a,0x95,0x6f,0x34,0x8e,0x8c,0x1e,0x73,0xad,
};

View file

@ -0,0 +1,152 @@
#define SCRYPT_HASH "SHA-2-512"
#define SCRYPT_HASH_BLOCK_SIZE 128
#define SCRYPT_HASH_DIGEST_SIZE 64
typedef uint8_t scrypt_hash_digest[SCRYPT_HASH_DIGEST_SIZE];
typedef struct scrypt_hash_state_t {
uint64_t H[8];
uint64_t T[2];
uint32_t leftover;
uint8_t buffer[SCRYPT_HASH_BLOCK_SIZE];
} scrypt_hash_state;
static const uint64_t sha512_constants[80] = {
0x428a2f98d728ae22ull, 0x7137449123ef65cdull, 0xb5c0fbcfec4d3b2full, 0xe9b5dba58189dbbcull,
0x3956c25bf348b538ull, 0x59f111f1b605d019ull, 0x923f82a4af194f9bull, 0xab1c5ed5da6d8118ull,
0xd807aa98a3030242ull, 0x12835b0145706fbeull, 0x243185be4ee4b28cull, 0x550c7dc3d5ffb4e2ull,
0x72be5d74f27b896full, 0x80deb1fe3b1696b1ull, 0x9bdc06a725c71235ull, 0xc19bf174cf692694ull,
0xe49b69c19ef14ad2ull, 0xefbe4786384f25e3ull, 0x0fc19dc68b8cd5b5ull, 0x240ca1cc77ac9c65ull,
0x2de92c6f592b0275ull, 0x4a7484aa6ea6e483ull, 0x5cb0a9dcbd41fbd4ull, 0x76f988da831153b5ull,
0x983e5152ee66dfabull, 0xa831c66d2db43210ull, 0xb00327c898fb213full, 0xbf597fc7beef0ee4ull,
0xc6e00bf33da88fc2ull, 0xd5a79147930aa725ull, 0x06ca6351e003826full, 0x142929670a0e6e70ull,
0x27b70a8546d22ffcull, 0x2e1b21385c26c926ull, 0x4d2c6dfc5ac42aedull, 0x53380d139d95b3dfull,
0x650a73548baf63deull, 0x766a0abb3c77b2a8ull, 0x81c2c92e47edaee6ull, 0x92722c851482353bull,
0xa2bfe8a14cf10364ull, 0xa81a664bbc423001ull, 0xc24b8b70d0f89791ull, 0xc76c51a30654be30ull,
0xd192e819d6ef5218ull, 0xd69906245565a910ull, 0xf40e35855771202aull, 0x106aa07032bbd1b8ull,
0x19a4c116b8d2d0c8ull, 0x1e376c085141ab53ull, 0x2748774cdf8eeb99ull, 0x34b0bcb5e19b48a8ull,
0x391c0cb3c5c95a63ull, 0x4ed8aa4ae3418acbull, 0x5b9cca4f7763e373ull, 0x682e6ff3d6b2b8a3ull,
0x748f82ee5defb2fcull, 0x78a5636f43172f60ull, 0x84c87814a1f0ab72ull, 0x8cc702081a6439ecull,
0x90befffa23631e28ull, 0xa4506cebde82bde9ull, 0xbef9a3f7b2c67915ull, 0xc67178f2e372532bull,
0xca273eceea26619cull, 0xd186b8c721c0c207ull, 0xeada7dd6cde0eb1eull, 0xf57d4f7fee6ed178ull,
0x06f067aa72176fbaull, 0x0a637dc5a2c898a6ull, 0x113f9804bef90daeull, 0x1b710b35131c471bull,
0x28db77f523047d84ull, 0x32caab7b40c72493ull, 0x3c9ebe0a15c9bebcull, 0x431d67c49c100d4cull,
0x4cc5d4becb3e42b6ull, 0x597f299cfc657e2aull, 0x5fcb6fab3ad6faecull, 0x6c44198c4a475817ull
};
#define Ch(x,y,z) (z ^ (x & (y ^ z)))
#define Maj(x,y,z) (((x | y) & z) | (x & y))
#define S0(x) (ROTR64(x, 28) ^ ROTR64(x, 34) ^ ROTR64(x, 39))
#define S1(x) (ROTR64(x, 14) ^ ROTR64(x, 18) ^ ROTR64(x, 41))
#define G0(x) (ROTR64(x, 1) ^ ROTR64(x, 8) ^ (x >> 7))
#define G1(x) (ROTR64(x, 19) ^ ROTR64(x, 61) ^ (x >> 6))
#define W0(in,i) (U8TO64_BE(&in[i * 8]))
#define W1(i) (G1(w[i - 2]) + w[i - 7] + G0(w[i - 15]) + w[i - 16])
#define STEP(i) \
t1 = S0(r[0]) + Maj(r[0], r[1], r[2]); \
t0 = r[7] + S1(r[4]) + Ch(r[4], r[5], r[6]) + sha512_constants[i] + w[i]; \
r[7] = r[6]; \
r[6] = r[5]; \
r[5] = r[4]; \
r[4] = r[3] + t0; \
r[3] = r[2]; \
r[2] = r[1]; \
r[1] = r[0]; \
r[0] = t0 + t1;
static void
sha512_blocks(scrypt_hash_state *S, const uint8_t *in, size_t blocks) {
uint64_t r[8], w[80], t0, t1;
size_t i;
for (i = 0; i < 8; i++) r[i] = S->H[i];
while (blocks--) {
for (i = 0; i < 16; i++) { w[i] = W0(in, i); }
for (i = 16; i < 80; i++) { w[i] = W1(i); }
for (i = 0; i < 80; i++) { STEP(i); }
for (i = 0; i < 8; i++) { r[i] += S->H[i]; S->H[i] = r[i]; }
S->T[0] += SCRYPT_HASH_BLOCK_SIZE * 8;
S->T[1] += (!S->T[0]) ? 1 : 0;
in += SCRYPT_HASH_BLOCK_SIZE;
}
}
static void
scrypt_hash_init(scrypt_hash_state *S) {
S->H[0] = 0x6a09e667f3bcc908ull;
S->H[1] = 0xbb67ae8584caa73bull;
S->H[2] = 0x3c6ef372fe94f82bull;
S->H[3] = 0xa54ff53a5f1d36f1ull;
S->H[4] = 0x510e527fade682d1ull;
S->H[5] = 0x9b05688c2b3e6c1full;
S->H[6] = 0x1f83d9abfb41bd6bull;
S->H[7] = 0x5be0cd19137e2179ull;
S->T[0] = 0;
S->T[1] = 0;
S->leftover = 0;
}
static void
scrypt_hash_update(scrypt_hash_state *S, const uint8_t *in, size_t inlen) {
size_t blocks, want;
/* handle the previous data */
if (S->leftover) {
want = (SCRYPT_HASH_BLOCK_SIZE - S->leftover);
want = (want < inlen) ? want : inlen;
memcpy(S->buffer + S->leftover, in, want);
S->leftover += (uint32_t)want;
if (S->leftover < SCRYPT_HASH_BLOCK_SIZE)
return;
in += want;
inlen -= want;
sha512_blocks(S, S->buffer, 1);
}
/* handle the current data */
blocks = (inlen & ~(SCRYPT_HASH_BLOCK_SIZE - 1));
S->leftover = (uint32_t)(inlen - blocks);
if (blocks) {
sha512_blocks(S, in, blocks / SCRYPT_HASH_BLOCK_SIZE);
in += blocks;
}
/* handle leftover data */
if (S->leftover)
memcpy(S->buffer, in, S->leftover);
}
static void
scrypt_hash_finish(scrypt_hash_state *S, uint8_t *hash) {
uint64_t t0 = S->T[0] + (S->leftover * 8), t1 = S->T[1];
S->buffer[S->leftover] = 0x80;
if (S->leftover <= 111) {
memset(S->buffer + S->leftover + 1, 0, 111 - S->leftover);
} else {
memset(S->buffer + S->leftover + 1, 0, 127 - S->leftover);
sha512_blocks(S, S->buffer, 1);
memset(S->buffer, 0, 112);
}
U64TO8_BE(S->buffer + 112, t1);
U64TO8_BE(S->buffer + 120, t0);
sha512_blocks(S, S->buffer, 1);
U64TO8_BE(&hash[ 0], S->H[0]);
U64TO8_BE(&hash[ 8], S->H[1]);
U64TO8_BE(&hash[16], S->H[2]);
U64TO8_BE(&hash[24], S->H[3]);
U64TO8_BE(&hash[32], S->H[4]);
U64TO8_BE(&hash[40], S->H[5]);
U64TO8_BE(&hash[48], S->H[6]);
U64TO8_BE(&hash[56], S->H[7]);
}
static const uint8_t scrypt_test_hash_expected[SCRYPT_HASH_DIGEST_SIZE] = {
0xba,0xc3,0x80,0x2b,0x24,0x56,0x95,0x1f,0x19,0x7c,0xa2,0xd3,0x72,0x7c,0x9a,0x4d,
0x1d,0x50,0x3a,0xa9,0x12,0x27,0xd8,0xe1,0xbe,0x76,0x53,0x87,0x5a,0x1e,0x82,0xec,
0xc8,0xe1,0x6b,0x87,0xd0,0xb5,0x25,0x7e,0xe8,0x1e,0xd7,0x58,0xc6,0x2d,0xc2,0x9c,
0x06,0x31,0x8f,0x5b,0x57,0x8e,0x76,0xba,0xd5,0xf6,0xec,0xfe,0x85,0x1f,0x34,0x0c,
};

View file

@ -0,0 +1,188 @@
#define SCRYPT_HASH "Skein-512"
#define SCRYPT_HASH_BLOCK_SIZE 64
#define SCRYPT_HASH_DIGEST_SIZE 64
typedef uint8_t scrypt_hash_digest[SCRYPT_HASH_DIGEST_SIZE];
typedef struct scrypt_hash_state_t {
uint64_t X[8], T[2];
uint32_t leftover;
uint8_t buffer[SCRYPT_HASH_BLOCK_SIZE];
} scrypt_hash_state;
#include <stdio.h>
static void
skein512_blocks(scrypt_hash_state *S, const uint8_t *in, size_t blocks, size_t add) {
uint64_t X[8], key[8], Xt[9+18], T[3+1];
size_t r;
while (blocks--) {
T[0] = S->T[0] + add;
T[1] = S->T[1];
T[2] = T[0] ^ T[1];
key[0] = U8TO64_LE(in + 0); Xt[0] = S->X[0]; X[0] = key[0] + Xt[0];
key[1] = U8TO64_LE(in + 8); Xt[1] = S->X[1]; X[1] = key[1] + Xt[1];
key[2] = U8TO64_LE(in + 16); Xt[2] = S->X[2]; X[2] = key[2] + Xt[2];
key[3] = U8TO64_LE(in + 24); Xt[3] = S->X[3]; X[3] = key[3] + Xt[3];
key[4] = U8TO64_LE(in + 32); Xt[4] = S->X[4]; X[4] = key[4] + Xt[4];
key[5] = U8TO64_LE(in + 40); Xt[5] = S->X[5]; X[5] = key[5] + Xt[5] + T[0];
key[6] = U8TO64_LE(in + 48); Xt[6] = S->X[6]; X[6] = key[6] + Xt[6] + T[1];
key[7] = U8TO64_LE(in + 56); Xt[7] = S->X[7]; X[7] = key[7] + Xt[7];
Xt[8] = 0x1BD11BDAA9FC1A22ull ^ Xt[0] ^ Xt[1] ^ Xt[2] ^ Xt[3] ^ Xt[4] ^ Xt[5] ^ Xt[6] ^ Xt[7];
in += SCRYPT_HASH_BLOCK_SIZE;
for (r = 0; r < 18; r++)
Xt[r + 9] = Xt[r + 0];
for (r = 0; r < 18; r += 2) {
X[0] += X[1]; X[1] = ROTL64(X[1], 46) ^ X[0];
X[2] += X[3]; X[3] = ROTL64(X[3], 36) ^ X[2];
X[4] += X[5]; X[5] = ROTL64(X[5], 19) ^ X[4];
X[6] += X[7]; X[7] = ROTL64(X[7], 37) ^ X[6];
X[2] += X[1]; X[1] = ROTL64(X[1], 33) ^ X[2];
X[0] += X[3]; X[3] = ROTL64(X[3], 42) ^ X[0];
X[6] += X[5]; X[5] = ROTL64(X[5], 14) ^ X[6];
X[4] += X[7]; X[7] = ROTL64(X[7], 27) ^ X[4];
X[4] += X[1]; X[1] = ROTL64(X[1], 17) ^ X[4];
X[6] += X[3]; X[3] = ROTL64(X[3], 49) ^ X[6];
X[0] += X[5]; X[5] = ROTL64(X[5], 36) ^ X[0];
X[2] += X[7]; X[7] = ROTL64(X[7], 39) ^ X[2];
X[6] += X[1]; X[1] = ROTL64(X[1], 44) ^ X[6];
X[4] += X[3]; X[3] = ROTL64(X[3], 56) ^ X[4];
X[2] += X[5]; X[5] = ROTL64(X[5], 54) ^ X[2];
X[0] += X[7]; X[7] = ROTL64(X[7], 9) ^ X[0];
X[0] += Xt[r + 1];
X[1] += Xt[r + 2];
X[2] += Xt[r + 3];
X[3] += Xt[r + 4];
X[4] += Xt[r + 5];
X[5] += Xt[r + 6] + T[1];
X[6] += Xt[r + 7] + T[2];
X[7] += Xt[r + 8] + r + 1;
T[3] = T[0];
T[0] = T[1];
T[1] = T[2];
T[2] = T[3];
X[0] += X[1]; X[1] = ROTL64(X[1], 39) ^ X[0];
X[2] += X[3]; X[3] = ROTL64(X[3], 30) ^ X[2];
X[4] += X[5]; X[5] = ROTL64(X[5], 34) ^ X[4];
X[6] += X[7]; X[7] = ROTL64(X[7], 24) ^ X[6];
X[2] += X[1]; X[1] = ROTL64(X[1], 13) ^ X[2];
X[0] += X[3]; X[3] = ROTL64(X[3], 17) ^ X[0];
X[6] += X[5]; X[5] = ROTL64(X[5], 10) ^ X[6];
X[4] += X[7]; X[7] = ROTL64(X[7], 50) ^ X[4];
X[4] += X[1]; X[1] = ROTL64(X[1], 25) ^ X[4];
X[6] += X[3]; X[3] = ROTL64(X[3], 29) ^ X[6];
X[0] += X[5]; X[5] = ROTL64(X[5], 39) ^ X[0];
X[2] += X[7]; X[7] = ROTL64(X[7], 43) ^ X[2];
X[6] += X[1]; X[1] = ROTL64(X[1], 8) ^ X[6];
X[4] += X[3]; X[3] = ROTL64(X[3], 22) ^ X[4];
X[2] += X[5]; X[5] = ROTL64(X[5], 56) ^ X[2];
X[0] += X[7]; X[7] = ROTL64(X[7], 35) ^ X[0];
X[0] += Xt[r + 2];
X[1] += Xt[r + 3];
X[2] += Xt[r + 4];
X[3] += Xt[r + 5];
X[4] += Xt[r + 6];
X[5] += Xt[r + 7] + T[1];
X[6] += Xt[r + 8] + T[2];
X[7] += Xt[r + 9] + r + 2;
T[3] = T[0];
T[0] = T[1];
T[1] = T[2];
T[2] = T[3];
}
S->X[0] = key[0] ^ X[0];
S->X[1] = key[1] ^ X[1];
S->X[2] = key[2] ^ X[2];
S->X[3] = key[3] ^ X[3];
S->X[4] = key[4] ^ X[4];
S->X[5] = key[5] ^ X[5];
S->X[6] = key[6] ^ X[6];
S->X[7] = key[7] ^ X[7];
S->T[0] = T[0];
S->T[1] = T[1] & ~0x4000000000000000ull;
}
}
static void
scrypt_hash_init(scrypt_hash_state *S) {
S->X[0] = 0x4903ADFF749C51CEull;
S->X[1] = 0x0D95DE399746DF03ull;
S->X[2] = 0x8FD1934127C79BCEull;
S->X[3] = 0x9A255629FF352CB1ull;
S->X[4] = 0x5DB62599DF6CA7B0ull;
S->X[5] = 0xEABE394CA9D5C3F4ull;
S->X[6] = 0x991112C71A75B523ull;
S->X[7] = 0xAE18A40B660FCC33ull;
S->T[0] = 0x0000000000000000ull;
S->T[1] = 0x7000000000000000ull;
S->leftover = 0;
}
static void
scrypt_hash_update(scrypt_hash_state *S, const uint8_t *in, size_t inlen) {
size_t blocks, want;
/* skein processes the final <=64 bytes raw, so we can only update if there are at least 64+1 bytes available */
if ((S->leftover + inlen) > SCRYPT_HASH_BLOCK_SIZE) {
/* handle the previous data, we know there is enough for at least one block */
if (S->leftover) {
want = (SCRYPT_HASH_BLOCK_SIZE - S->leftover);
memcpy(S->buffer + S->leftover, in, want);
in += want;
inlen -= want;
S->leftover = 0;
skein512_blocks(S, S->buffer, 1, SCRYPT_HASH_BLOCK_SIZE);
}
/* handle the current data if there's more than one block */
if (inlen > SCRYPT_HASH_BLOCK_SIZE) {
blocks = ((inlen - 1) & ~(SCRYPT_HASH_BLOCK_SIZE - 1));
skein512_blocks(S, in, blocks / SCRYPT_HASH_BLOCK_SIZE, SCRYPT_HASH_BLOCK_SIZE);
inlen -= blocks;
in += blocks;
}
}
/* handle leftover data */
memcpy(S->buffer + S->leftover, in, inlen);
S->leftover += inlen;
}
static void
scrypt_hash_finish(scrypt_hash_state *S, uint8_t *hash) {
memset(S->buffer + S->leftover, 0, SCRYPT_HASH_BLOCK_SIZE - S->leftover);
S->T[1] |= 0x8000000000000000ull;
skein512_blocks(S, S->buffer, 1, S->leftover);
memset(S->buffer, 0, SCRYPT_HASH_BLOCK_SIZE);
S->T[0] = 0;
S->T[1] = 0xff00000000000000ull;
skein512_blocks(S, S->buffer, 1, 8);
U64TO8_LE(&hash[ 0], S->X[0]);
U64TO8_LE(&hash[ 8], S->X[1]);
U64TO8_LE(&hash[16], S->X[2]);
U64TO8_LE(&hash[24], S->X[3]);
U64TO8_LE(&hash[32], S->X[4]);
U64TO8_LE(&hash[40], S->X[5]);
U64TO8_LE(&hash[48], S->X[6]);
U64TO8_LE(&hash[56], S->X[7]);
}
static const uint8_t scrypt_test_hash_expected[SCRYPT_HASH_DIGEST_SIZE] = {
0x4d,0x52,0x29,0xff,0x10,0xbc,0xd2,0x62,0xd1,0x61,0x83,0xc8,0xe6,0xf0,0x83,0xc4,
0x9f,0xf5,0x6a,0x42,0x75,0x2a,0x26,0x4e,0xf0,0x28,0x72,0x28,0x47,0xe8,0x23,0xdf,
0x1e,0x64,0xf1,0x51,0x38,0x35,0x9d,0xc2,0x83,0xfc,0x35,0x4e,0xc0,0x52,0x5f,0x41,
0x6a,0x0b,0x7d,0xf5,0xce,0x98,0xde,0x6f,0x36,0xd8,0x51,0x15,0x78,0x78,0x93,0x67,
};

View file

@ -0,0 +1,368 @@
/* x86 */
#if defined(X86ASM_AVX) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_CHACHA_AVX
asm_naked_fn_proto(void, scrypt_ChunkMix_avx)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_avx)
a1(push ebx)
a1(push edi)
a1(push esi)
a1(push ebp)
a2(mov ebp,esp)
a2(mov edi,[ebp+20])
a2(mov esi,[ebp+24])
a2(mov eax,[ebp+28])
a2(mov ebx,[ebp+32])
a2(sub esp,64)
a2(and esp,~63)
a2(lea edx,[ebx*2])
a2(shl edx,6)
a2(lea ecx,[edx-64])
a2(and eax, eax)
a2(mov ebx, 0x01000302)
a2(vmovd xmm4, ebx)
a2(mov ebx, 0x05040706)
a2(vmovd xmm0, ebx)
a2(mov ebx, 0x09080b0a)
a2(vmovd xmm1, ebx)
a2(mov ebx, 0x0d0c0f0e)
a2(vmovd xmm2, ebx)
a2(mov ebx, 0x02010003)
a2(vmovd xmm5, ebx)
a2(mov ebx, 0x06050407)
a2(vmovd xmm3, ebx)
a2(mov ebx, 0x0a09080b)
a2(vmovd xmm6, ebx)
a2(mov ebx, 0x0e0d0c0f)
a2(vmovd xmm7, ebx)
a3(vpunpckldq xmm4, xmm4, xmm0)
a3(vpunpckldq xmm5, xmm5, xmm3)
a3(vpunpckldq xmm1, xmm1, xmm2)
a3(vpunpckldq xmm6, xmm6, xmm7)
a3(vpunpcklqdq xmm4, xmm4, xmm1)
a3(vpunpcklqdq xmm5, xmm5, xmm6)
a2(vmovdqa xmm0,[ecx+esi+0])
a2(vmovdqa xmm1,[ecx+esi+16])
a2(vmovdqa xmm2,[ecx+esi+32])
a2(vmovdqa xmm3,[ecx+esi+48])
aj(jz scrypt_ChunkMix_avx_no_xor1)
a3(vpxor xmm0,xmm0,[ecx+eax+0])
a3(vpxor xmm1,xmm1,[ecx+eax+16])
a3(vpxor xmm2,xmm2,[ecx+eax+32])
a3(vpxor xmm3,xmm3,[ecx+eax+48])
a1(scrypt_ChunkMix_avx_no_xor1:)
a2(xor ecx,ecx)
a2(xor ebx,ebx)
a1(scrypt_ChunkMix_avx_loop:)
a2(and eax, eax)
a3(vpxor xmm0,xmm0,[esi+ecx+0])
a3(vpxor xmm1,xmm1,[esi+ecx+16])
a3(vpxor xmm2,xmm2,[esi+ecx+32])
a3(vpxor xmm3,xmm3,[esi+ecx+48])
aj(jz scrypt_ChunkMix_avx_no_xor2)
a3(vpxor xmm0,xmm0,[eax+ecx+0])
a3(vpxor xmm1,xmm1,[eax+ecx+16])
a3(vpxor xmm2,xmm2,[eax+ecx+32])
a3(vpxor xmm3,xmm3,[eax+ecx+48])
a1(scrypt_ChunkMix_avx_no_xor2:)
a2(vmovdqa [esp+0],xmm0)
a2(vmovdqa [esp+16],xmm1)
a2(vmovdqa [esp+32],xmm2)
a2(vmovdqa [esp+48],xmm3)
a2(mov eax,8)
a1(scrypt_chacha_avx_loop: )
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vpshufb xmm3,xmm3,xmm4)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpxor xmm1,xmm1,xmm2)
a3(vpsrld xmm6,xmm1,20)
a3(vpslld xmm1,xmm1,12)
a3(vpxor xmm1,xmm1,xmm6)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vpshufb xmm3,xmm3,xmm5)
a3(vpshufd xmm0,xmm0,0x93)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpshufd xmm3,xmm3,0x4e)
a3(vpxor xmm1,xmm1,xmm2)
a3(vpshufd xmm2,xmm2,0x39)
a3(vpsrld xmm6,xmm1,25)
a3(vpslld xmm1,xmm1,7)
a3(vpxor xmm1,xmm1,xmm6)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vpshufb xmm3,xmm3,xmm4)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpxor xmm1,xmm1,xmm2)
a3(vpsrld xmm6,xmm1,20)
a3(vpslld xmm1,xmm1,12)
a3(vpxor xmm1,xmm1,xmm6)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vpshufb xmm3,xmm3,xmm5)
a3(vpshufd xmm0,xmm0,0x39)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpshufd xmm3,xmm3,0x4e)
a3(vpxor xmm1,xmm1,xmm2)
a3(vpshufd xmm2,xmm2,0x93)
a3(vpsrld xmm6,xmm1,25)
a3(vpslld xmm1,xmm1,7)
a3(vpxor xmm1,xmm1,xmm6)
a2(sub eax,2)
aj(ja scrypt_chacha_avx_loop)
a3(vpaddd xmm0,xmm0,[esp+0])
a3(vpaddd xmm1,xmm1,[esp+16])
a3(vpaddd xmm2,xmm2,[esp+32])
a3(vpaddd xmm3,xmm3,[esp+48])
a2(lea eax,[ebx+ecx])
a2(xor ebx,edx)
a2(and eax,~0x7f)
a2(add ecx,64)
a2(shr eax,1)
a2(add eax, edi)
a2(cmp ecx,edx)
a2(vmovdqa [eax+0],xmm0)
a2(vmovdqa [eax+16],xmm1)
a2(vmovdqa [eax+32],xmm2)
a2(vmovdqa [eax+48],xmm3)
a2(mov eax,[ebp+28])
aj(jne scrypt_ChunkMix_avx_loop)
a2(mov esp,ebp)
a1(pop ebp)
a1(pop esi)
a1(pop edi)
a1(pop ebx)
aret(16)
asm_naked_fn_end(scrypt_ChunkMix_avx)
#endif
/* x64 */
#if defined(X86_64ASM_AVX) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_CHACHA_AVX
asm_naked_fn_proto(void, scrypt_ChunkMix_avx)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_avx)
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,6)
a2(lea r9,[rcx-64])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(vmovdqa xmm0,[rax+0])
a2(vmovdqa xmm1,[rax+16])
a2(vmovdqa xmm2,[rax+32])
a2(vmovdqa xmm3,[rax+48])
a2(mov r8, 0x0504070601000302)
a2(mov rax, 0x0d0c0f0e09080b0a)
a2(movd xmm4, r8)
a2(movd xmm6, rax)
a2(mov r8, 0x0605040702010003)
a2(mov rax, 0x0e0d0c0f0a09080b)
a2(movd xmm5, r8)
a2(movd xmm7, rax)
a3(vpunpcklqdq xmm4, xmm4, xmm6)
a3(vpunpcklqdq xmm5, xmm5, xmm7)
aj(jz scrypt_ChunkMix_avx_no_xor1)
a3(vpxor xmm0,xmm0,[r9+0])
a3(vpxor xmm1,xmm1,[r9+16])
a3(vpxor xmm2,xmm2,[r9+32])
a3(vpxor xmm3,xmm3,[r9+48])
a1(scrypt_ChunkMix_avx_no_xor1:)
a2(xor r8,r8)
a2(xor r9,r9)
a1(scrypt_ChunkMix_avx_loop:)
a2(and rdx, rdx)
a3(vpxor xmm0,xmm0,[rsi+r9+0])
a3(vpxor xmm1,xmm1,[rsi+r9+16])
a3(vpxor xmm2,xmm2,[rsi+r9+32])
a3(vpxor xmm3,xmm3,[rsi+r9+48])
aj(jz scrypt_ChunkMix_avx_no_xor2)
a3(vpxor xmm0,xmm0,[rdx+r9+0])
a3(vpxor xmm1,xmm1,[rdx+r9+16])
a3(vpxor xmm2,xmm2,[rdx+r9+32])
a3(vpxor xmm3,xmm3,[rdx+r9+48])
a1(scrypt_ChunkMix_avx_no_xor2:)
a2(vmovdqa xmm8,xmm0)
a2(vmovdqa xmm9,xmm1)
a2(vmovdqa xmm10,xmm2)
a2(vmovdqa xmm11,xmm3)
a2(mov rax,8)
a1(scrypt_chacha_avx_loop: )
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vpshufb xmm3,xmm3,xmm4)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpxor xmm1,xmm1,xmm2)
a3(vpsrld xmm12,xmm1,20)
a3(vpslld xmm1,xmm1,12)
a3(vpxor xmm1,xmm1,xmm12)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vpshufb xmm3,xmm3,xmm5)
a3(vpshufd xmm0,xmm0,0x93)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpshufd xmm3,xmm3,0x4e)
a3(vpxor xmm1,xmm1,xmm2)
a3(vpshufd xmm2,xmm2,0x39)
a3(vpsrld xmm12,xmm1,25)
a3(vpslld xmm1,xmm1,7)
a3(vpxor xmm1,xmm1,xmm12)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vpshufb xmm3,xmm3,xmm4)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpxor xmm1,xmm1,xmm2)
a3(vpsrld xmm12,xmm1,20)
a3(vpslld xmm1,xmm1,12)
a3(vpxor xmm1,xmm1,xmm12)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vpshufb xmm3,xmm3,xmm5)
a3(vpshufd xmm0,xmm0,0x39)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpshufd xmm3,xmm3,0x4e)
a3(vpxor xmm1,xmm1,xmm2)
a3(vpshufd xmm2,xmm2,0x93)
a3(vpsrld xmm12,xmm1,25)
a3(vpslld xmm1,xmm1,7)
a3(vpxor xmm1,xmm1,xmm12)
a2(sub rax,2)
aj(ja scrypt_chacha_avx_loop)
a3(vpaddd xmm0,xmm0,xmm8)
a3(vpaddd xmm1,xmm1,xmm9)
a3(vpaddd xmm2,xmm2,xmm10)
a3(vpaddd xmm3,xmm3,xmm11)
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0x7f)
a2(add r9,64)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(vmovdqa [rax+0],xmm0)
a2(vmovdqa [rax+16],xmm1)
a2(vmovdqa [rax+32],xmm2)
a2(vmovdqa [rax+48],xmm3)
aj(jne scrypt_ChunkMix_avx_loop)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_avx)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_AVX) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED))
#define SCRYPT_CHACHA_AVX
static void asm_calling_convention NOINLINE
scrypt_ChunkMix_avx(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x6,t0,t1,t2,t3;
const xmmi x4 = *(xmmi *)&ssse3_rotl16_32bit, x5 = *(xmmi *)&ssse3_rotl8_32bit;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
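/* ChaCha8 core: each pass below is one column round plus one diagonal round; rotates are slli/srli pairs */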
for (rounds = 8; rounds; rounds -= 2) {
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_shuffle_epi8(x3, x4);
x2 = _mm_add_epi32(x2, x3);
x1 = _mm_xor_si128(x1, x2);
x6 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 12), _mm_srli_epi32(x6, 20));
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_shuffle_epi8(x3, x5);
x0 = _mm_shuffle_epi32(x0, 0x93);
x2 = _mm_add_epi32(x2, x3);
x3 = _mm_shuffle_epi32(x3, 0x4e);
x1 = _mm_xor_si128(x1, x2);
x2 = _mm_shuffle_epi32(x2, 0x39);
x6 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 7), _mm_srli_epi32(x6, 25));
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_shuffle_epi8(x3, x4);
x2 = _mm_add_epi32(x2, x3);
x1 = _mm_xor_si128(x1, x2);
x6 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 12), _mm_srli_epi32(x6, 20));
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_shuffle_epi8(x3, x5);
x0 = _mm_shuffle_epi32(x0, 0x39);
x2 = _mm_add_epi32(x2, x3);
x3 = _mm_shuffle_epi32(x3, 0x4e);
x1 = _mm_xor_si128(x1, x2);
x2 = _mm_shuffle_epi32(x2, 0x93);
x6 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 7), _mm_srli_epi32(x6, 25));
}
x0 = _mm_add_epi32(x0, t0);
x1 = _mm_add_epi32(x1, t1);
x2 = _mm_add_epi32(x2, t2);
x3 = _mm_add_epi32(x3, t3);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
}
}
#endif
#if defined(SCRYPT_CHACHA_AVX)
#undef SCRYPT_MIX
#define SCRYPT_MIX "ChaCha/8-AVX"
#undef SCRYPT_CHACHA_INCLUDED
#define SCRYPT_CHACHA_INCLUDED
#endif


@@ -0,0 +1,363 @@
/* x86 */
#if defined(X86ASM_SSE2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_CHACHA_SSE2
asm_naked_fn_proto(void, scrypt_ChunkMix_sse2)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_sse2)
a1(push ebx)
a1(push edi)
a1(push esi)
a1(push ebp)
a2(mov ebp,esp)
a2(mov edi,[ebp+20])
a2(mov esi,[ebp+24])
a2(mov eax,[ebp+28])
a2(mov ebx,[ebp+32])
a2(sub esp,16)
a2(and esp,~15)
a2(lea edx,[ebx*2])
a2(shl edx,6)
a2(lea ecx,[edx-64])
a2(and eax, eax)
a2(movdqa xmm0,[ecx+esi+0])
a2(movdqa xmm1,[ecx+esi+16])
a2(movdqa xmm2,[ecx+esi+32])
a2(movdqa xmm3,[ecx+esi+48])
aj(jz scrypt_ChunkMix_sse2_no_xor1)
a2(pxor xmm0,[ecx+eax+0])
a2(pxor xmm1,[ecx+eax+16])
a2(pxor xmm2,[ecx+eax+32])
a2(pxor xmm3,[ecx+eax+48])
a1(scrypt_ChunkMix_sse2_no_xor1:)
a2(xor ecx,ecx)
a2(xor ebx,ebx)
a1(scrypt_ChunkMix_sse2_loop:)
a2(and eax, eax)
a2(pxor xmm0,[esi+ecx+0])
a2(pxor xmm1,[esi+ecx+16])
a2(pxor xmm2,[esi+ecx+32])
a2(pxor xmm3,[esi+ecx+48])
aj(jz scrypt_ChunkMix_sse2_no_xor2)
a2(pxor xmm0,[eax+ecx+0])
a2(pxor xmm1,[eax+ecx+16])
a2(pxor xmm2,[eax+ecx+32])
a2(pxor xmm3,[eax+ecx+48])
a1(scrypt_ChunkMix_sse2_no_xor2:)
a2(movdqa [esp+0],xmm0)
a2(movdqa xmm4,xmm1)
a2(movdqa xmm5,xmm2)
a2(movdqa xmm7,xmm3)
a2(mov eax,8)
a1(scrypt_chacha_sse2_loop: )
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
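/* pshuflw+pshufhw with 0xb1 swap the 16-bit halves of every 32-bit lane: a 16-bit rotate without SSSE3's pshufb */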
a3(pshuflw xmm3,xmm3,0xb1)
a3(pshufhw xmm3,xmm3,0xb1)
a2(paddd xmm2,xmm3)
a2(pxor xmm1,xmm2)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,12)
a2(psrld xmm6,20)
a2(pxor xmm1,xmm6)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(movdqa xmm6,xmm3)
a2(pslld xmm3,8)
a2(psrld xmm6,24)
a2(pxor xmm3,xmm6)
a3(pshufd xmm0,xmm0,0x93)
a2(paddd xmm2,xmm3)
a3(pshufd xmm3,xmm3,0x4e)
a2(pxor xmm1,xmm2)
a3(pshufd xmm2,xmm2,0x39)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,7)
a2(psrld xmm6,25)
a2(pxor xmm1,xmm6)
a2(sub eax,2)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a3(pshuflw xmm3,xmm3,0xb1)
a3(pshufhw xmm3,xmm3,0xb1)
a2(paddd xmm2,xmm3)
a2(pxor xmm1,xmm2)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,12)
a2(psrld xmm6,20)
a2(pxor xmm1,xmm6)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(movdqa xmm6,xmm3)
a2(pslld xmm3,8)
a2(psrld xmm6,24)
a2(pxor xmm3,xmm6)
a3(pshufd xmm0,xmm0,0x39)
a2(paddd xmm2,xmm3)
a3(pshufd xmm3,xmm3,0x4e)
a2(pxor xmm1,xmm2)
a3(pshufd xmm2,xmm2,0x93)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,7)
a2(psrld xmm6,25)
a2(pxor xmm1,xmm6)
aj(ja scrypt_chacha_sse2_loop)
a2(paddd xmm0,[esp+0])
a2(paddd xmm1,xmm4)
a2(paddd xmm2,xmm5)
a2(paddd xmm3,xmm7)
a2(lea eax,[ebx+ecx])
a2(xor ebx,edx)
a2(and eax,~0x7f)
a2(add ecx,64)
a2(shr eax,1)
a2(add eax, edi)
a2(cmp ecx,edx)
a2(movdqa [eax+0],xmm0)
a2(movdqa [eax+16],xmm1)
a2(movdqa [eax+32],xmm2)
a2(movdqa [eax+48],xmm3)
a2(mov eax,[ebp+28])
aj(jne scrypt_ChunkMix_sse2_loop)
a2(mov esp,ebp)
a1(pop ebp)
a1(pop esi)
a1(pop edi)
a1(pop ebx)
aret(16)
asm_naked_fn_end(scrypt_ChunkMix_sse2)
#endif
/* x64 */
#if defined(X86_64ASM_SSE2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_CHACHA_SSE2
asm_naked_fn_proto(void, scrypt_ChunkMix_sse2)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_sse2)
a2(lea rcx,[ecx*2]) /* zero-extend the uint32_t r via ecx; win64 callers may leave garbage in the top half of rcx */
a2(shl rcx,6)
a2(lea r9,[rcx-64])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(movdqa xmm0,[rax+0])
a2(movdqa xmm1,[rax+16])
a2(movdqa xmm2,[rax+32])
a2(movdqa xmm3,[rax+48])
aj(jz scrypt_ChunkMix_sse2_no_xor1)
a2(pxor xmm0,[r9+0])
a2(pxor xmm1,[r9+16])
a2(pxor xmm2,[r9+32])
a2(pxor xmm3,[r9+48])
a1(scrypt_ChunkMix_sse2_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_sse2_loop:)
a2(and rdx, rdx)
a2(pxor xmm0,[rsi+r9+0])
a2(pxor xmm1,[rsi+r9+16])
a2(pxor xmm2,[rsi+r9+32])
a2(pxor xmm3,[rsi+r9+48])
aj(jz scrypt_ChunkMix_sse2_no_xor2)
a2(pxor xmm0,[rdx+r9+0])
a2(pxor xmm1,[rdx+r9+16])
a2(pxor xmm2,[rdx+r9+32])
a2(pxor xmm3,[rdx+r9+48])
a1(scrypt_ChunkMix_sse2_no_xor2:)
a2(movdqa xmm8,xmm0)
a2(movdqa xmm9,xmm1)
a2(movdqa xmm10,xmm2)
a2(movdqa xmm11,xmm3)
a2(mov rax,8)
a1(scrypt_chacha_sse2_loop: )
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a3(pshuflw xmm3,xmm3,0xb1)
a3(pshufhw xmm3,xmm3,0xb1)
a2(paddd xmm2,xmm3)
a2(pxor xmm1,xmm2)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,12)
a2(psrld xmm6,20)
a2(pxor xmm1,xmm6)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(movdqa xmm6,xmm3)
a2(pslld xmm3,8)
a2(psrld xmm6,24)
a2(pxor xmm3,xmm6)
a3(pshufd xmm0,xmm0,0x93)
a2(paddd xmm2,xmm3)
a3(pshufd xmm3,xmm3,0x4e)
a2(pxor xmm1,xmm2)
a3(pshufd xmm2,xmm2,0x39)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,7)
a2(psrld xmm6,25)
a2(pxor xmm1,xmm6)
a2(sub rax,2)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a3(pshuflw xmm3,xmm3,0xb1)
a3(pshufhw xmm3,xmm3,0xb1)
a2(paddd xmm2,xmm3)
a2(pxor xmm1,xmm2)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,12)
a2(psrld xmm6,20)
a2(pxor xmm1,xmm6)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(movdqa xmm6,xmm3)
a2(pslld xmm3,8)
a2(psrld xmm6,24)
a2(pxor xmm3,xmm6)
a3(pshufd xmm0,xmm0,0x39)
a2(paddd xmm2,xmm3)
a3(pshufd xmm3,xmm3,0x4e)
a2(pxor xmm1,xmm2)
a3(pshufd xmm2,xmm2,0x93)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,7)
a2(psrld xmm6,25)
a2(pxor xmm1,xmm6)
aj(ja scrypt_chacha_sse2_loop)
a2(paddd xmm0,xmm8)
a2(paddd xmm1,xmm9)
a2(paddd xmm2,xmm10)
a2(paddd xmm3,xmm11)
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0x7f)
a2(add r9,64)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(movdqa [rax+0],xmm0)
a2(movdqa [rax+16],xmm1)
a2(movdqa [rax+32],xmm2)
a2(movdqa [rax+48],xmm3)
aj(jne scrypt_ChunkMix_sse2_loop)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_sse2)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_SSE2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED))
#define SCRYPT_CHACHA_SSE2
static void NOINLINE asm_calling_convention
scrypt_ChunkMix_sse2(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,t0,t1,t2,t3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
for (rounds = 8; rounds; rounds -= 2) {
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x4 = x3;
x3 = _mm_shufflehi_epi16(_mm_shufflelo_epi16(x3, 0xb1), 0xb1);
x2 = _mm_add_epi32(x2, x3);
x1 = _mm_xor_si128(x1, x2);
x4 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 12), _mm_srli_epi32(x4, 20));
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x4 = x3;
x3 = _mm_or_si128(_mm_slli_epi32(x3, 8), _mm_srli_epi32(x4, 24));
x0 = _mm_shuffle_epi32(x0, 0x93);
x2 = _mm_add_epi32(x2, x3);
x3 = _mm_shuffle_epi32(x3, 0x4e);
x1 = _mm_xor_si128(x1, x2);
x2 = _mm_shuffle_epi32(x2, 0x39);
x4 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 7), _mm_srli_epi32(x4, 25));
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x4 = x3;
x3 = _mm_shufflehi_epi16(_mm_shufflelo_epi16(x3, 0xb1), 0xb1);
x2 = _mm_add_epi32(x2, x3);
x1 = _mm_xor_si128(x1, x2);
x4 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 12), _mm_srli_epi32(x4, 20));
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x4 = x3;
x3 = _mm_or_si128(_mm_slli_epi32(x3, 8), _mm_srli_epi32(x4, 24));
x0 = _mm_shuffle_epi32(x0, 0x39);
x2 = _mm_add_epi32(x2, x3);
x3 = _mm_shuffle_epi32(x3, 0x4e);
x1 = _mm_xor_si128(x1, x2);
x2 = _mm_shuffle_epi32(x2, 0x93);
x4 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 7), _mm_srli_epi32(x4, 25));
}
x0 = _mm_add_epi32(x0, t0);
x1 = _mm_add_epi32(x1, t1);
x2 = _mm_add_epi32(x2, t2);
x3 = _mm_add_epi32(x3, t3);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
}
}
#endif
#if defined(SCRYPT_CHACHA_SSE2)
#undef SCRYPT_MIX
#define SCRYPT_MIX "ChaCha/8-SSE2"
#undef SCRYPT_CHACHA_INCLUDED
#define SCRYPT_CHACHA_INCLUDED
#endif


@@ -0,0 +1,376 @@
/* x86 */
#if defined(X86ASM_SSSE3) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_CHACHA_SSSE3
asm_naked_fn_proto(void, scrypt_ChunkMix_ssse3)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_ssse3)
a1(push ebx)
a1(push edi)
a1(push esi)
a1(push ebp)
a2(mov ebp,esp)
a2(mov edi,[ebp+20])
a2(mov esi,[ebp+24])
a2(mov eax,[ebp+28])
a2(mov ebx,[ebp+32])
a2(sub esp,64)
a2(and esp,~63)
a2(lea edx,[ebx*2])
a2(shl edx,6)
a2(lea ecx,[edx-64])
a2(and eax, eax)
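/* assemble the rotl16 (xmm4) and rotl8 (xmm5) pshufb masks from 32-bit pieces; x86 has no 64-bit immediate moves */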
a2(mov ebx, 0x01000302)
a2(movd xmm4, ebx)
a2(mov ebx, 0x05040706)
a2(movd xmm0, ebx)
a2(mov ebx, 0x09080b0a)
a2(movd xmm1, ebx)
a2(mov ebx, 0x0d0c0f0e)
a2(movd xmm2, ebx)
a2(mov ebx, 0x02010003)
a2(movd xmm5, ebx)
a2(mov ebx, 0x06050407)
a2(movd xmm3, ebx)
a2(mov ebx, 0x0a09080b)
a2(movd xmm6, ebx)
a2(mov ebx, 0x0e0d0c0f)
a2(movd xmm7, ebx)
a2(punpckldq xmm4, xmm0)
a2(punpckldq xmm5, xmm3)
a2(punpckldq xmm1, xmm2)
a2(punpckldq xmm6, xmm7)
a2(punpcklqdq xmm4, xmm1)
a2(punpcklqdq xmm5, xmm6)
a2(movdqa xmm0,[ecx+esi+0])
a2(movdqa xmm1,[ecx+esi+16])
a2(movdqa xmm2,[ecx+esi+32])
a2(movdqa xmm3,[ecx+esi+48])
aj(jz scrypt_ChunkMix_ssse3_no_xor1)
a2(pxor xmm0,[ecx+eax+0])
a2(pxor xmm1,[ecx+eax+16])
a2(pxor xmm2,[ecx+eax+32])
a2(pxor xmm3,[ecx+eax+48])
a1(scrypt_ChunkMix_ssse3_no_xor1:)
a2(xor ecx,ecx)
a2(xor ebx,ebx)
a1(scrypt_ChunkMix_ssse3_loop:)
a2(and eax, eax)
a2(pxor xmm0,[esi+ecx+0])
a2(pxor xmm1,[esi+ecx+16])
a2(pxor xmm2,[esi+ecx+32])
a2(pxor xmm3,[esi+ecx+48])
aj(jz scrypt_ChunkMix_ssse3_no_xor2)
a2(pxor xmm0,[eax+ecx+0])
a2(pxor xmm1,[eax+ecx+16])
a2(pxor xmm2,[eax+ecx+32])
a2(pxor xmm3,[eax+ecx+48])
a1(scrypt_ChunkMix_ssse3_no_xor2:)
a2(movdqa [esp+0],xmm0)
a2(movdqa [esp+16],xmm1)
a2(movdqa [esp+32],xmm2)
a2(movdqa xmm7,xmm3)
a2(mov eax,8)
a1(scrypt_chacha_ssse3_loop: )
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(pshufb xmm3,xmm4)
a2(paddd xmm2,xmm3)
a2(pxor xmm1,xmm2)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,12)
a2(psrld xmm6,20)
a2(pxor xmm1,xmm6)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(pshufb xmm3,xmm5)
a3(pshufd xmm0,xmm0,0x93)
a2(paddd xmm2,xmm3)
a3(pshufd xmm3,xmm3,0x4e)
a2(pxor xmm1,xmm2)
a3(pshufd xmm2,xmm2,0x39)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,7)
a2(psrld xmm6,25)
a2(pxor xmm1,xmm6)
a2(sub eax,2)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(pshufb xmm3,xmm4)
a2(paddd xmm2,xmm3)
a2(pxor xmm1,xmm2)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,12)
a2(psrld xmm6,20)
a2(pxor xmm1,xmm6)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(pshufb xmm3,xmm5)
a3(pshufd xmm0,xmm0,0x39)
a2(paddd xmm2,xmm3)
a3(pshufd xmm3,xmm3,0x4e)
a2(pxor xmm1,xmm2)
a3(pshufd xmm2,xmm2,0x93)
a2(movdqa xmm6,xmm1)
a2(pslld xmm1,7)
a2(psrld xmm6,25)
a2(pxor xmm1,xmm6)
aj(ja scrypt_chacha_ssse3_loop)
a2(paddd xmm0,[esp+0])
a2(paddd xmm1,[esp+16])
a2(paddd xmm2,[esp+32])
a2(paddd xmm3,xmm7)
a2(lea eax,[ebx+ecx])
a2(xor ebx,edx)
a2(and eax,~0x7f)
a2(add ecx,64)
a2(shr eax,1)
a2(add eax, edi)
a2(cmp ecx,edx)
a2(movdqa [eax+0],xmm0)
a2(movdqa [eax+16],xmm1)
a2(movdqa [eax+32],xmm2)
a2(movdqa [eax+48],xmm3)
a2(mov eax,[ebp+28])
aj(jne scrypt_ChunkMix_ssse3_loop)
a2(mov esp,ebp)
a1(pop ebp)
a1(pop esi)
a1(pop edi)
a1(pop ebx)
aret(16)
asm_naked_fn_end(scrypt_ChunkMix_ssse3)
#endif
/* x64 */
#if defined(X86_64ASM_SSSE3) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_CHACHA_SSSE3
asm_naked_fn_proto(void, scrypt_ChunkMix_ssse3)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_ssse3)
a2(lea rcx,[ecx*2]) /* zero-extend the uint32_t r via ecx; win64 callers may leave garbage in the top half of rcx */
a2(shl rcx,6)
a2(lea r9,[rcx-64])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(movdqa xmm0,[rax+0])
a2(movdqa xmm1,[rax+16])
a2(movdqa xmm2,[rax+32])
a2(movdqa xmm3,[rax+48])
a2(mov r8, 0x0504070601000302)
a2(mov rax, 0x0d0c0f0e09080b0a)
a2(movd xmm4, r8)
a2(movd xmm6, rax)
a2(mov r8, 0x0605040702010003)
a2(mov rax, 0x0e0d0c0f0a09080b)
a2(movd xmm5, r8)
a2(movd xmm7, rax)
a2(punpcklqdq xmm4, xmm6)
a2(punpcklqdq xmm5, xmm7)
aj(jz scrypt_ChunkMix_ssse3_no_xor1)
a2(pxor xmm0,[r9+0])
a2(pxor xmm1,[r9+16])
a2(pxor xmm2,[r9+32])
a2(pxor xmm3,[r9+48])
a1(scrypt_ChunkMix_ssse3_no_xor1:)
a2(xor r8,r8)
a2(xor r9,r9)
a1(scrypt_ChunkMix_ssse3_loop:)
a2(and rdx, rdx)
a2(pxor xmm0,[rsi+r9+0])
a2(pxor xmm1,[rsi+r9+16])
a2(pxor xmm2,[rsi+r9+32])
a2(pxor xmm3,[rsi+r9+48])
aj(jz scrypt_ChunkMix_ssse3_no_xor2)
a2(pxor xmm0,[rdx+r9+0])
a2(pxor xmm1,[rdx+r9+16])
a2(pxor xmm2,[rdx+r9+32])
a2(pxor xmm3,[rdx+r9+48])
a1(scrypt_ChunkMix_ssse3_no_xor2:)
a2(movdqa xmm8,xmm0)
a2(movdqa xmm9,xmm1)
a2(movdqa xmm10,xmm2)
a2(movdqa xmm11,xmm3)
a2(mov rax,8)
a1(scrypt_chacha_ssse3_loop: )
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(pshufb xmm3,xmm4)
a2(paddd xmm2,xmm3)
a2(pxor xmm1,xmm2)
a2(movdqa xmm12,xmm1)
a2(pslld xmm1,12)
a2(psrld xmm12,20)
a2(pxor xmm1,xmm12)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(pshufb xmm3,xmm5)
a3(pshufd xmm0,xmm0,0x93)
a2(paddd xmm2,xmm3)
a3(pshufd xmm3,xmm3,0x4e)
a2(pxor xmm1,xmm2)
a3(pshufd xmm2,xmm2,0x39)
a2(movdqa xmm12,xmm1)
a2(pslld xmm1,7)
a2(psrld xmm12,25)
a2(pxor xmm1,xmm12)
a2(sub rax,2)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(pshufb xmm3,xmm4)
a2(paddd xmm2,xmm3)
a2(pxor xmm1,xmm2)
a2(movdqa xmm12,xmm1)
a2(pslld xmm1,12)
a2(psrld xmm12,20)
a2(pxor xmm1,xmm12)
a2(paddd xmm0,xmm1)
a2(pxor xmm3,xmm0)
a2(pshufb xmm3,xmm5)
a3(pshufd xmm0,xmm0,0x39)
a2(paddd xmm2,xmm3)
a3(pshufd xmm3,xmm3,0x4e)
a2(pxor xmm1,xmm2)
a3(pshufd xmm2,xmm2,0x93)
a2(movdqa xmm12,xmm1)
a2(pslld xmm1,7)
a2(psrld xmm12,25)
a2(pxor xmm1,xmm12)
aj(ja scrypt_chacha_ssse3_loop)
a2(paddd xmm0,xmm8)
a2(paddd xmm1,xmm9)
a2(paddd xmm2,xmm10)
a2(paddd xmm3,xmm11)
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0x7f)
a2(add r9,64)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(movdqa [rax+0],xmm0)
a2(movdqa [rax+16],xmm1)
a2(movdqa [rax+32],xmm2)
a2(movdqa [rax+48],xmm3)
aj(jne scrypt_ChunkMix_ssse3_loop)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_ssse3)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_SSSE3) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED))
#define SCRYPT_CHACHA_SSSE3
static void NOINLINE asm_calling_convention
scrypt_ChunkMix_ssse3(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x6,t0,t1,t2,t3;
const xmmi x4 = *(xmmi *)&ssse3_rotl16_32bit, x5 = *(xmmi *)&ssse3_rotl8_32bit;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
for (rounds = 8; rounds; rounds -= 2) {
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_shuffle_epi8(x3, x4);
x2 = _mm_add_epi32(x2, x3);
x1 = _mm_xor_si128(x1, x2);
x6 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 12), _mm_srli_epi32(x6, 20));
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_shuffle_epi8(x3, x5);
x0 = _mm_shuffle_epi32(x0, 0x93);
x2 = _mm_add_epi32(x2, x3);
x3 = _mm_shuffle_epi32(x3, 0x4e);
x1 = _mm_xor_si128(x1, x2);
x2 = _mm_shuffle_epi32(x2, 0x39);
x6 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 7), _mm_srli_epi32(x6, 25));
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_shuffle_epi8(x3, x4);
x2 = _mm_add_epi32(x2, x3);
x1 = _mm_xor_si128(x1, x2);
x6 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 12), _mm_srli_epi32(x6, 20));
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_shuffle_epi8(x3, x5);
x0 = _mm_shuffle_epi32(x0, 0x39);
x2 = _mm_add_epi32(x2, x3);
x3 = _mm_shuffle_epi32(x3, 0x4e);
x1 = _mm_xor_si128(x1, x2);
x2 = _mm_shuffle_epi32(x2, 0x93);
x6 = x1;
x1 = _mm_or_si128(_mm_slli_epi32(x1, 7), _mm_srli_epi32(x6, 25));
}
x0 = _mm_add_epi32(x0, t0);
x1 = _mm_add_epi32(x1, t1);
x2 = _mm_add_epi32(x2, t2);
x3 = _mm_add_epi32(x3, t3);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
}
}
#endif
#if defined(SCRYPT_CHACHA_SSSE3)
#undef SCRYPT_MIX
#define SCRYPT_MIX "ChaCha/8-SSSE3"
#undef SCRYPT_CHACHA_INCLUDED
#define SCRYPT_CHACHA_INCLUDED
#endif


@@ -0,0 +1,315 @@
/* x86 */
#if defined(X86ASM_XOP) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_CHACHA_XOP
asm_naked_fn_proto(void, scrypt_ChunkMix_xop)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_xop)
a1(push ebx)
a1(push edi)
a1(push esi)
a1(push ebp)
a2(mov ebp,esp)
a2(mov edi,[ebp+20])
a2(mov esi,[ebp+24])
a2(mov eax,[ebp+28])
a2(mov ebx,[ebp+32])
a2(sub esp,64)
a2(and esp,~63)
a2(lea edx,[ebx*2])
a2(shl edx,6)
a2(lea ecx,[edx-64])
a2(and eax, eax)
a2(vmovdqa xmm0,[ecx+esi+0])
a2(vmovdqa xmm1,[ecx+esi+16])
a2(vmovdqa xmm2,[ecx+esi+32])
a2(vmovdqa xmm3,[ecx+esi+48])
aj(jz scrypt_ChunkMix_xop_no_xor1)
a3(vpxor xmm0,xmm0,[ecx+eax+0])
a3(vpxor xmm1,xmm1,[ecx+eax+16])
a3(vpxor xmm2,xmm2,[ecx+eax+32])
a3(vpxor xmm3,xmm3,[ecx+eax+48])
a1(scrypt_ChunkMix_xop_no_xor1:)
a2(xor ecx,ecx)
a2(xor ebx,ebx)
a1(scrypt_ChunkMix_xop_loop:)
a2(and eax, eax)
a3(vpxor xmm0,xmm0,[esi+ecx+0])
a3(vpxor xmm1,xmm1,[esi+ecx+16])
a3(vpxor xmm2,xmm2,[esi+ecx+32])
a3(vpxor xmm3,xmm3,[esi+ecx+48])
aj(jz scrypt_ChunkMix_xop_no_xor2)
a3(vpxor xmm0,xmm0,[eax+ecx+0])
a3(vpxor xmm1,xmm1,[eax+ecx+16])
a3(vpxor xmm2,xmm2,[eax+ecx+32])
a3(vpxor xmm3,xmm3,[eax+ecx+48])
a1(scrypt_ChunkMix_xop_no_xor2:)
a2(vmovdqa xmm4,xmm0)
a2(vmovdqa xmm5,xmm1)
a2(vmovdqa xmm6,xmm2)
a2(vmovdqa xmm7,xmm3)
a2(mov eax,8)
a1(scrypt_chacha_xop_loop: )
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
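/* XOP's vprotd rotates each 32-bit lane directly, replacing the shift/shift/xor sequences of the SSE2/AVX paths */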
a3(vprotd xmm3,xmm3,16)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpxor xmm1,xmm1,xmm2)
a3(vprotd xmm1,xmm1,12)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vprotd xmm3,xmm3,8)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpshufd xmm0,xmm0,0x93)
a3(vpxor xmm1,xmm1,xmm2)
a3(vprotd xmm1,xmm1,7)
a3(vpshufd xmm3,xmm3,0x4e)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpshufd xmm2,xmm2,0x39)
a3(vpxor xmm3,xmm3,xmm0)
a3(vprotd xmm3,xmm3,16)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpxor xmm1,xmm1,xmm2)
a3(vprotd xmm1,xmm1,12)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vprotd xmm3,xmm3,8)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpxor xmm1,xmm1,xmm2)
a3(vpshufd xmm0,xmm0,0x39)
a3(vprotd xmm1,xmm1,7)
a3(pshufd xmm3,xmm3,0x4e)
a3(pshufd xmm2,xmm2,0x93)
a2(sub eax,2)
aj(ja scrypt_chacha_xop_loop)
a3(vpaddd xmm0,xmm0,xmm4)
a3(vpaddd xmm1,xmm1,xmm5)
a3(vpaddd xmm2,xmm2,xmm6)
a3(vpaddd xmm3,xmm3,xmm7)
a2(lea eax,[ebx+ecx])
a2(xor ebx,edx)
a2(and eax,~0x7f)
a2(add ecx,64)
a2(shr eax,1)
a2(add eax, edi)
a2(cmp ecx,edx)
a2(vmovdqa [eax+0],xmm0)
a2(vmovdqa [eax+16],xmm1)
a2(vmovdqa [eax+32],xmm2)
a2(vmovdqa [eax+48],xmm3)
a2(mov eax,[ebp+28])
aj(jne scrypt_ChunkMix_xop_loop)
a2(mov esp,ebp)
a1(pop ebp)
a1(pop esi)
a1(pop edi)
a1(pop ebx)
aret(16)
asm_naked_fn_end(scrypt_ChunkMix_xop)
#endif
/* x64 */
#if defined(X86_64ASM_XOP) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_CHACHA_XOP
asm_naked_fn_proto(void, scrypt_ChunkMix_xop)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_xop)
a2(lea rcx,[ecx*2]) /* zero-extend the uint32_t r via ecx; win64 callers may leave garbage in the top half of rcx */
a2(shl rcx,6)
a2(lea r9,[rcx-64])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(vmovdqa xmm0,[rax+0])
a2(vmovdqa xmm1,[rax+16])
a2(vmovdqa xmm2,[rax+32])
a2(vmovdqa xmm3,[rax+48])
aj(jz scrypt_ChunkMix_xop_no_xor1)
a3(vpxor xmm0,xmm0,[r9+0])
a3(vpxor xmm1,xmm1,[r9+16])
a3(vpxor xmm2,xmm2,[r9+32])
a3(vpxor xmm3,xmm3,[r9+48])
a1(scrypt_ChunkMix_xop_no_xor1:)
a2(xor r8,r8)
a2(xor r9,r9)
a1(scrypt_ChunkMix_xop_loop:)
a2(and rdx, rdx)
a3(vpxor xmm0,xmm0,[rsi+r9+0])
a3(vpxor xmm1,xmm1,[rsi+r9+16])
a3(vpxor xmm2,xmm2,[rsi+r9+32])
a3(vpxor xmm3,xmm3,[rsi+r9+48])
aj(jz scrypt_ChunkMix_xop_no_xor2)
a3(vpxor xmm0,xmm0,[rdx+r9+0])
a3(vpxor xmm1,xmm1,[rdx+r9+16])
a3(vpxor xmm2,xmm2,[rdx+r9+32])
a3(vpxor xmm3,xmm3,[rdx+r9+48])
a1(scrypt_ChunkMix_xop_no_xor2:)
a2(vmovdqa xmm4,xmm0)
a2(vmovdqa xmm5,xmm1)
a2(vmovdqa xmm6,xmm2)
a2(vmovdqa xmm7,xmm3)
a2(mov rax,8)
a1(scrypt_chacha_xop_loop: )
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vprotd xmm3,xmm3,16)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpxor xmm1,xmm1,xmm2)
a3(vprotd xmm1,xmm1,12)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vprotd xmm3,xmm3,8)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpshufd xmm0,xmm0,0x93)
a3(vpxor xmm1,xmm1,xmm2)
a3(vprotd xmm1,xmm1,7)
a3(vpshufd xmm3,xmm3,0x4e)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpshufd xmm2,xmm2,0x39)
a3(vpxor xmm3,xmm3,xmm0)
a3(vprotd xmm3,xmm3,16)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpxor xmm1,xmm1,xmm2)
a3(vprotd xmm1,xmm1,12)
a3(vpaddd xmm0,xmm0,xmm1)
a3(vpxor xmm3,xmm3,xmm0)
a3(vprotd xmm3,xmm3,8)
a3(vpaddd xmm2,xmm2,xmm3)
a3(vpxor xmm1,xmm1,xmm2)
a3(vpshufd xmm0,xmm0,0x39)
a3(vprotd xmm1,xmm1,7)
a3(pshufd xmm3,xmm3,0x4e)
a3(pshufd xmm2,xmm2,0x93)
a2(sub rax,2)
aj(ja scrypt_chacha_xop_loop)
a3(vpaddd xmm0,xmm0,xmm4)
a3(vpaddd xmm1,xmm1,xmm5)
a3(vpaddd xmm2,xmm2,xmm6)
a3(vpaddd xmm3,xmm3,xmm7)
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0x7f)
a2(add r9,64)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(vmovdqa [rax+0],xmm0)
a2(vmovdqa [rax+16],xmm1)
a2(vmovdqa [rax+32],xmm2)
a2(vmovdqa [rax+48],xmm3)
aj(jne scrypt_ChunkMix_xop_loop)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_xop)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_XOP) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED))
#define SCRYPT_CHACHA_XOP
static void asm_calling_convention NOINLINE
scrypt_ChunkMix_xop(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x6,t0,t1,t2,t3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
for (rounds = 8; rounds; rounds -= 2) {
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_roti_epi32(x3, 16);
x2 = _mm_add_epi32(x2, x3);
x1 = _mm_xor_si128(x1, x2);
x1 = _mm_roti_epi32(x1, 12);
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_roti_epi32(x3, 8);
x2 = _mm_add_epi32(x2, x3);
x0 = _mm_shuffle_epi32(x0, 0x93);
x1 = _mm_xor_si128(x1, x2);
x1 = _mm_roti_epi32(x1, 7);
x3 = _mm_shuffle_epi32(x3, 0x4e);
x0 = _mm_add_epi32(x0, x1);
x2 = _mm_shuffle_epi32(x2, 0x39);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_roti_epi32(x3, 16);
x2 = _mm_add_epi32(x2, x3);
x1 = _mm_xor_si128(x1, x2);
x1 = _mm_roti_epi32(x1, 12);
x0 = _mm_add_epi32(x0, x1);
x3 = _mm_xor_si128(x3, x0);
x3 = _mm_roti_epi32(x3, 8);
x2 = _mm_add_epi32(x2, x3);
x1 = _mm_xor_si128(x1, x2);
x0 = _mm_shuffle_epi32(x0, 0x39);
x1 = _mm_roti_epi32(x1, 7);
x3 = _mm_shuffle_epi32(x3, 0x4e);
x2 = _mm_shuffle_epi32(x2, 0x93);
}
x0 = _mm_add_epi32(x0, t0);
x1 = _mm_add_epi32(x1, t1);
x2 = _mm_add_epi32(x2, t2);
x3 = _mm_add_epi32(x3, t3);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
}
}
#endif
#if defined(SCRYPT_CHACHA_XOP)
#undef SCRYPT_MIX
#define SCRYPT_MIX "ChaCha/8-XOP"
#undef SCRYPT_CHACHA_INCLUDED
#define SCRYPT_CHACHA_INCLUDED
#endif


@@ -0,0 +1,69 @@
#if !defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_CHACHA_INCLUDED)
#undef SCRYPT_MIX
#define SCRYPT_MIX "ChaCha20/8 Ref"
#undef SCRYPT_CHACHA_INCLUDED
#define SCRYPT_CHACHA_INCLUDED
#define SCRYPT_CHACHA_BASIC
static void
chacha_core_basic(uint32_t state[16]) {
size_t rounds = 8;
uint32_t x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,t;
x0 = state[0];
x1 = state[1];
x2 = state[2];
x3 = state[3];
x4 = state[4];
x5 = state[5];
x6 = state[6];
x7 = state[7];
x8 = state[8];
x9 = state[9];
x10 = state[10];
x11 = state[11];
x12 = state[12];
x13 = state[13];
x14 = state[14];
x15 = state[15];
#define quarter(a,b,c,d) \
a += b; t = d^a; d = ROTL32(t,16); \
c += d; t = b^c; b = ROTL32(t,12); \
a += b; t = d^a; d = ROTL32(t, 8); \
c += d; t = b^c; b = ROTL32(t, 7);
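/* one column round followed by one diagonal round: a ChaCha double round per loop iteration */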
for (; rounds; rounds -= 2) {
quarter( x0, x4, x8,x12)
quarter( x1, x5, x9,x13)
quarter( x2, x6,x10,x14)
quarter( x3, x7,x11,x15)
quarter( x0, x5,x10,x15)
quarter( x1, x6,x11,x12)
quarter( x2, x7, x8,x13)
quarter( x3, x4, x9,x14)
}
state[0] += x0;
state[1] += x1;
state[2] += x2;
state[3] += x3;
state[4] += x4;
state[5] += x5;
state[6] += x6;
state[7] += x7;
state[8] += x8;
state[9] += x9;
state[10] += x10;
state[11] += x11;
state[12] += x12;
state[13] += x13;
state[14] += x14;
state[15] += x15;
#undef quarter
}
#endif


@@ -0,0 +1,381 @@
/* x86 */
#if defined(X86ASM_AVX) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA_AVX
asm_naked_fn_proto(void, scrypt_ChunkMix_avx)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_avx)
a1(push ebx)
a1(push edi)
a1(push esi)
a1(push ebp)
a2(mov ebp,esp)
a2(mov edi,[ebp+20])
a2(mov esi,[ebp+24])
a2(mov eax,[ebp+28])
a2(mov ebx,[ebp+32])
a2(sub esp,32)
a2(and esp,~63)
a2(lea edx,[ebx*2])
a2(shl edx,6)
a2(lea ecx,[edx-64])
a2(and eax, eax)
a2(movdqa xmm0,[ecx+esi+0])
a2(movdqa xmm1,[ecx+esi+16])
a2(movdqa xmm2,[ecx+esi+32])
a2(movdqa xmm3,[ecx+esi+48])
aj(jz scrypt_ChunkMix_avx_no_xor1)
a3(vpxor xmm0,xmm0,[ecx+eax+0])
a3(vpxor xmm1,xmm1,[ecx+eax+16])
a3(vpxor xmm2,xmm2,[ecx+eax+32])
a3(vpxor xmm3,xmm3,[ecx+eax+48])
a1(scrypt_ChunkMix_avx_no_xor1:)
a2(xor ecx,ecx)
a2(xor ebx,ebx)
a1(scrypt_ChunkMix_avx_loop:)
a2(and eax, eax)
a3(vpxor xmm0,xmm0,[esi+ecx+0])
a3(vpxor xmm1,xmm1,[esi+ecx+16])
a3(vpxor xmm2,xmm2,[esi+ecx+32])
a3(vpxor xmm3,xmm3,[esi+ecx+48])
aj(jz scrypt_ChunkMix_avx_no_xor2)
a3(vpxor xmm0,xmm0,[eax+ecx+0])
a3(vpxor xmm1,xmm1,[eax+ecx+16])
a3(vpxor xmm2,xmm2,[eax+ecx+32])
a3(vpxor xmm3,xmm3,[eax+ecx+48])
a1(scrypt_ChunkMix_avx_no_xor2:)
a2(vmovdqa [esp+0],xmm0)
a2(vmovdqa [esp+16],xmm1)
a2(vmovdqa xmm6,xmm2)
a2(vmovdqa xmm7,xmm3)
a2(mov eax,8)
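/* AVX has no per-lane rotate, so every ROTL32 below is a vpslld/vpsrld pair folded in with two vpxor */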
a1(scrypt_salsa_avx_loop: )
a3(vpaddd xmm4, xmm1, xmm0)
a3(vpsrld xmm5, xmm4, 25)
a3(vpslld xmm4, xmm4, 7)
a3(vpxor xmm3, xmm3, xmm5)
a3(vpxor xmm3, xmm3, xmm4)
a3(vpaddd xmm4, xmm0, xmm3)
a3(vpsrld xmm5, xmm4, 23)
a3(vpslld xmm4, xmm4, 9)
a3(vpxor xmm2, xmm2, xmm5)
a3(vpxor xmm2, xmm2, xmm4)
a3(vpaddd xmm4, xmm3, xmm2)
a3(vpsrld xmm5, xmm4, 19)
a3(vpslld xmm4, xmm4, 13)
a3(vpxor xmm1, xmm1, xmm5)
a3(vpshufd xmm3, xmm3, 0x93)
a3(vpxor xmm1, xmm1, xmm4)
a3(vpaddd xmm4, xmm2, xmm1)
a3(vpsrld xmm5, xmm4, 14)
a3(vpslld xmm4, xmm4, 18)
a3(vpxor xmm0, xmm0, xmm5)
a3(vpshufd xmm2, xmm2, 0x4e)
a3(vpxor xmm0, xmm0, xmm4)
a3(vpaddd xmm4, xmm3, xmm0)
a3(vpshufd xmm1, xmm1, 0x39)
a3(vpsrld xmm5, xmm4, 25)
a3(vpslld xmm4, xmm4, 7)
a3(vpxor xmm1, xmm1, xmm5)
a3(vpxor xmm1, xmm1, xmm4)
a3(vpaddd xmm4, xmm0, xmm1)
a3(vpsrld xmm5, xmm4, 23)
a3(vpslld xmm4, xmm4, 9)
a3(vpxor xmm2, xmm2, xmm5)
a3(vpxor xmm2, xmm2, xmm4)
a3(vpaddd xmm4, xmm1, xmm2)
a3(vpsrld xmm5, xmm4, 19)
a3(vpslld xmm4, xmm4, 13)
a3(vpxor xmm3, xmm3, xmm5)
a3(vpshufd xmm1, xmm1, 0x93)
a3(vpxor xmm3, xmm3, xmm4)
a3(vpaddd xmm4, xmm2, xmm3)
a3(vpsrld xmm5, xmm4, 14)
a3(vpslld xmm4, xmm4, 18)
a3(vpxor xmm0, xmm0, xmm5)
a3(vpshufd xmm2, xmm2, 0x4e)
a3(vpxor xmm0, xmm0, xmm4)
a3(vpshufd xmm3, xmm3, 0x39)
a2(sub eax, 2)
aj(ja scrypt_salsa_avx_loop)
a3(vpaddd xmm0,xmm0,[esp+0])
a3(vpaddd xmm1,xmm1,[esp+16])
a3(vpaddd xmm2,xmm2,xmm6)
a3(vpaddd xmm3,xmm3,xmm7)
a2(lea eax,[ebx+ecx])
a2(xor ebx,edx)
a2(and eax,~0x7f)
a2(add ecx,64)
a2(shr eax,1)
a2(add eax, edi)
a2(cmp ecx,edx)
a2(vmovdqa [eax+0],xmm0)
a2(vmovdqa [eax+16],xmm1)
a2(vmovdqa [eax+32],xmm2)
a2(vmovdqa [eax+48],xmm3)
a2(mov eax,[ebp+28])
aj(jne scrypt_ChunkMix_avx_loop)
a2(mov esp,ebp)
a1(pop ebp)
a1(pop esi)
a1(pop edi)
a1(pop ebx)
aret(16)
asm_naked_fn_end(scrypt_ChunkMix_avx)
#endif
/* x64 */
#if defined(X86_64ASM_AVX) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA_AVX
asm_naked_fn_proto(void, scrypt_ChunkMix_avx)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_avx)
a2(lea rcx,[ecx*2]) /* zero-extend the uint32_t r via ecx; win64 callers may leave garbage in the top half of rcx */
a2(shl rcx,6)
a2(lea r9,[rcx-64])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(vmovdqa xmm0,[rax+0])
a2(vmovdqa xmm1,[rax+16])
a2(vmovdqa xmm2,[rax+32])
a2(vmovdqa xmm3,[rax+48])
aj(jz scrypt_ChunkMix_avx_no_xor1)
a3(vpxor xmm0,xmm0,[r9+0])
a3(vpxor xmm1,xmm1,[r9+16])
a3(vpxor xmm2,xmm2,[r9+32])
a3(vpxor xmm3,xmm3,[r9+48])
a1(scrypt_ChunkMix_avx_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_avx_loop:)
a2(and rdx, rdx)
a3(vpxor xmm0,xmm0,[rsi+r9+0])
a3(vpxor xmm1,xmm1,[rsi+r9+16])
a3(vpxor xmm2,xmm2,[rsi+r9+32])
a3(vpxor xmm3,xmm3,[rsi+r9+48])
aj(jz scrypt_ChunkMix_avx_no_xor2)
a3(vpxor xmm0,xmm0,[rdx+r9+0])
a3(vpxor xmm1,xmm1,[rdx+r9+16])
a3(vpxor xmm2,xmm2,[rdx+r9+32])
a3(vpxor xmm3,xmm3,[rdx+r9+48])
a1(scrypt_ChunkMix_avx_no_xor2:)
a2(vmovdqa xmm8,xmm0)
a2(vmovdqa xmm9,xmm1)
a2(vmovdqa xmm10,xmm2)
a2(vmovdqa xmm11,xmm3)
a2(mov rax,8)
a1(scrypt_salsa_avx_loop: )
a3(vpaddd xmm4, xmm1, xmm0)
a3(vpsrld xmm5, xmm4, 25)
a3(vpslld xmm4, xmm4, 7)
a3(vpxor xmm3, xmm3, xmm5)
a3(vpxor xmm3, xmm3, xmm4)
a3(vpaddd xmm4, xmm0, xmm3)
a3(vpsrld xmm5, xmm4, 23)
a3(vpslld xmm4, xmm4, 9)
a3(vpxor xmm2, xmm2, xmm5)
a3(vpxor xmm2, xmm2, xmm4)
a3(vpaddd xmm4, xmm3, xmm2)
a3(vpsrld xmm5, xmm4, 19)
a3(vpslld xmm4, xmm4, 13)
a3(vpxor xmm1, xmm1, xmm5)
a3(vpshufd xmm3, xmm3, 0x93)
a3(vpxor xmm1, xmm1, xmm4)
a3(vpaddd xmm4, xmm2, xmm1)
a3(vpsrld xmm5, xmm4, 14)
a3(vpslld xmm4, xmm4, 18)
a3(vpxor xmm0, xmm0, xmm5)
a3(vpshufd xmm2, xmm2, 0x4e)
a3(vpxor xmm0, xmm0, xmm4)
a3(vpaddd xmm4, xmm3, xmm0)
a3(vpshufd xmm1, xmm1, 0x39)
a3(vpsrld xmm5, xmm4, 25)
a3(vpslld xmm4, xmm4, 7)
a3(vpxor xmm1, xmm1, xmm5)
a3(vpxor xmm1, xmm1, xmm4)
a3(vpaddd xmm4, xmm0, xmm1)
a3(vpsrld xmm5, xmm4, 23)
a3(vpslld xmm4, xmm4, 9)
a3(vpxor xmm2, xmm2, xmm5)
a3(vpxor xmm2, xmm2, xmm4)
a3(vpaddd xmm4, xmm1, xmm2)
a3(vpsrld xmm5, xmm4, 19)
a3(vpslld xmm4, xmm4, 13)
a3(vpxor xmm3, xmm3, xmm5)
a3(vpshufd xmm1, xmm1, 0x93)
a3(vpxor xmm3, xmm3, xmm4)
a3(vpaddd xmm4, xmm2, xmm3)
a3(vpsrld xmm5, xmm4, 14)
a3(vpslld xmm4, xmm4, 18)
a3(vpxor xmm0, xmm0, xmm5)
a3(vpshufd xmm2, xmm2, 0x4e)
a3(vpxor xmm0, xmm0, xmm4)
a3(vpshufd xmm3, xmm3, 0x39)
a2(sub rax, 2)
aj(ja scrypt_salsa_avx_loop)
a3(vpaddd xmm0,xmm0,xmm8)
a3(vpaddd xmm1,xmm1,xmm9)
a3(vpaddd xmm2,xmm2,xmm10)
a3(vpaddd xmm3,xmm3,xmm11)
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0x7f)
a2(add r9,64)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(vmovdqa [rax+0],xmm0)
a2(vmovdqa [rax+16],xmm1)
a2(vmovdqa [rax+32],xmm2)
a2(vmovdqa [rax+48],xmm3)
aj(jne scrypt_ChunkMix_avx_loop)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_avx)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_AVX) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA_INCLUDED))
#define SCRYPT_SALSA_AVX
static void asm_calling_convention NOINLINE
scrypt_ChunkMix_avx(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,t0,t1,t2,t3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
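/* Salsa20/8 core; block data is kept in the diagonal SSE2 layout (see salsa_core_tangle_sse2) */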
for (rounds = 8; rounds; rounds -= 2) {
x4 = x1;
x4 = _mm_add_epi32(x4, x0);
x5 = x4;
x4 = _mm_slli_epi32(x4, 7);
x5 = _mm_srli_epi32(x5, 25);
x3 = _mm_xor_si128(x3, x4);
x4 = x0;
x3 = _mm_xor_si128(x3, x5);
x4 = _mm_add_epi32(x4, x3);
x5 = x4;
x4 = _mm_slli_epi32(x4, 9);
x5 = _mm_srli_epi32(x5, 23);
x2 = _mm_xor_si128(x2, x4);
x4 = x3;
x2 = _mm_xor_si128(x2, x5);
x3 = _mm_shuffle_epi32(x3, 0x93);
x4 = _mm_add_epi32(x4, x2);
x5 = x4;
x4 = _mm_slli_epi32(x4, 13);
x5 = _mm_srli_epi32(x5, 19);
x1 = _mm_xor_si128(x1, x4);
x4 = x2;
x1 = _mm_xor_si128(x1, x5);
x2 = _mm_shuffle_epi32(x2, 0x4e);
x4 = _mm_add_epi32(x4, x1);
x5 = x4;
x4 = _mm_slli_epi32(x4, 18);
x5 = _mm_srli_epi32(x5, 14);
x0 = _mm_xor_si128(x0, x4);
x4 = x3;
x0 = _mm_xor_si128(x0, x5);
x1 = _mm_shuffle_epi32(x1, 0x39);
x4 = _mm_add_epi32(x4, x0);
x5 = x4;
x4 = _mm_slli_epi32(x4, 7);
x5 = _mm_srli_epi32(x5, 25);
x1 = _mm_xor_si128(x1, x4);
x4 = x0;
x1 = _mm_xor_si128(x1, x5);
x4 = _mm_add_epi32(x4, x1);
x5 = x4;
x4 = _mm_slli_epi32(x4, 9);
x5 = _mm_srli_epi32(x5, 23);
x2 = _mm_xor_si128(x2, x4);
x4 = x1;
x2 = _mm_xor_si128(x2, x5);
x1 = _mm_shuffle_epi32(x1, 0x93);
x4 = _mm_add_epi32(x4, x2);
x5 = x4;
x4 = _mm_slli_epi32(x4, 13);
x5 = _mm_srli_epi32(x5, 19);
x3 = _mm_xor_si128(x3, x4);
x4 = x2;
x3 = _mm_xor_si128(x3, x5);
x2 = _mm_shuffle_epi32(x2, 0x4e);
x4 = _mm_add_epi32(x4, x3);
x5 = x4;
x4 = _mm_slli_epi32(x4, 18);
x5 = _mm_srli_epi32(x5, 14);
x0 = _mm_xor_si128(x0, x4);
x3 = _mm_shuffle_epi32(x3, 0x39);
x0 = _mm_xor_si128(x0, x5);
}
x0 = _mm_add_epi32(x0, t0);
x1 = _mm_add_epi32(x1, t1);
x2 = _mm_add_epi32(x2, t2);
x3 = _mm_add_epi32(x3, t3);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
}
}
#endif
#if defined(SCRYPT_SALSA_AVX)
/* uses salsa_core_tangle_sse2 */
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa/8-AVX"
#undef SCRYPT_SALSA_INCLUDED
#define SCRYPT_SALSA_INCLUDED
#endif


@@ -0,0 +1,443 @@
/* x86 */
#if defined(X86ASM_SSE2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA_SSE2
asm_naked_fn_proto(void, scrypt_ChunkMix_sse2)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_sse2)
a1(push ebx)
a1(push edi)
a1(push esi)
a1(push ebp)
a2(mov ebp,esp)
a2(mov edi,[ebp+20])
a2(mov esi,[ebp+24])
a2(mov eax,[ebp+28])
a2(mov ebx,[ebp+32])
a2(sub esp,32)
a2(and esp,~63)
a2(lea edx,[ebx*2])
a2(shl edx,6)
a2(lea ecx,[edx-64])
a2(and eax, eax)
a2(movdqa xmm0,[ecx+esi+0])
a2(movdqa xmm1,[ecx+esi+16])
a2(movdqa xmm2,[ecx+esi+32])
a2(movdqa xmm3,[ecx+esi+48])
aj(jz scrypt_ChunkMix_sse2_no_xor1)
a2(pxor xmm0,[ecx+eax+0])
a2(pxor xmm1,[ecx+eax+16])
a2(pxor xmm2,[ecx+eax+32])
a2(pxor xmm3,[ecx+eax+48])
a1(scrypt_ChunkMix_sse2_no_xor1:)
a2(xor ecx,ecx)
a2(xor ebx,ebx)
a1(scrypt_ChunkMix_sse2_loop:)
a2(and eax, eax)
a2(pxor xmm0,[esi+ecx+0])
a2(pxor xmm1,[esi+ecx+16])
a2(pxor xmm2,[esi+ecx+32])
a2(pxor xmm3,[esi+ecx+48])
aj(jz scrypt_ChunkMix_sse2_no_xor2)
a2(pxor xmm0,[eax+ecx+0])
a2(pxor xmm1,[eax+ecx+16])
a2(pxor xmm2,[eax+ecx+32])
a2(pxor xmm3,[eax+ecx+48])
a1(scrypt_ChunkMix_sse2_no_xor2:)
a2(movdqa [esp+0],xmm0)
a2(movdqa [esp+16],xmm1)
a2(movdqa xmm6,xmm2)
a2(movdqa xmm7,xmm3)
a2(mov eax,8)
a1(scrypt_salsa_sse2_loop: )
a2(movdqa xmm4, xmm1)
a2(paddd xmm4, xmm0)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 7)
a2(psrld xmm5, 25)
a2(pxor xmm3, xmm4)
a2(movdqa xmm4, xmm0)
a2(pxor xmm3, xmm5)
a2(paddd xmm4, xmm3)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 9)
a2(psrld xmm5, 23)
a2(pxor xmm2, xmm4)
a2(movdqa xmm4, xmm3)
a2(pxor xmm2, xmm5)
a3(pshufd xmm3, xmm3, 0x93)
a2(paddd xmm4, xmm2)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 13)
a2(psrld xmm5, 19)
a2(pxor xmm1, xmm4)
a2(movdqa xmm4, xmm2)
a2(pxor xmm1, xmm5)
a3(pshufd xmm2, xmm2, 0x4e)
a2(paddd xmm4, xmm1)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 18)
a2(psrld xmm5, 14)
a2(pxor xmm0, xmm4)
a2(movdqa xmm4, xmm3)
a2(pxor xmm0, xmm5)
a3(pshufd xmm1, xmm1, 0x39)
a2(paddd xmm4, xmm0)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 7)
a2(psrld xmm5, 25)
a2(pxor xmm1, xmm4)
a2(movdqa xmm4, xmm0)
a2(pxor xmm1, xmm5)
a2(paddd xmm4, xmm1)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 9)
a2(psrld xmm5, 23)
a2(pxor xmm2, xmm4)
a2(movdqa xmm4, xmm1)
a2(pxor xmm2, xmm5)
a3(pshufd xmm1, xmm1, 0x93)
a2(paddd xmm4, xmm2)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 13)
a2(psrld xmm5, 19)
a2(pxor xmm3, xmm4)
a2(movdqa xmm4, xmm2)
a2(pxor xmm3, xmm5)
a3(pshufd xmm2, xmm2, 0x4e)
a2(paddd xmm4, xmm3)
a2(sub eax, 2)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 18)
a2(psrld xmm5, 14)
a2(pxor xmm0, xmm4)
a3(pshufd xmm3, xmm3, 0x39)
a2(pxor xmm0, xmm5)
aj(ja scrypt_salsa_sse2_loop)
a2(paddd xmm0,[esp+0])
a2(paddd xmm1,[esp+16])
a2(paddd xmm2,xmm6)
a2(paddd xmm3,xmm7)
a2(lea eax,[ebx+ecx])
a2(xor ebx,edx)
a2(and eax,~0x7f)
a2(add ecx,64)
a2(shr eax,1)
a2(add eax, edi)
a2(cmp ecx,edx)
a2(movdqa [eax+0],xmm0)
a2(movdqa [eax+16],xmm1)
a2(movdqa [eax+32],xmm2)
a2(movdqa [eax+48],xmm3)
a2(mov eax,[ebp+28])
aj(jne scrypt_ChunkMix_sse2_loop)
a2(mov esp,ebp)
a1(pop ebp)
a1(pop esi)
a1(pop edi)
a1(pop ebx)
aret(16)
asm_naked_fn_end(scrypt_ChunkMix_sse2)
#endif
/* x64 */
#if defined(X86_64ASM_SSE2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA_SSE2
asm_naked_fn_proto(void, scrypt_ChunkMix_sse2)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_sse2)
a2(lea rcx,[ecx*2]) /* zero-extend the uint32_t r via ecx; win64 callers may leave garbage in the top half of rcx */
a2(shl rcx,6)
a2(lea r9,[rcx-64])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(movdqa xmm0,[rax+0])
a2(movdqa xmm1,[rax+16])
a2(movdqa xmm2,[rax+32])
a2(movdqa xmm3,[rax+48])
aj(jz scrypt_ChunkMix_sse2_no_xor1)
a2(pxor xmm0,[r9+0])
a2(pxor xmm1,[r9+16])
a2(pxor xmm2,[r9+32])
a2(pxor xmm3,[r9+48])
a1(scrypt_ChunkMix_sse2_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_sse2_loop:)
a2(and rdx, rdx)
a2(pxor xmm0,[rsi+r9+0])
a2(pxor xmm1,[rsi+r9+16])
a2(pxor xmm2,[rsi+r9+32])
a2(pxor xmm3,[rsi+r9+48])
aj(jz scrypt_ChunkMix_sse2_no_xor2)
a2(pxor xmm0,[rdx+r9+0])
a2(pxor xmm1,[rdx+r9+16])
a2(pxor xmm2,[rdx+r9+32])
a2(pxor xmm3,[rdx+r9+48])
a1(scrypt_ChunkMix_sse2_no_xor2:)
a2(movdqa xmm8,xmm0)
a2(movdqa xmm9,xmm1)
a2(movdqa xmm10,xmm2)
a2(movdqa xmm11,xmm3)
a2(mov rax,8)
a1(scrypt_salsa_sse2_loop: )
a2(movdqa xmm4, xmm1)
a2(paddd xmm4, xmm0)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 7)
a2(psrld xmm5, 25)
a2(pxor xmm3, xmm4)
a2(movdqa xmm4, xmm0)
a2(pxor xmm3, xmm5)
a2(paddd xmm4, xmm3)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 9)
a2(psrld xmm5, 23)
a2(pxor xmm2, xmm4)
a2(movdqa xmm4, xmm3)
a2(pxor xmm2, xmm5)
a3(pshufd xmm3, xmm3, 0x93)
a2(paddd xmm4, xmm2)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 13)
a2(psrld xmm5, 19)
a2(pxor xmm1, xmm4)
a2(movdqa xmm4, xmm2)
a2(pxor xmm1, xmm5)
a3(pshufd xmm2, xmm2, 0x4e)
a2(paddd xmm4, xmm1)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 18)
a2(psrld xmm5, 14)
a2(pxor xmm0, xmm4)
a2(movdqa xmm4, xmm3)
a2(pxor xmm0, xmm5)
a3(pshufd xmm1, xmm1, 0x39)
a2(paddd xmm4, xmm0)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 7)
a2(psrld xmm5, 25)
a2(pxor xmm1, xmm4)
a2(movdqa xmm4, xmm0)
a2(pxor xmm1, xmm5)
a2(paddd xmm4, xmm1)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 9)
a2(psrld xmm5, 23)
a2(pxor xmm2, xmm4)
a2(movdqa xmm4, xmm1)
a2(pxor xmm2, xmm5)
a3(pshufd xmm1, xmm1, 0x93)
a2(paddd xmm4, xmm2)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 13)
a2(psrld xmm5, 19)
a2(pxor xmm3, xmm4)
a2(movdqa xmm4, xmm2)
a2(pxor xmm3, xmm5)
a3(pshufd xmm2, xmm2, 0x4e)
a2(paddd xmm4, xmm3)
a2(sub rax, 2)
a2(movdqa xmm5, xmm4)
a2(pslld xmm4, 18)
a2(psrld xmm5, 14)
a2(pxor xmm0, xmm4)
a3(pshufd xmm3, xmm3, 0x39)
a2(pxor xmm0, xmm5)
aj(ja scrypt_salsa_sse2_loop)
a2(paddd xmm0,xmm8)
a2(paddd xmm1,xmm9)
a2(paddd xmm2,xmm10)
a2(paddd xmm3,xmm11)
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0x7f)
a2(add r9,64)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(movdqa [rax+0],xmm0)
a2(movdqa [rax+16],xmm1)
a2(movdqa [rax+32],xmm2)
a2(movdqa [rax+48],xmm3)
aj(jne scrypt_ChunkMix_sse2_loop)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_sse2)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_SSE2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA_INCLUDED))
#define SCRYPT_SALSA_SSE2
static void NOINLINE asm_calling_convention
scrypt_ChunkMix_sse2(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,t0,t1,t2,t3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
for (rounds = 8; rounds; rounds -= 2) {
x4 = x1;
x4 = _mm_add_epi32(x4, x0);
x5 = x4;
x4 = _mm_slli_epi32(x4, 7);
x5 = _mm_srli_epi32(x5, 25);
x3 = _mm_xor_si128(x3, x4);
x4 = x0;
x3 = _mm_xor_si128(x3, x5);
x4 = _mm_add_epi32(x4, x3);
x5 = x4;
x4 = _mm_slli_epi32(x4, 9);
x5 = _mm_srli_epi32(x5, 23);
x2 = _mm_xor_si128(x2, x4);
x4 = x3;
x2 = _mm_xor_si128(x2, x5);
x3 = _mm_shuffle_epi32(x3, 0x93);
x4 = _mm_add_epi32(x4, x2);
x5 = x4;
x4 = _mm_slli_epi32(x4, 13);
x5 = _mm_srli_epi32(x5, 19);
x1 = _mm_xor_si128(x1, x4);
x4 = x2;
x1 = _mm_xor_si128(x1, x5);
x2 = _mm_shuffle_epi32(x2, 0x4e);
x4 = _mm_add_epi32(x4, x1);
x5 = x4;
x4 = _mm_slli_epi32(x4, 18);
x5 = _mm_srli_epi32(x5, 14);
x0 = _mm_xor_si128(x0, x4);
x4 = x3;
x0 = _mm_xor_si128(x0, x5);
x1 = _mm_shuffle_epi32(x1, 0x39);
x4 = _mm_add_epi32(x4, x0);
x5 = x4;
x4 = _mm_slli_epi32(x4, 7);
x5 = _mm_srli_epi32(x5, 25);
x1 = _mm_xor_si128(x1, x4);
x4 = x0;
x1 = _mm_xor_si128(x1, x5);
x4 = _mm_add_epi32(x4, x1);
x5 = x4;
x4 = _mm_slli_epi32(x4, 9);
x5 = _mm_srli_epi32(x5, 23);
x2 = _mm_xor_si128(x2, x4);
x4 = x1;
x2 = _mm_xor_si128(x2, x5);
x1 = _mm_shuffle_epi32(x1, 0x93);
x4 = _mm_add_epi32(x4, x2);
x5 = x4;
x4 = _mm_slli_epi32(x4, 13);
x5 = _mm_srli_epi32(x5, 19);
x3 = _mm_xor_si128(x3, x4);
x4 = x2;
x3 = _mm_xor_si128(x3, x5);
x2 = _mm_shuffle_epi32(x2, 0x4e);
x4 = _mm_add_epi32(x4, x3);
x5 = x4;
x4 = _mm_slli_epi32(x4, 18);
x5 = _mm_srli_epi32(x5, 14);
x0 = _mm_xor_si128(x0, x4);
x3 = _mm_shuffle_epi32(x3, 0x39);
x0 = _mm_xor_si128(x0, x5);
}
x0 = _mm_add_epi32(x0, t0);
x1 = _mm_add_epi32(x1, t1);
x2 = _mm_add_epi32(x2, t2);
x3 = _mm_add_epi32(x3, t3);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
}
}
#endif
#if defined(SCRYPT_SALSA_SSE2)
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa/8-SSE2"
#undef SCRYPT_SALSA_INCLUDED
#define SCRYPT_SALSA_INCLUDED
#endif
/* also used by the avx and xop variants */
#if defined(SCRYPT_SALSA_INCLUDED)
/*
Default layout:
0 1 2 3
4 5 6 7
8 9 10 11
12 13 14 15
SSE2 layout:
0 5 10 15
12 1 6 11
8 13 2 7
4 9 14 3
*/
static void asm_calling_convention
salsa_core_tangle_sse2(uint32_t *blocks, size_t count) {
uint32_t t;
while (count--) {
t = blocks[1]; blocks[1] = blocks[5]; blocks[5] = t;
t = blocks[2]; blocks[2] = blocks[10]; blocks[10] = t;
t = blocks[3]; blocks[3] = blocks[15]; blocks[15] = t;
t = blocks[4]; blocks[4] = blocks[12]; blocks[12] = t;
t = blocks[7]; blocks[7] = blocks[11]; blocks[11] = t;
t = blocks[9]; blocks[9] = blocks[13]; blocks[13] = t;
blocks += 16;
}
}
#endif


@@ -0,0 +1,317 @@
/* x86 */
#if defined(X86ASM_XOP) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA_XOP
asm_naked_fn_proto(void, scrypt_ChunkMix_xop)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_xop)
a1(push ebx)
a1(push edi)
a1(push esi)
a1(push ebp)
a2(mov ebp,esp)
a2(mov edi,[ebp+20])
a2(mov esi,[ebp+24])
a2(mov eax,[ebp+28])
a2(mov ebx,[ebp+32])
a2(sub esp,32)
a2(and esp,~63)
a2(lea edx,[ebx*2])
a2(shl edx,6)
a2(lea ecx,[edx-64])
a2(and eax, eax)
a2(movdqa xmm0,[ecx+esi+0])
a2(movdqa xmm1,[ecx+esi+16])
a2(movdqa xmm2,[ecx+esi+32])
a2(movdqa xmm3,[ecx+esi+48])
aj(jz scrypt_ChunkMix_xop_no_xor1)
a3(vpxor xmm0,xmm0,[ecx+eax+0])
a3(vpxor xmm1,xmm1,[ecx+eax+16])
a3(vpxor xmm2,xmm2,[ecx+eax+32])
a3(vpxor xmm3,xmm3,[ecx+eax+48])
a1(scrypt_ChunkMix_xop_no_xor1:)
a2(xor ecx,ecx)
a2(xor ebx,ebx)
a1(scrypt_ChunkMix_xop_loop:)
a2(and eax, eax)
a3(vpxor xmm0,xmm0,[esi+ecx+0])
a3(vpxor xmm1,xmm1,[esi+ecx+16])
a3(vpxor xmm2,xmm2,[esi+ecx+32])
a3(vpxor xmm3,xmm3,[esi+ecx+48])
aj(jz scrypt_ChunkMix_xop_no_xor2)
a3(vpxor xmm0,xmm0,[eax+ecx+0])
a3(vpxor xmm1,xmm1,[eax+ecx+16])
a3(vpxor xmm2,xmm2,[eax+ecx+32])
a3(vpxor xmm3,xmm3,[eax+ecx+48])
a1(scrypt_ChunkMix_xop_no_xor2:)
a2(vmovdqa [esp+0],xmm0)
a2(vmovdqa [esp+16],xmm1)
a2(vmovdqa xmm6,xmm2)
a2(vmovdqa xmm7,xmm3)
a2(mov eax,8)
a1(scrypt_salsa_xop_loop: )
a3(vpaddd xmm4, xmm1, xmm0)
a3(vprotd xmm4, xmm4, 7)
a3(vpxor xmm3, xmm3, xmm4)
a3(vpaddd xmm4, xmm0, xmm3)
a3(vprotd xmm4, xmm4, 9)
a3(vpxor xmm2, xmm2, xmm4)
a3(vpaddd xmm4, xmm3, xmm2)
a3(vprotd xmm4, xmm4, 13)
a3(vpxor xmm1, xmm1, xmm4)
a3(vpaddd xmm4, xmm2, xmm1)
a3(pshufd xmm3, xmm3, 0x93)
a3(vprotd xmm4, xmm4, 18)
a3(pshufd xmm2, xmm2, 0x4e)
a3(vpxor xmm0, xmm0, xmm4)
a3(pshufd xmm1, xmm1, 0x39)
a3(vpaddd xmm4, xmm3, xmm0)
a3(vprotd xmm4, xmm4, 7)
a3(vpxor xmm1, xmm1, xmm4)
a3(vpaddd xmm4, xmm0, xmm1)
a3(vprotd xmm4, xmm4, 9)
a3(vpxor xmm2, xmm2, xmm4)
a3(vpaddd xmm4, xmm1, xmm2)
a3(vprotd xmm4, xmm4, 13)
a3(vpxor xmm3, xmm3, xmm4)
a3(pshufd xmm1, xmm1, 0x93)
a3(vpaddd xmm4, xmm2, xmm3)
a3(pshufd xmm2, xmm2, 0x4e)
a3(vprotd xmm4, xmm4, 18)
a3(pshufd xmm3, xmm3, 0x39)
a3(vpxor xmm0, xmm0, xmm4)
a2(sub eax, 2)
aj(ja scrypt_salsa_xop_loop)
a3(vpaddd xmm0,xmm0,[esp+0])
a3(vpaddd xmm1,xmm1,[esp+16])
a3(vpaddd xmm2,xmm2,xmm6)
a3(vpaddd xmm3,xmm3,xmm7)
a2(lea eax,[ebx+ecx])
a2(xor ebx,edx)
a2(and eax,~0x7f)
a2(add ecx,64)
a2(shr eax,1)
a2(add eax, edi)
a2(cmp ecx,edx)
a2(vmovdqa [eax+0],xmm0)
a2(vmovdqa [eax+16],xmm1)
a2(vmovdqa [eax+32],xmm2)
a2(vmovdqa [eax+48],xmm3)
a2(mov eax,[ebp+28])
aj(jne scrypt_ChunkMix_xop_loop)
a2(mov esp,ebp)
a1(pop ebp)
a1(pop esi)
a1(pop edi)
a1(pop ebx)
aret(16)
asm_naked_fn_end(scrypt_ChunkMix_xop)
#endif
/* x64 */
#if defined(X86_64ASM_XOP) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA_XOP
asm_naked_fn_proto(void, scrypt_ChunkMix_xop)(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_xop)
a2(lea rcx,[ecx*2]) /* zero-extend the uint32_t r via ecx; win64 callers may leave garbage in the top half of rcx */
a2(shl rcx,6)
a2(lea r9,[rcx-64])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(vmovdqa xmm0,[rax+0])
a2(vmovdqa xmm1,[rax+16])
a2(vmovdqa xmm2,[rax+32])
a2(vmovdqa xmm3,[rax+48])
aj(jz scrypt_ChunkMix_xop_no_xor1)
a3(vpxor xmm0,xmm0,[r9+0])
a3(vpxor xmm1,xmm1,[r9+16])
a3(vpxor xmm2,xmm2,[r9+32])
a3(vpxor xmm3,xmm3,[r9+48])
a1(scrypt_ChunkMix_xop_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_xop_loop:)
a2(and rdx, rdx)
a3(vpxor xmm0,xmm0,[rsi+r9+0])
a3(vpxor xmm1,xmm1,[rsi+r9+16])
a3(vpxor xmm2,xmm2,[rsi+r9+32])
a3(vpxor xmm3,xmm3,[rsi+r9+48])
aj(jz scrypt_ChunkMix_xop_no_xor2)
a3(vpxor xmm0,xmm0,[rdx+r9+0])
a3(vpxor xmm1,xmm1,[rdx+r9+16])
a3(vpxor xmm2,xmm2,[rdx+r9+32])
a3(vpxor xmm3,xmm3,[rdx+r9+48])
a1(scrypt_ChunkMix_xop_no_xor2:)
a2(vmovdqa xmm8,xmm0)
a2(vmovdqa xmm9,xmm1)
a2(vmovdqa xmm10,xmm2)
a2(vmovdqa xmm11,xmm3)
a2(mov rax,8)
a1(scrypt_salsa_xop_loop: )
a3(vpaddd xmm4, xmm1, xmm0)
a3(vprotd xmm4, xmm4, 7)
a3(vpxor xmm3, xmm3, xmm4)
a3(vpaddd xmm4, xmm0, xmm3)
a3(vprotd xmm4, xmm4, 9)
a3(vpxor xmm2, xmm2, xmm4)
a3(vpaddd xmm4, xmm3, xmm2)
a3(vprotd xmm4, xmm4, 13)
a3(vpxor xmm1, xmm1, xmm4)
a3(vpaddd xmm4, xmm2, xmm1)
a3(pshufd xmm3, xmm3, 0x93)
a3(vprotd xmm4, xmm4, 18)
a3(pshufd xmm2, xmm2, 0x4e)
a3(vpxor xmm0, xmm0, xmm4)
a3(pshufd xmm1, xmm1, 0x39)
a3(vpaddd xmm4, xmm3, xmm0)
a3(vprotd xmm4, xmm4, 7)
a3(vpxor xmm1, xmm1, xmm4)
a3(vpaddd xmm4, xmm0, xmm1)
a3(vprotd xmm4, xmm4, 9)
a3(vpxor xmm2, xmm2, xmm4)
a3(vpaddd xmm4, xmm1, xmm2)
a3(vprotd xmm4, xmm4, 13)
a3(vpxor xmm3, xmm3, xmm4)
a3(pshufd xmm1, xmm1, 0x93)
a3(vpaddd xmm4, xmm2, xmm3)
a3(pshufd xmm2, xmm2, 0x4e)
a3(vprotd xmm4, xmm4, 18)
a3(pshufd xmm3, xmm3, 0x39)
a3(vpxor xmm0, xmm0, xmm4)
a2(sub rax, 2)
aj(ja scrypt_salsa_xop_loop)
a3(vpaddd xmm0,xmm0,xmm8)
a3(vpaddd xmm1,xmm1,xmm9)
a3(vpaddd xmm2,xmm2,xmm10)
a3(vpaddd xmm3,xmm3,xmm11)
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0x7f)
a2(add r9,64)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(vmovdqa [rax+0],xmm0)
a2(vmovdqa [rax+16],xmm1)
a2(vmovdqa [rax+32],xmm2)
a2(vmovdqa [rax+48],xmm3)
aj(jne scrypt_ChunkMix_xop_loop)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_xop)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_XOP) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA_INCLUDED))
#define SCRYPT_SALSA_XOP
static void asm_calling_convention NOINLINE
scrypt_ChunkMix_xop(uint32_t *Bout/*[chunkBytes]*/, uint32_t *Bin/*[chunkBytes]*/, uint32_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,t0,t1,t2,t3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
for (rounds = 8; rounds; rounds -= 2) {
x4 = _mm_add_epi32(x1, x0);
x4 = _mm_roti_epi32(x4, 7);
x3 = _mm_xor_si128(x3, x4);
x4 = _mm_add_epi32(x0, x3);
x4 = _mm_roti_epi32(x4, 9);
x2 = _mm_xor_si128(x2, x4);
x4 = _mm_add_epi32(x3, x2);
x4 = _mm_roti_epi32(x4, 13);
x1 = _mm_xor_si128(x1, x4);
x4 = _mm_add_epi32(x2, x1);
x4 = _mm_roti_epi32(x4, 18);
x0 = _mm_xor_si128(x0, x4);
x3 = _mm_shuffle_epi32(x3, 0x93);
x2 = _mm_shuffle_epi32(x2, 0x4e);
x1 = _mm_shuffle_epi32(x1, 0x39);
x4 = _mm_add_epi32(x3, x0);
x4 = _mm_roti_epi32(x4, 7);
x1 = _mm_xor_si128(x1, x4);
x4 = _mm_add_epi32(x0, x1);
x4 = _mm_roti_epi32(x4, 9);
x2 = _mm_xor_si128(x2, x4);
x4 = _mm_add_epi32(x1, x2);
x4 = _mm_roti_epi32(x4, 13);
x3 = _mm_xor_si128(x3, x4);
x4 = _mm_add_epi32(x2, x3);
x4 = _mm_roti_epi32(x4, 18);
x0 = _mm_xor_si128(x0, x4);
x1 = _mm_shuffle_epi32(x1, 0x93);
x2 = _mm_shuffle_epi32(x2, 0x4e);
x3 = _mm_shuffle_epi32(x3, 0x39);
}
x0 = _mm_add_epi32(x0, t0);
x1 = _mm_add_epi32(x1, t1);
x2 = _mm_add_epi32(x2, t2);
x3 = _mm_add_epi32(x3, t3);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
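/* (i / 2) + half realizes the interleave: half toggles between 0 and r each
   iteration, so even blocks land in B'[0..r-1] and odd blocks in B'[r..2r-1] */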
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
}
}
#endif
#if defined(SCRYPT_SALSA_XOP)
/* uses salsa_core_tangle_sse2 */
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa/8-XOP"
#undef SCRYPT_SALSA_INCLUDED
#define SCRYPT_SALSA_INCLUDED
#endif


@@ -0,0 +1,70 @@
#if !defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA_INCLUDED)
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa20/8 Ref"
#undef SCRYPT_SALSA_INCLUDED
#define SCRYPT_SALSA_INCLUDED
#define SCRYPT_SALSA_BASIC
static void
salsa_core_basic(uint32_t state[16]) {
size_t rounds = 8;
uint32_t x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,t;
x0 = state[0];
x1 = state[1];
x2 = state[2];
x3 = state[3];
x4 = state[4];
x5 = state[5];
x6 = state[6];
x7 = state[7];
x8 = state[8];
x9 = state[9];
x10 = state[10];
x11 = state[11];
x12 = state[12];
x13 = state[13];
x14 = state[14];
x15 = state[15];
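/* Salsa20 quarter round: each word is XORed with the rotated sum of the two
   most recently updated words, with rotation counts 7, 9, 13, 18 */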
#define quarter(a,b,c,d) \
t = a+d; t = ROTL32(t, 7); b ^= t; \
t = b+a; t = ROTL32(t, 9); c ^= t; \
t = c+b; t = ROTL32(t, 13); d ^= t; \
t = d+c; t = ROTL32(t, 18); a ^= t;
for (; rounds; rounds -= 2) {
quarter( x0, x4, x8,x12)
quarter( x5, x9,x13, x1)
quarter(x10,x14, x2, x6)
quarter(x15, x3, x7,x11)
quarter( x0, x1, x2, x3)
quarter( x5, x6, x7, x4)
quarter(x10,x11, x8, x9)
quarter(x15,x12,x13,x14)
}
state[0] += x0;
state[1] += x1;
state[2] += x2;
state[3] += x3;
state[4] += x4;
state[5] += x5;
state[6] += x6;
state[7] += x7;
state[8] += x8;
state[9] += x9;
state[10] += x10;
state[11] += x11;
state[12] += x12;
state[13] += x13;
state[14] += x14;
state[15] += x15;
#undef quarter
}
#endif


@@ -0,0 +1,367 @@
/* x64 */
#if defined(X86_64ASM_AVX) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA64_AVX
asm_naked_fn_proto(void, scrypt_ChunkMix_avx)(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_avx)
a1(push rbp)
a2(mov rbp, rsp)
a2(and rsp, ~63)
a2(sub rsp, 128)
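/* rsp is now 64-byte aligned with 128 bytes reserved: scratch to hold the
   block copy that is added back after the rounds */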
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,7)
a2(lea r9,[rcx-128])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(vmovdqa xmm0,[rax+0])
a2(vmovdqa xmm1,[rax+16])
a2(vmovdqa xmm2,[rax+32])
a2(vmovdqa xmm3,[rax+48])
a2(vmovdqa xmm4,[rax+64])
a2(vmovdqa xmm5,[rax+80])
a2(vmovdqa xmm6,[rax+96])
a2(vmovdqa xmm7,[rax+112])
aj(jz scrypt_ChunkMix_avx_no_xor1)
a3(vpxor xmm0,xmm0,[r9+0])
a3(vpxor xmm1,xmm1,[r9+16])
a3(vpxor xmm2,xmm2,[r9+32])
a3(vpxor xmm3,xmm3,[r9+48])
a3(vpxor xmm4,xmm4,[r9+64])
a3(vpxor xmm5,xmm5,[r9+80])
a3(vpxor xmm6,xmm6,[r9+96])
a3(vpxor xmm7,xmm7,[r9+112])
a1(scrypt_ChunkMix_avx_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_avx_loop:)
a2(and rdx, rdx)
a3(vpxor xmm0,xmm0,[rsi+r9+0])
a3(vpxor xmm1,xmm1,[rsi+r9+16])
a3(vpxor xmm2,xmm2,[rsi+r9+32])
a3(vpxor xmm3,xmm3,[rsi+r9+48])
a3(vpxor xmm4,xmm4,[rsi+r9+64])
a3(vpxor xmm5,xmm5,[rsi+r9+80])
a3(vpxor xmm6,xmm6,[rsi+r9+96])
a3(vpxor xmm7,xmm7,[rsi+r9+112])
aj(jz scrypt_ChunkMix_avx_no_xor2)
a3(vpxor xmm0,xmm0,[rdx+r9+0])
a3(vpxor xmm1,xmm1,[rdx+r9+16])
a3(vpxor xmm2,xmm2,[rdx+r9+32])
a3(vpxor xmm3,xmm3,[rdx+r9+48])
a3(vpxor xmm4,xmm4,[rdx+r9+64])
a3(vpxor xmm5,xmm5,[rdx+r9+80])
a3(vpxor xmm6,xmm6,[rdx+r9+96])
a3(vpxor xmm7,xmm7,[rdx+r9+112])
a1(scrypt_ChunkMix_avx_no_xor2:)
a2(vmovdqa [rsp+0],xmm0)
a2(vmovdqa [rsp+16],xmm1)
a2(vmovdqa [rsp+32],xmm2)
a2(vmovdqa [rsp+48],xmm3)
a2(vmovdqa [rsp+64],xmm4)
a2(vmovdqa [rsp+80],xmm5)
a2(vmovdqa [rsp+96],xmm6)
a2(vmovdqa [rsp+112],xmm7)
a2(mov rax,8)
a1(scrypt_salsa64_avx_loop: )
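/* AVX lacks a 64-bit rotate: rotl64(x,13) is vpsllq 13 / vpsrlq 51 plus two
   xors, rotl64(x,39) likewise, and the rotations by 32 reduce to swapping
   32-bit lanes with vpshufd 0xb1 */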
a3(vpaddq xmm8, xmm0, xmm2)
a3(vpaddq xmm9, xmm1, xmm3)
a3(vpshufd xmm8, xmm8, 0xb1)
a3(vpshufd xmm9, xmm9, 0xb1)
a3(vpxor xmm6, xmm6, xmm8)
a3(vpxor xmm7, xmm7, xmm9)
a3(vpaddq xmm10, xmm0, xmm6)
a3(vpaddq xmm11, xmm1, xmm7)
a3(vpsrlq xmm8, xmm10, 51)
a3(vpsrlq xmm9, xmm11, 51)
a3(vpsllq xmm10, xmm10, 13)
a3(vpsllq xmm11, xmm11, 13)
a3(vpxor xmm4, xmm4, xmm8)
a3(vpxor xmm5, xmm5, xmm9)
a3(vpxor xmm4, xmm4, xmm10)
a3(vpxor xmm5, xmm5, xmm11)
a3(vpaddq xmm8, xmm6, xmm4)
a3(vpaddq xmm9, xmm7, xmm5)
a3(vpsrlq xmm10, xmm8, 25)
a3(vpsrlq xmm11, xmm9, 25)
a3(vpsllq xmm8, xmm8, 39)
a3(vpsllq xmm9, xmm9, 39)
a3(vpxor xmm2, xmm2, xmm10)
a3(vpxor xmm3, xmm3, xmm11)
a3(vpxor xmm2, xmm2, xmm8)
a3(vpxor xmm3, xmm3, xmm9)
a3(vpaddq xmm10, xmm4, xmm2)
a3(vpaddq xmm11, xmm5, xmm3)
a3(vpshufd xmm10, xmm10, 0xb1)
a3(vpshufd xmm11, xmm11, 0xb1)
a3(vpxor xmm0, xmm0, xmm10)
a3(vpxor xmm1, xmm1, xmm11)
a2(vmovdqa xmm8, xmm2)
a2(vmovdqa xmm9, xmm3)
a4(vpalignr xmm2, xmm6, xmm7, 8)
a4(vpalignr xmm3, xmm7, xmm6, 8)
a4(vpalignr xmm6, xmm9, xmm8, 8)
a4(vpalignr xmm7, xmm8, xmm9, 8)
a3(vpaddq xmm10, xmm0, xmm2)
a3(vpaddq xmm11, xmm1, xmm3)
a3(vpshufd xmm10, xmm10, 0xb1)
a3(vpshufd xmm11, xmm11, 0xb1)
a3(vpxor xmm6, xmm6, xmm10)
a3(vpxor xmm7, xmm7, xmm11)
a3(vpaddq xmm8, xmm0, xmm6)
a3(vpaddq xmm9, xmm1, xmm7)
a3(vpsrlq xmm10, xmm8, 51)
a3(vpsrlq xmm11, xmm9, 51)
a3(vpsllq xmm8, xmm8, 13)
a3(vpsllq xmm9, xmm9, 13)
a3(vpxor xmm5, xmm5, xmm10)
a3(vpxor xmm4, xmm4, xmm11)
a3(vpxor xmm5, xmm5, xmm8)
a3(vpxor xmm4, xmm4, xmm9)
a3(vpaddq xmm10, xmm6, xmm5)
a3(vpaddq xmm11, xmm7, xmm4)
a3(vpsrlq xmm8, xmm10, 25)
a3(vpsrlq xmm9, xmm11, 25)
a3(vpsllq xmm10, xmm10, 39)
a3(vpsllq xmm11, xmm11, 39)
a3(vpxor xmm2, xmm2, xmm8)
a3(vpxor xmm3, xmm3, xmm9)
a3(vpxor xmm2, xmm2, xmm10)
a3(vpxor xmm3, xmm3, xmm11)
a3(vpaddq xmm8, xmm5, xmm2)
a3(vpaddq xmm9, xmm4, xmm3)
a3(vpshufd xmm8, xmm8, 0xb1)
a3(vpshufd xmm9, xmm9, 0xb1)
a3(vpxor xmm0, xmm0, xmm8)
a3(vpxor xmm1, xmm1, xmm9)
a2(vmovdqa xmm10, xmm2)
a2(vmovdqa xmm11, xmm3)
a4(vpalignr xmm2, xmm6, xmm7, 8)
a4(vpalignr xmm3, xmm7, xmm6, 8)
a4(vpalignr xmm6, xmm11, xmm10, 8)
a4(vpalignr xmm7, xmm10, xmm11, 8)
a2(sub rax, 2)
aj(ja scrypt_salsa64_avx_loop)
a3(vpaddq xmm0,xmm0,[rsp+0])
a3(vpaddq xmm1,xmm1,[rsp+16])
a3(vpaddq xmm2,xmm2,[rsp+32])
a3(vpaddq xmm3,xmm3,[rsp+48])
a3(vpaddq xmm4,xmm4,[rsp+64])
a3(vpaddq xmm5,xmm5,[rsp+80])
a3(vpaddq xmm6,xmm6,[rsp+96])
a3(vpaddq xmm7,xmm7,[rsp+112])
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0xff)
a2(add r9,128)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(vmovdqa [rax+0],xmm0)
a2(vmovdqa [rax+16],xmm1)
a2(vmovdqa [rax+32],xmm2)
a2(vmovdqa [rax+48],xmm3)
a2(vmovdqa [rax+64],xmm4)
a2(vmovdqa [rax+80],xmm5)
a2(vmovdqa [rax+96],xmm6)
a2(vmovdqa [rax+112],xmm7)
aj(jne scrypt_ChunkMix_avx_loop)
a2(mov rsp, rbp)
a1(pop rbp)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_avx)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_AVX) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED))
#define SCRYPT_SALSA64_AVX
static void asm_calling_convention
scrypt_ChunkMix_avx(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,x6,x7,t0,t1,t2,t3,t4,t5,t6,t7,z0,z1,z2,z3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
x4 = xmmp[4];
x5 = xmmp[5];
x6 = xmmp[6];
x7 = xmmp[7];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
t4 = x4;
t5 = x5;
t6 = x6;
t7 = x7;
for (rounds = 8; rounds; rounds -= 2) {
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x4 = _mm_xor_si128(x4, z2);
x5 = _mm_xor_si128(x5, z3);
x4 = _mm_xor_si128(x4, z0);
x5 = _mm_xor_si128(x5, z1);
z0 = _mm_add_epi64(x4, x6);
z1 = _mm_add_epi64(x5, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x4);
z1 = _mm_add_epi64(x3, x5);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x5 = _mm_xor_si128(x5, z2);
x4 = _mm_xor_si128(x4, z3);
x5 = _mm_xor_si128(x5, z0);
x4 = _mm_xor_si128(x4, z1);
z0 = _mm_add_epi64(x5, x6);
z1 = _mm_add_epi64(x4, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x5);
z1 = _mm_add_epi64(x3, x4);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
}
x0 = _mm_add_epi64(x0, t0);
x1 = _mm_add_epi64(x1, t1);
x2 = _mm_add_epi64(x2, t2);
x3 = _mm_add_epi64(x3, t3);
x4 = _mm_add_epi64(x4, t4);
x5 = _mm_add_epi64(x5, t5);
x6 = _mm_add_epi64(x6, t6);
x7 = _mm_add_epi64(x7, t7);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
xmmp[4] = x4;
xmmp[5] = x5;
xmmp[6] = x6;
xmmp[7] = x7;
}
}
#endif
#if defined(SCRYPT_SALSA64_AVX)
/* uses salsa64_core_tangle_sse2 */
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8-AVX"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#endif


@@ -0,0 +1,221 @@
/* x64 */
#if defined(X86_64ASM_AVX2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA64_AVX2
asm_naked_fn_proto(void, scrypt_ChunkMix_avx2)(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_avx2)
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,7)
a2(lea r9,[rcx-128])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(vmovdqa ymm0,[rax+0])
a2(vmovdqa ymm1,[rax+32])
a2(vmovdqa ymm2,[rax+64])
a2(vmovdqa ymm3,[rax+96])
aj(jz scrypt_ChunkMix_avx2_no_xor1)
a3(vpxor ymm0,ymm0,[r9+0])
a3(vpxor ymm1,ymm1,[r9+32])
a3(vpxor ymm2,ymm2,[r9+64])
a3(vpxor ymm3,ymm3,[r9+96])
a1(scrypt_ChunkMix_avx2_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_avx2_loop:)
a2(and rdx, rdx)
a3(vpxor ymm0,ymm0,[rsi+r9+0])
a3(vpxor ymm1,ymm1,[rsi+r9+32])
a3(vpxor ymm2,ymm2,[rsi+r9+64])
a3(vpxor ymm3,ymm3,[rsi+r9+96])
aj(jz scrypt_ChunkMix_avx2_no_xor2)
a3(vpxor ymm0,ymm0,[rdx+r9+0])
a3(vpxor ymm1,ymm1,[rdx+r9+32])
a3(vpxor ymm2,ymm2,[rdx+r9+64])
a3(vpxor ymm3,ymm3,[rdx+r9+96])
a1(scrypt_ChunkMix_avx2_no_xor2:)
a2(vmovdqa ymm6,ymm0)
a2(vmovdqa ymm7,ymm1)
a2(vmovdqa ymm8,ymm2)
a2(vmovdqa ymm9,ymm3)
a2(mov rax,4)
a1(scrypt_salsa64_avx2_loop: )
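/* with 256-bit registers one ymm holds a whole four-word row, so four
   registers carry the state and vpermq performs the row rotations; rax
   counts 4 iterations of two rounds each for Salsa64/8 */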
a3(vpaddq ymm4, ymm1, ymm0)
a3(vpshufd ymm4, ymm4, 0xb1)
a3(vpxor ymm3, ymm3, ymm4)
a3(vpaddq ymm4, ymm0, ymm3)
a3(vpsrlq ymm5, ymm4, 51)
a3(vpxor ymm2, ymm2, ymm5)
a3(vpsllq ymm4, ymm4, 13)
a3(vpxor ymm2, ymm2, ymm4)
a3(vpaddq ymm4, ymm3, ymm2)
a3(vpsrlq ymm5, ymm4, 25)
a3(vpxor ymm1, ymm1, ymm5)
a3(vpsllq ymm4, ymm4, 39)
a3(vpxor ymm1, ymm1, ymm4)
a3(vpaddq ymm4, ymm2, ymm1)
a3(vpshufd ymm4, ymm4, 0xb1)
a3(vpermq ymm1, ymm1, 0x39)
a3(vpermq ymm10, ymm2, 0x4e)
a3(vpxor ymm0, ymm0, ymm4)
a3(vpermq ymm3, ymm3, 0x93)
a3(vpaddq ymm4, ymm3, ymm0)
a3(vpshufd ymm4, ymm4, 0xb1)
a3(vpxor ymm1, ymm1, ymm4)
a3(vpaddq ymm4, ymm0, ymm1)
a3(vpsrlq ymm5, ymm4, 51)
a3(vpxor ymm10, ymm10, ymm5)
a3(vpsllq ymm4, ymm4, 13)
a3(vpxor ymm10, ymm10, ymm4)
a3(vpaddq ymm4, ymm1, ymm10)
a3(vpsrlq ymm5, ymm4, 25)
a3(vpxor ymm3, ymm3, ymm5)
a3(vpsllq ymm4, ymm4, 39)
a3(vpermq ymm1, ymm1, 0x93)
a3(vpxor ymm3, ymm3, ymm4)
a3(vpermq ymm2, ymm10, 0x4e)
a3(vpaddq ymm4, ymm10, ymm3)
a3(vpshufd ymm4, ymm4, 0xb1)
a3(vpermq ymm3, ymm3, 0x39)
a3(vpxor ymm0, ymm0, ymm4)
a1(dec rax)
aj(jnz scrypt_salsa64_avx2_loop)
a3(vpaddq ymm0,ymm0,ymm6)
a3(vpaddq ymm1,ymm1,ymm7)
a3(vpaddq ymm2,ymm2,ymm8)
a3(vpaddq ymm3,ymm3,ymm9)
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0xff)
a2(add r9,128)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(vmovdqa [rax+0],ymm0)
a2(vmovdqa [rax+32],ymm1)
a2(vmovdqa [rax+64],ymm2)
a2(vmovdqa [rax+96],ymm3)
aj(jne scrypt_ChunkMix_avx2_loop)
a1(vzeroupper)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_avx2)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_AVX2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED))
#define SCRYPT_SALSA64_AVX2
static void asm_calling_convention
scrypt_ChunkMix_avx2(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
ymmi *ymmp,y0,y1,y2,y3,t0,t1,t2,t3,z0,z1;
size_t rounds;
/* 1: X = B_{2r - 1} */
ymmp = (ymmi *)scrypt_block(Bin, blocksPerChunk - 1);
y0 = ymmp[0];
y1 = ymmp[1];
y2 = ymmp[2];
y3 = ymmp[3];
if (Bxor) {
ymmp = (ymmi *)scrypt_block(Bxor, blocksPerChunk - 1);
y0 = _mm256_xor_si256(y0, ymmp[0]);
y1 = _mm256_xor_si256(y1, ymmp[1]);
y2 = _mm256_xor_si256(y2, ymmp[2]);
y3 = _mm256_xor_si256(y3, ymmp[3]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
ymmp = (ymmi *)scrypt_block(Bin, i);
y0 = _mm256_xor_si256(y0, ymmp[0]);
y1 = _mm256_xor_si256(y1, ymmp[1]);
y2 = _mm256_xor_si256(y2, ymmp[2]);
y3 = _mm256_xor_si256(y3, ymmp[3]);
if (Bxor) {
ymmp = (ymmi *)scrypt_block(Bxor, i);
y0 = _mm256_xor_si256(y0, ymmp[0]);
y1 = _mm256_xor_si256(y1, ymmp[1]);
y2 = _mm256_xor_si256(y2, ymmp[2]);
y3 = _mm256_xor_si256(y3, ymmp[3]);
}
t0 = y0;
t1 = y1;
t2 = y2;
t3 = y3;
for (rounds = 8; rounds; rounds -= 2) {
z0 = _mm256_add_epi64(y0, y1);
z0 = _mm256_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
y3 = _mm256_xor_si256(y3, z0);
z0 = _mm256_add_epi64(y3, y0);
z1 = _mm256_srli_epi64(z0, 64-13);
y2 = _mm256_xor_si256(y2, z1);
z0 = _mm256_slli_epi64(z0, 13);
y2 = _mm256_xor_si256(y2, z0);
z0 = _mm256_add_epi64(y2, y3);
z1 = _mm256_srli_epi64(z0, 64-39);
y1 = _mm256_xor_si256(y1, z1);
z0 = _mm256_slli_epi64(z0, 39);
y1 = _mm256_xor_si256(y1, z0);
y1 = _mm256_permute4x64_epi64(y1, _MM_SHUFFLE(0,3,2,1));
y2 = _mm256_permute4x64_epi64(y2, _MM_SHUFFLE(1,0,3,2));
y3 = _mm256_permute4x64_epi64(y3, _MM_SHUFFLE(2,1,0,3));
z0 = _mm256_add_epi64(y1, y2);
z0 = _mm256_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
y0 = _mm256_xor_si256(y0, z0);
z0 = _mm256_add_epi64(y0, y3);
z0 = _mm256_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
y1 = _mm256_xor_si256(y1, z0);
z0 = _mm256_add_epi64(y1, y0);
z1 = _mm256_srli_epi64(z0, 64-13);
y2 = _mm256_xor_si256(y2, z1);
z0 = _mm256_slli_epi64(z0, 13);
y2 = _mm256_xor_si256(y2, z0);
z0 = _mm256_add_epi64(y2, y1);
z1 = _mm256_srli_epi64(z0, 64-39);
y3 = _mm256_xor_si256(y3, z1);
z0 = _mm256_slli_epi64(z0, 39);
y3 = _mm256_xor_si256(y3, z0);
z0 = _mm256_add_epi64(y3, y2);
z0 = _mm256_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
y0 = _mm256_xor_si256(y0, z0);
y1 = _mm256_permute4x64_epi64(y1, _MM_SHUFFLE(2,1,0,3));
y2 = _mm256_permute4x64_epi64(y2, _MM_SHUFFLE(1,0,3,2));
y3 = _mm256_permute4x64_epi64(y3, _MM_SHUFFLE(0,3,2,1));
}
y0 = _mm256_add_epi64(y0, t0);
y1 = _mm256_add_epi64(y1, t1);
y2 = _mm256_add_epi64(y2, t2);
y3 = _mm256_add_epi64(y3, t3);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
ymmp = (ymmi *)scrypt_block(Bout, (i / 2) + half);
ymmp[0] = y0;
ymmp[1] = y1;
ymmp[2] = y2;
ymmp[3] = y3;
}
}
#endif
#if defined(SCRYPT_SALSA64_AVX2)
/* uses salsa64_core_tangle_sse2 */
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8-AVX2"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#endif


@@ -0,0 +1,449 @@
/* x64 */
#if defined(X86_64ASM_SSE2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA64_SSE2
asm_naked_fn_proto(void, scrypt_ChunkMix_sse2)(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_sse2)
a1(push rbp)
a2(mov rbp, rsp)
a2(and rsp, ~63)
a2(sub rsp, 128)
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,7)
a2(lea r9,[rcx-128])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(movdqa xmm0,[rax+0])
a2(movdqa xmm1,[rax+16])
a2(movdqa xmm2,[rax+32])
a2(movdqa xmm3,[rax+48])
a2(movdqa xmm4,[rax+64])
a2(movdqa xmm5,[rax+80])
a2(movdqa xmm6,[rax+96])
a2(movdqa xmm7,[rax+112])
aj(jz scrypt_ChunkMix_sse2_no_xor1)
a2(pxor xmm0,[r9+0])
a2(pxor xmm1,[r9+16])
a2(pxor xmm2,[r9+32])
a2(pxor xmm3,[r9+48])
a2(pxor xmm4,[r9+64])
a2(pxor xmm5,[r9+80])
a2(pxor xmm6,[r9+96])
a2(pxor xmm7,[r9+112])
a1(scrypt_ChunkMix_sse2_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_sse2_loop:)
a2(and rdx, rdx)
a2(pxor xmm0,[rsi+r9+0])
a2(pxor xmm1,[rsi+r9+16])
a2(pxor xmm2,[rsi+r9+32])
a2(pxor xmm3,[rsi+r9+48])
a2(pxor xmm4,[rsi+r9+64])
a2(pxor xmm5,[rsi+r9+80])
a2(pxor xmm6,[rsi+r9+96])
a2(pxor xmm7,[rsi+r9+112])
aj(jz scrypt_ChunkMix_sse2_no_xor2)
a2(pxor xmm0,[rdx+r9+0])
a2(pxor xmm1,[rdx+r9+16])
a2(pxor xmm2,[rdx+r9+32])
a2(pxor xmm3,[rdx+r9+48])
a2(pxor xmm4,[rdx+r9+64])
a2(pxor xmm5,[rdx+r9+80])
a2(pxor xmm6,[rdx+r9+96])
a2(pxor xmm7,[rdx+r9+112])
a1(scrypt_ChunkMix_sse2_no_xor2:)
a2(movdqa [rsp+0],xmm0)
a2(movdqa [rsp+16],xmm1)
a2(movdqa [rsp+32],xmm2)
a2(movdqa [rsp+48],xmm3)
a2(movdqa [rsp+64],xmm4)
a2(movdqa [rsp+80],xmm5)
a2(movdqa [rsp+96],xmm6)
a2(movdqa [rsp+112],xmm7)
a2(mov rax,8)
a1(scrypt_salsa64_sse2_loop: )
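/* plain SSE2: 64-bit rotates are emulated with psllq/psrlq pairs, the
   rotate by 32 with pshufd 0xb1, and the lane rotation between half-rounds
   with punpcklqdq/punpckhqdq since palignr is unavailable */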
a2(movdqa xmm8, xmm0)
a2(movdqa xmm9, xmm1)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm6, xmm8)
a2(pxor xmm7, xmm9)
a2(movdqa xmm10, xmm0)
a2(movdqa xmm11, xmm1)
a2(paddq xmm10, xmm6)
a2(paddq xmm11, xmm7)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 51)
a2(psrlq xmm11, 51)
a2(psllq xmm8, 13)
a2(psllq xmm9, 13)
a2(pxor xmm4, xmm10)
a2(pxor xmm5, xmm11)
a2(pxor xmm4, xmm8)
a2(pxor xmm5, xmm9)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(paddq xmm10, xmm4)
a2(paddq xmm11, xmm5)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 25)
a2(psrlq xmm11, 25)
a2(psllq xmm8, 39)
a2(psllq xmm9, 39)
a2(pxor xmm2, xmm10)
a2(pxor xmm3, xmm11)
a2(pxor xmm2, xmm8)
a2(pxor xmm3, xmm9)
a2(movdqa xmm8, xmm4)
a2(movdqa xmm9, xmm5)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm0, xmm8)
a2(pxor xmm1, xmm9)
a2(movdqa xmm8, xmm2)
a2(movdqa xmm9, xmm3)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(movdqa xmm2, xmm7)
a2(movdqa xmm3, xmm6)
a2(punpcklqdq xmm10, xmm6)
a2(punpcklqdq xmm11, xmm7)
a2(movdqa xmm6, xmm8)
a2(movdqa xmm7, xmm9)
a2(punpcklqdq xmm9, xmm9)
a2(punpcklqdq xmm8, xmm8)
a2(punpckhqdq xmm2, xmm10)
a2(punpckhqdq xmm3, xmm11)
a2(punpckhqdq xmm6, xmm9)
a2(punpckhqdq xmm7, xmm8)
a2(sub rax, 2)
a2(movdqa xmm8, xmm0)
a2(movdqa xmm9, xmm1)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm6, xmm8)
a2(pxor xmm7, xmm9)
a2(movdqa xmm10, xmm0)
a2(movdqa xmm11, xmm1)
a2(paddq xmm10, xmm6)
a2(paddq xmm11, xmm7)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 51)
a2(psrlq xmm11, 51)
a2(psllq xmm8, 13)
a2(psllq xmm9, 13)
a2(pxor xmm5, xmm10)
a2(pxor xmm4, xmm11)
a2(pxor xmm5, xmm8)
a2(pxor xmm4, xmm9)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(paddq xmm10, xmm5)
a2(paddq xmm11, xmm4)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 25)
a2(psrlq xmm11, 25)
a2(psllq xmm8, 39)
a2(psllq xmm9, 39)
a2(pxor xmm2, xmm10)
a2(pxor xmm3, xmm11)
a2(pxor xmm2, xmm8)
a2(pxor xmm3, xmm9)
a2(movdqa xmm8, xmm5)
a2(movdqa xmm9, xmm4)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm0, xmm8)
a2(pxor xmm1, xmm9)
a2(movdqa xmm8, xmm2)
a2(movdqa xmm9, xmm3)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(movdqa xmm2, xmm7)
a2(movdqa xmm3, xmm6)
a2(punpcklqdq xmm10, xmm6)
a2(punpcklqdq xmm11, xmm7)
a2(movdqa xmm6, xmm8)
a2(movdqa xmm7, xmm9)
a2(punpcklqdq xmm9, xmm9)
a2(punpcklqdq xmm8, xmm8)
a2(punpckhqdq xmm2, xmm10)
a2(punpckhqdq xmm3, xmm11)
a2(punpckhqdq xmm6, xmm9)
a2(punpckhqdq xmm7, xmm8)
aj(ja scrypt_salsa64_sse2_loop)
a2(paddq xmm0,[rsp+0])
a2(paddq xmm1,[rsp+16])
a2(paddq xmm2,[rsp+32])
a2(paddq xmm3,[rsp+48])
a2(paddq xmm4,[rsp+64])
a2(paddq xmm5,[rsp+80])
a2(paddq xmm6,[rsp+96])
a2(paddq xmm7,[rsp+112])
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0xff)
a2(add r9,128)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(movdqa [rax+0],xmm0)
a2(movdqa [rax+16],xmm1)
a2(movdqa [rax+32],xmm2)
a2(movdqa [rax+48],xmm3)
a2(movdqa [rax+64],xmm4)
a2(movdqa [rax+80],xmm5)
a2(movdqa [rax+96],xmm6)
a2(movdqa [rax+112],xmm7)
aj(jne scrypt_ChunkMix_sse2_loop)
a2(mov rsp, rbp)
a1(pop rbp)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_sse2)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_SSE2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED))
#define SCRYPT_SALSA64_SSE2
static void asm_calling_convention
scrypt_ChunkMix_sse2(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,x6,x7,t0,t1,t2,t3,t4,t5,t6,t7,z0,z1,z2,z3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
x4 = xmmp[4];
x5 = xmmp[5];
x6 = xmmp[6];
x7 = xmmp[7];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
t4 = x4;
t5 = x5;
t6 = x6;
t7 = x7;
for (rounds = 8; rounds; rounds -= 2) {
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x4 = _mm_xor_si128(x4, z2);
x5 = _mm_xor_si128(x5, z3);
x4 = _mm_xor_si128(x4, z0);
x5 = _mm_xor_si128(x5, z1);
z0 = _mm_add_epi64(x4, x6);
z1 = _mm_add_epi64(x5, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x4);
z1 = _mm_add_epi64(x3, x5);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x4;
z1 = x5;
z2 = x2;
z3 = x3;
x4 = z1;
x5 = z0;
x2 = _mm_unpackhi_epi64(x7, _mm_unpacklo_epi64(x6, x6));
x3 = _mm_unpackhi_epi64(x6, _mm_unpacklo_epi64(x7, x7));
x6 = _mm_unpackhi_epi64(z2, _mm_unpacklo_epi64(z3, z3));
x7 = _mm_unpackhi_epi64(z3, _mm_unpacklo_epi64(z2, z2));
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x4 = _mm_xor_si128(x4, z2);
x5 = _mm_xor_si128(x5, z3);
x4 = _mm_xor_si128(x4, z0);
x5 = _mm_xor_si128(x5, z1);
z0 = _mm_add_epi64(x4, x6);
z1 = _mm_add_epi64(x5, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x4);
z1 = _mm_add_epi64(x3, x5);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x4;
z1 = x5;
z2 = x2;
z3 = x3;
x4 = z1;
x5 = z0;
x2 = _mm_unpackhi_epi64(x7, _mm_unpacklo_epi64(x6, x6));
x3 = _mm_unpackhi_epi64(x6, _mm_unpacklo_epi64(x7, x7));
x6 = _mm_unpackhi_epi64(z2, _mm_unpacklo_epi64(z3, z3));
x7 = _mm_unpackhi_epi64(z3, _mm_unpacklo_epi64(z2, z2));
}
x0 = _mm_add_epi64(x0, t0);
x1 = _mm_add_epi64(x1, t1);
x2 = _mm_add_epi64(x2, t2);
x3 = _mm_add_epi64(x3, t3);
x4 = _mm_add_epi64(x4, t4);
x5 = _mm_add_epi64(x5, t5);
x6 = _mm_add_epi64(x6, t6);
x7 = _mm_add_epi64(x7, t7);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
xmmp[4] = x4;
xmmp[5] = x5;
xmmp[6] = x6;
xmmp[7] = x7;
}
}
#endif
#if defined(SCRYPT_SALSA64_SSE2)
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8-SSE2"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#endif
/* the ssse3/avx/xop mixes use this as well */
#if defined(SCRYPT_SALSA64_INCLUDED)
/*
Default layout:
0 1 2 3
4 5 6 7
8 9 10 11
12 13 14 15
SSE2 layout:
0 5 10 15
12 1 6 11
8 13 2 7
4 9 14 3
*/
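/* the permutation is six disjoint swaps, i.e. an involution, so the same
   routine converts between the two layouts in either direction */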
static void asm_calling_convention
salsa64_core_tangle_sse2(uint64_t *blocks, size_t count) {
uint64_t t;
while (count--) {
t = blocks[1]; blocks[1] = blocks[5]; blocks[5] = t;
t = blocks[2]; blocks[2] = blocks[10]; blocks[10] = t;
t = blocks[3]; blocks[3] = blocks[15]; blocks[15] = t;
t = blocks[4]; blocks[4] = blocks[12]; blocks[12] = t;
t = blocks[7]; blocks[7] = blocks[11]; blocks[11] = t;
t = blocks[9]; blocks[9] = blocks[13]; blocks[13] = t;
blocks += 16;
}
}
#endif


@@ -0,0 +1,399 @@
/* x64 */
#if defined(X86_64ASM_SSSE3) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA64_SSSE3
asm_naked_fn_proto(void, scrypt_ChunkMix_ssse3)(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_ssse3)
a1(push rbp)
a2(mov rbp, rsp)
a2(and rsp, ~63)
a2(sub rsp, 128)
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,7)
a2(lea r9,[rcx-128])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(movdqa xmm0,[rax+0])
a2(movdqa xmm1,[rax+16])
a2(movdqa xmm2,[rax+32])
a2(movdqa xmm3,[rax+48])
a2(movdqa xmm4,[rax+64])
a2(movdqa xmm5,[rax+80])
a2(movdqa xmm6,[rax+96])
a2(movdqa xmm7,[rax+112])
aj(jz scrypt_ChunkMix_ssse3_no_xor1)
a2(pxor xmm0,[r9+0])
a2(pxor xmm1,[r9+16])
a2(pxor xmm2,[r9+32])
a2(pxor xmm3,[r9+48])
a2(pxor xmm4,[r9+64])
a2(pxor xmm5,[r9+80])
a2(pxor xmm6,[r9+96])
a2(pxor xmm7,[r9+112])
a1(scrypt_ChunkMix_ssse3_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_ssse3_loop:)
a2(and rdx, rdx)
a2(pxor xmm0,[rsi+r9+0])
a2(pxor xmm1,[rsi+r9+16])
a2(pxor xmm2,[rsi+r9+32])
a2(pxor xmm3,[rsi+r9+48])
a2(pxor xmm4,[rsi+r9+64])
a2(pxor xmm5,[rsi+r9+80])
a2(pxor xmm6,[rsi+r9+96])
a2(pxor xmm7,[rsi+r9+112])
aj(jz scrypt_ChunkMix_ssse3_no_xor2)
a2(pxor xmm0,[rdx+r9+0])
a2(pxor xmm1,[rdx+r9+16])
a2(pxor xmm2,[rdx+r9+32])
a2(pxor xmm3,[rdx+r9+48])
a2(pxor xmm4,[rdx+r9+64])
a2(pxor xmm5,[rdx+r9+80])
a2(pxor xmm6,[rdx+r9+96])
a2(pxor xmm7,[rdx+r9+112])
a1(scrypt_ChunkMix_ssse3_no_xor2:)
a2(movdqa [rsp+0],xmm0)
a2(movdqa [rsp+16],xmm1)
a2(movdqa [rsp+32],xmm2)
a2(movdqa [rsp+48],xmm3)
a2(movdqa [rsp+64],xmm4)
a2(movdqa [rsp+80],xmm5)
a2(movdqa [rsp+96],xmm6)
a2(movdqa [rsp+112],xmm7)
a2(mov rax,8)
a1(scrypt_salsa64_ssse3_loop: )
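/* same round structure as the SSE2 version, but the lane rotation between
   half-rounds uses SSSE3 palignr in place of the punpck shuffle sequence */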
a2(movdqa xmm8, xmm0)
a2(movdqa xmm9, xmm1)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm6, xmm8)
a2(pxor xmm7, xmm9)
a2(movdqa xmm10, xmm0)
a2(movdqa xmm11, xmm1)
a2(paddq xmm10, xmm6)
a2(paddq xmm11, xmm7)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 51)
a2(psrlq xmm11, 51)
a2(psllq xmm8, 13)
a2(psllq xmm9, 13)
a2(pxor xmm4, xmm10)
a2(pxor xmm5, xmm11)
a2(pxor xmm4, xmm8)
a2(pxor xmm5, xmm9)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(paddq xmm10, xmm4)
a2(paddq xmm11, xmm5)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 25)
a2(psrlq xmm11, 25)
a2(psllq xmm8, 39)
a2(psllq xmm9, 39)
a2(pxor xmm2, xmm10)
a2(pxor xmm3, xmm11)
a2(pxor xmm2, xmm8)
a2(pxor xmm3, xmm9)
a2(movdqa xmm8, xmm4)
a2(movdqa xmm9, xmm5)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm0, xmm8)
a2(pxor xmm1, xmm9)
a2(movdqa xmm10, xmm2)
a2(movdqa xmm11, xmm3)
a2(movdqa xmm2, xmm6)
a2(movdqa xmm3, xmm7)
a3(palignr xmm2, xmm7, 8)
a3(palignr xmm3, xmm6, 8)
a2(movdqa xmm6, xmm11)
a2(movdqa xmm7, xmm10)
a3(palignr xmm6, xmm10, 8)
a3(palignr xmm7, xmm11, 8)
a2(sub rax, 2)
a2(movdqa xmm8, xmm0)
a2(movdqa xmm9, xmm1)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm6, xmm8)
a2(pxor xmm7, xmm9)
a2(movdqa xmm10, xmm0)
a2(movdqa xmm11, xmm1)
a2(paddq xmm10, xmm6)
a2(paddq xmm11, xmm7)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 51)
a2(psrlq xmm11, 51)
a2(psllq xmm8, 13)
a2(psllq xmm9, 13)
a2(pxor xmm5, xmm10)
a2(pxor xmm4, xmm11)
a2(pxor xmm5, xmm8)
a2(pxor xmm4, xmm9)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(paddq xmm10, xmm5)
a2(paddq xmm11, xmm4)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 25)
a2(psrlq xmm11, 25)
a2(psllq xmm8, 39)
a2(psllq xmm9, 39)
a2(pxor xmm2, xmm10)
a2(pxor xmm3, xmm11)
a2(pxor xmm2, xmm8)
a2(pxor xmm3, xmm9)
a2(movdqa xmm8, xmm5)
a2(movdqa xmm9, xmm4)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm0, xmm8)
a2(pxor xmm1, xmm9)
a2(movdqa xmm10, xmm2)
a2(movdqa xmm11, xmm3)
a2(movdqa xmm2, xmm6)
a2(movdqa xmm3, xmm7)
a3(palignr xmm2, xmm7, 8)
a3(palignr xmm3, xmm6, 8)
a2(movdqa xmm6, xmm11)
a2(movdqa xmm7, xmm10)
a3(palignr xmm6, xmm10, 8)
a3(palignr xmm7, xmm11, 8)
aj(ja scrypt_salsa64_ssse3_loop)
a2(paddq xmm0,[rsp+0])
a2(paddq xmm1,[rsp+16])
a2(paddq xmm2,[rsp+32])
a2(paddq xmm3,[rsp+48])
a2(paddq xmm4,[rsp+64])
a2(paddq xmm5,[rsp+80])
a2(paddq xmm6,[rsp+96])
a2(paddq xmm7,[rsp+112])
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0xff)
a2(add r9,128)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(movdqa [rax+0],xmm0)
a2(movdqa [rax+16],xmm1)
a2(movdqa [rax+32],xmm2)
a2(movdqa [rax+48],xmm3)
a2(movdqa [rax+64],xmm4)
a2(movdqa [rax+80],xmm5)
a2(movdqa [rax+96],xmm6)
a2(movdqa [rax+112],xmm7)
aj(jne scrypt_ChunkMix_ssse3_loop)
a2(mov rsp, rbp)
a1(pop rbp)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_ssse3)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_SSSE3) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED))
#define SCRYPT_SALSA64_SSSE3
static void asm_calling_convention
scrypt_ChunkMix_ssse3(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,x6,x7,t0,t1,t2,t3,t4,t5,t6,t7,z0,z1,z2,z3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
x4 = xmmp[4];
x5 = xmmp[5];
x6 = xmmp[6];
x7 = xmmp[7];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
t4 = x4;
t5 = x5;
t6 = x6;
t7 = x7;
for (rounds = 8; rounds; rounds -= 2) {
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x4 = _mm_xor_si128(x4, z2);
x5 = _mm_xor_si128(x5, z3);
x4 = _mm_xor_si128(x4, z0);
x5 = _mm_xor_si128(x5, z1);
z0 = _mm_add_epi64(x4, x6);
z1 = _mm_add_epi64(x5, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x4);
z1 = _mm_add_epi64(x3, x5);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x5 = _mm_xor_si128(x5, z2);
x4 = _mm_xor_si128(x4, z3);
x5 = _mm_xor_si128(x5, z0);
x4 = _mm_xor_si128(x4, z1);
z0 = _mm_add_epi64(x5, x6);
z1 = _mm_add_epi64(x4, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x5);
z1 = _mm_add_epi64(x3, x4);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
}
x0 = _mm_add_epi64(x0, t0);
x1 = _mm_add_epi64(x1, t1);
x2 = _mm_add_epi64(x2, t2);
x3 = _mm_add_epi64(x3, t3);
x4 = _mm_add_epi64(x4, t4);
x5 = _mm_add_epi64(x5, t5);
x6 = _mm_add_epi64(x6, t6);
x7 = _mm_add_epi64(x7, t7);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
xmmp[4] = x4;
xmmp[5] = x5;
xmmp[6] = x6;
xmmp[7] = x7;
}
}
#endif
#if defined(SCRYPT_SALSA64_SSSE3)
/* uses salsa64_core_tangle_sse2 */
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8-SSSE3"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#endif


@@ -0,0 +1,335 @@
/* x64 */
#if defined(X86_64ASM_XOP) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA64_XOP
asm_naked_fn_proto(void, scrypt_ChunkMix_xop)(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_xop)
a1(push rbp)
a2(mov rbp, rsp)
a2(and rsp, ~63)
a2(sub rsp, 128)
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,7)
a2(lea r9,[rcx-128])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(vmovdqa xmm0,[rax+0])
a2(vmovdqa xmm1,[rax+16])
a2(vmovdqa xmm2,[rax+32])
a2(vmovdqa xmm3,[rax+48])
a2(vmovdqa xmm4,[rax+64])
a2(vmovdqa xmm5,[rax+80])
a2(vmovdqa xmm6,[rax+96])
a2(vmovdqa xmm7,[rax+112])
aj(jz scrypt_ChunkMix_xop_no_xor1)
a3(vpxor xmm0,xmm0,[r9+0])
a3(vpxor xmm1,xmm1,[r9+16])
a3(vpxor xmm2,xmm2,[r9+32])
a3(vpxor xmm3,xmm3,[r9+48])
a3(vpxor xmm4,xmm4,[r9+64])
a3(vpxor xmm5,xmm5,[r9+80])
a3(vpxor xmm6,xmm6,[r9+96])
a3(vpxor xmm7,xmm7,[r9+112])
a1(scrypt_ChunkMix_xop_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_xop_loop:)
a2(and rdx, rdx)
a3(vpxor xmm0,xmm0,[rsi+r9+0])
a3(vpxor xmm1,xmm1,[rsi+r9+16])
a3(vpxor xmm2,xmm2,[rsi+r9+32])
a3(vpxor xmm3,xmm3,[rsi+r9+48])
a3(vpxor xmm4,xmm4,[rsi+r9+64])
a3(vpxor xmm5,xmm5,[rsi+r9+80])
a3(vpxor xmm6,xmm6,[rsi+r9+96])
a3(vpxor xmm7,xmm7,[rsi+r9+112])
aj(jz scrypt_ChunkMix_xop_no_xor2)
a3(vpxor xmm0,xmm0,[rdx+r9+0])
a3(vpxor xmm1,xmm1,[rdx+r9+16])
a3(vpxor xmm2,xmm2,[rdx+r9+32])
a3(vpxor xmm3,xmm3,[rdx+r9+48])
a3(vpxor xmm4,xmm4,[rdx+r9+64])
a3(vpxor xmm5,xmm5,[rdx+r9+80])
a3(vpxor xmm6,xmm6,[rdx+r9+96])
a3(vpxor xmm7,xmm7,[rdx+r9+112])
a1(scrypt_ChunkMix_xop_no_xor2:)
a2(vmovdqa [rsp+0],xmm0)
a2(vmovdqa [rsp+16],xmm1)
a2(vmovdqa [rsp+32],xmm2)
a2(vmovdqa [rsp+48],xmm3)
a2(vmovdqa [rsp+64],xmm4)
a2(vmovdqa [rsp+80],xmm5)
a2(vmovdqa [rsp+96],xmm6)
a2(vmovdqa [rsp+112],xmm7)
a2(mov rax,8)
a1(scrypt_salsa64_xop_loop: )
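/* XOP's vprotq is a true 64-bit rotate, collapsing each emulated
   shift/shift/xor/xor rotate into one instruction; only the rotations by 32
   still use vpshufd 0xb1 */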
a3(vpaddq xmm8, xmm0, xmm2)
a3(vpaddq xmm9, xmm1, xmm3)
a3(vpshufd xmm8, xmm8, 0xb1)
a3(vpshufd xmm9, xmm9, 0xb1)
a3(vpxor xmm6, xmm6, xmm8)
a3(vpxor xmm7, xmm7, xmm9)
a3(vpaddq xmm10, xmm0, xmm6)
a3(vpaddq xmm11, xmm1, xmm7)
a3(vprotq xmm10, xmm10, 13)
a3(vprotq xmm11, xmm11, 13)
a3(vpxor xmm4, xmm4, xmm10)
a3(vpxor xmm5, xmm5, xmm11)
a3(vpaddq xmm8, xmm6, xmm4)
a3(vpaddq xmm9, xmm7, xmm5)
a3(vprotq xmm8, xmm8, 39)
a3(vprotq xmm9, xmm9, 39)
a3(vpxor xmm2, xmm2, xmm8)
a3(vpxor xmm3, xmm3, xmm9)
a3(vpaddq xmm10, xmm4, xmm2)
a3(vpaddq xmm11, xmm5, xmm3)
a3(vpshufd xmm10, xmm10, 0xb1)
a3(vpshufd xmm11, xmm11, 0xb1)
a3(vpxor xmm0, xmm0, xmm10)
a3(vpxor xmm1, xmm1, xmm11)
a2(vmovdqa xmm8, xmm2)
a2(vmovdqa xmm9, xmm3)
a4(vpalignr xmm2, xmm6, xmm7, 8)
a4(vpalignr xmm3, xmm7, xmm6, 8)
a4(vpalignr xmm6, xmm9, xmm8, 8)
a4(vpalignr xmm7, xmm8, xmm9, 8)
a3(vpaddq xmm10, xmm0, xmm2)
a3(vpaddq xmm11, xmm1, xmm3)
a3(vpshufd xmm10, xmm10, 0xb1)
a3(vpshufd xmm11, xmm11, 0xb1)
a3(vpxor xmm6, xmm6, xmm10)
a3(vpxor xmm7, xmm7, xmm11)
a3(vpaddq xmm8, xmm0, xmm6)
a3(vpaddq xmm9, xmm1, xmm7)
a3(vprotq xmm8, xmm8, 13)
a3(vprotq xmm9, xmm9, 13)
a3(vpxor xmm5, xmm5, xmm8)
a3(vpxor xmm4, xmm4, xmm9)
a3(vpaddq xmm10, xmm6, xmm5)
a3(vpaddq xmm11, xmm7, xmm4)
a3(vprotq xmm10, xmm10, 39)
a3(vprotq xmm11, xmm11, 39)
a3(vpxor xmm2, xmm2, xmm10)
a3(vpxor xmm3, xmm3, xmm11)
a3(vpaddq xmm8, xmm5, xmm2)
a3(vpaddq xmm9, xmm4, xmm3)
a3(vpshufd xmm8, xmm8, 0xb1)
a3(vpshufd xmm9, xmm9, 0xb1)
a3(vpxor xmm0, xmm0, xmm8)
a3(vpxor xmm1, xmm1, xmm9)
a2(vmovdqa xmm10, xmm2)
a2(vmovdqa xmm11, xmm3)
a4(vpalignr xmm2, xmm6, xmm7, 8)
a4(vpalignr xmm3, xmm7, xmm6, 8)
a4(vpalignr xmm6, xmm11, xmm10, 8)
a4(vpalignr xmm7, xmm10, xmm11, 8)
a2(sub rax, 2)
aj(ja scrypt_salsa64_xop_loop)
a3(vpaddq xmm0,xmm0,[rsp+0])
a3(vpaddq xmm1,xmm1,[rsp+16])
a3(vpaddq xmm2,xmm2,[rsp+32])
a3(vpaddq xmm3,xmm3,[rsp+48])
a3(vpaddq xmm4,xmm4,[rsp+64])
a3(vpaddq xmm5,xmm5,[rsp+80])
a3(vpaddq xmm6,xmm6,[rsp+96])
a3(vpaddq xmm7,xmm7,[rsp+112])
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0xff)
a2(add r9,128)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(vmovdqa [rax+0],xmm0)
a2(vmovdqa [rax+16],xmm1)
a2(vmovdqa [rax+32],xmm2)
a2(vmovdqa [rax+48],xmm3)
a2(vmovdqa [rax+64],xmm4)
a2(vmovdqa [rax+80],xmm5)
a2(vmovdqa [rax+96],xmm6)
a2(vmovdqa [rax+112],xmm7)
aj(jne scrypt_ChunkMix_xop_loop)
a2(mov rsp, rbp)
a1(pop rbp)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_xop)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_XOP) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED))
#define SCRYPT_SALSA64_XOP
static void asm_calling_convention
scrypt_ChunkMix_xop(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,x6,x7,t0,t1,t2,t3,t4,t5,t6,t7,z0,z1,z2,z3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
x4 = xmmp[4];
x5 = xmmp[5];
x6 = xmmp[6];
x7 = xmmp[7];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
t4 = x4;
t5 = x5;
t6 = x6;
t7 = x7;
for (rounds = 8; rounds; rounds -= 2) {
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z0 = _mm_roti_epi64(z0, 13);
z1 = _mm_roti_epi64(z1, 13);
x4 = _mm_xor_si128(x4, z0);
x5 = _mm_xor_si128(x5, z1);
z0 = _mm_add_epi64(x4, x6);
z1 = _mm_add_epi64(x5, x7);
z0 = _mm_roti_epi64(z0, 39);
z1 = _mm_roti_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x4);
z1 = _mm_add_epi64(x3, x5);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z0 = _mm_roti_epi64(z0, 13);
z1 = _mm_roti_epi64(z1, 13);
x5 = _mm_xor_si128(x5, z0);
x4 = _mm_xor_si128(x4, z1);
z0 = _mm_add_epi64(x5, x6);
z1 = _mm_add_epi64(x4, x7);
z0 = _mm_roti_epi64(z0, 39);
z1 = _mm_roti_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x5);
z1 = _mm_add_epi64(x3, x4);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
}
x0 = _mm_add_epi64(x0, t0);
x1 = _mm_add_epi64(x1, t1);
x2 = _mm_add_epi64(x2, t2);
x3 = _mm_add_epi64(x3, t3);
x4 = _mm_add_epi64(x4, t4);
x5 = _mm_add_epi64(x5, t5);
x6 = _mm_add_epi64(x6, t6);
x7 = _mm_add_epi64(x7, t7);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
xmmp[4] = x4;
xmmp[5] = x5;
xmmp[6] = x6;
xmmp[7] = x7;
}
}
#endif
#if defined(SCRYPT_SALSA64_XOP)
/* uses salsa64_core_tangle_sse2 */
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8-XOP"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#endif


@@ -0,0 +1,41 @@
#if !defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8 Ref"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_BASIC
static void
salsa64_core_basic(uint64_t state[16]) {
const size_t rounds = 8;
uint64_t v[16], t;
size_t i;
for (i = 0; i < 16; i++) v[i] = state[i];
#define G(a,b,c,d) \
t = v[a]+v[d]; t = ROTL64(t, 32); v[b] ^= t; \
t = v[b]+v[a]; t = ROTL64(t, 13); v[c] ^= t; \
t = v[c]+v[b]; t = ROTL64(t, 39); v[d] ^= t; \
t = v[d]+v[c]; t = ROTL64(t, 32); v[a] ^= t;
for (i = 0; i < rounds; i += 2) {
G( 0, 4, 8,12);
G( 5, 9,13, 1);
G(10,14, 2, 6);
G(15, 3, 7,11);
G( 0, 1, 2, 3);
G( 5, 6, 7, 4);
G(10,11, 8, 9);
G(15,12,13,14);
}
for (i = 0; i < 16; i++) state[i] += v[i];
#undef G
}
#endif


@@ -0,0 +1,112 @@
typedef struct scrypt_hmac_state_t {
scrypt_hash_state inner, outer;
} scrypt_hmac_state;
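/* HMAC is kept as two running hash states so the keyed inner/outer pads are
   absorbed once at init; copying the struct then serves as a cheap rewind
   for the many short messages PBKDF2 feeds it */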
static void
scrypt_hash(scrypt_hash_digest hash, const uint8_t *m, size_t mlen) {
scrypt_hash_state st;
scrypt_hash_init(&st);
scrypt_hash_update(&st, m, mlen);
scrypt_hash_finish(&st, hash);
}
/* hmac */
static void
scrypt_hmac_init(scrypt_hmac_state *st, const uint8_t *key, size_t keylen) {
uint8_t pad[SCRYPT_HASH_BLOCK_SIZE] = {0};
size_t i;
scrypt_hash_init(&st->inner);
scrypt_hash_init(&st->outer);
if (keylen <= SCRYPT_HASH_BLOCK_SIZE) {
/* use the key directly if it's <= blocksize bytes */
memcpy(pad, key, keylen);
} else {
/* if it's > blocksize bytes, hash it */
scrypt_hash(pad, key, keylen);
}
/* inner = (key ^ 0x36) */
/* h(inner || ...) */
for (i = 0; i < SCRYPT_HASH_BLOCK_SIZE; i++)
pad[i] ^= 0x36;
scrypt_hash_update(&st->inner, pad, SCRYPT_HASH_BLOCK_SIZE);
/* outer = (key ^ 0x5c) */
/* h(outer || ...) */
for (i = 0; i < SCRYPT_HASH_BLOCK_SIZE; i++)
pad[i] ^= (0x5c ^ 0x36);
scrypt_hash_update(&st->outer, pad, SCRYPT_HASH_BLOCK_SIZE);
scrypt_ensure_zero(pad, sizeof(pad));
}
static void
scrypt_hmac_update(scrypt_hmac_state *st, const uint8_t *m, size_t mlen) {
/* h(inner || m...) */
scrypt_hash_update(&st->inner, m, mlen);
}
static void
scrypt_hmac_finish(scrypt_hmac_state *st, scrypt_hash_digest mac) {
/* h(inner || m) */
scrypt_hash_digest innerhash;
scrypt_hash_finish(&st->inner, innerhash);
/* h(outer || h(inner || m)) */
scrypt_hash_update(&st->outer, innerhash, sizeof(innerhash));
scrypt_hash_finish(&st->outer, mac);
scrypt_ensure_zero(st, sizeof(*st));
}
static void
scrypt_pbkdf2(const uint8_t *password, size_t password_len, const uint8_t *salt, size_t salt_len, uint64_t N, uint8_t *out, size_t bytes) {
scrypt_hmac_state hmac_pw, hmac_pw_salt, work;
scrypt_hash_digest ti, u;
uint8_t be[4];
uint32_t i, j, blocks;
uint64_t c;
/* bytes must be <= (0xffffffff - (SCRYPT_HASH_DIGEST_SIZE - 1)), which they will always be under scrypt */
/* hmac(password, ...) */
scrypt_hmac_init(&hmac_pw, password, password_len);
/* hmac(password, salt...) */
hmac_pw_salt = hmac_pw;
scrypt_hmac_update(&hmac_pw_salt, salt, salt_len);
blocks = ((uint32_t)bytes + (SCRYPT_HASH_DIGEST_SIZE - 1)) / SCRYPT_HASH_DIGEST_SIZE;
for (i = 1; i <= blocks; i++) {
/* U1 = hmac(password, salt || be(i)) */
U32TO8_BE(be, i);
work = hmac_pw_salt;
scrypt_hmac_update(&work, be, 4);
scrypt_hmac_finish(&work, ti);
memcpy(u, ti, sizeof(u));
/* T[i] = U1 ^ U2 ^ U3... */
for (c = 0; c < N - 1; c++) {
/* UX = hmac(password, U{X-1}) */
work = hmac_pw;
scrypt_hmac_update(&work, u, SCRYPT_HASH_DIGEST_SIZE);
scrypt_hmac_finish(&work, u);
/* T[i] ^= UX */
for (j = 0; j < sizeof(u); j++)
ti[j] ^= u[j];
}
memcpy(out, ti, (bytes > SCRYPT_HASH_DIGEST_SIZE) ? SCRYPT_HASH_DIGEST_SIZE : bytes);
out += SCRYPT_HASH_DIGEST_SIZE;
bytes -= SCRYPT_HASH_DIGEST_SIZE;
}
scrypt_ensure_zero(ti, sizeof(ti));
scrypt_ensure_zero(u, sizeof(u));
scrypt_ensure_zero(&hmac_pw, sizeof(hmac_pw));
scrypt_ensure_zero(&hmac_pw_salt, sizeof(hmac_pw_salt));
}
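/* illustrative call only (names hypothetical): scrypt itself always invokes
   this with N = 1, e.g.
     scrypt_pbkdf2(password, password_len, salt, salt_len, 1,
                   (uint8_t *)B, chunkBytes * p);
   to derive the initial chunk data */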


@@ -0,0 +1,462 @@
#if defined(CPU_X86) && (defined(COMPILER_MSVC) || defined(COMPILER_GCC))
#define X86ASM
/* gcc 2.95 royally screws up stack alignments on variables */
#if ((defined(COMPILER_MSVC) && (COMPILER_MSVC >= COMPILER_MSVC_VS6PP)) || (defined(COMPILER_GCC) && (COMPILER_GCC >= 30000)))
#define X86ASM_SSE
#define X86ASM_SSE2
#endif
#if ((defined(COMPILER_MSVC) && (COMPILER_MSVC >= COMPILER_MSVC_VS2005)) || (defined(COMPILER_GCC) && (COMPILER_GCC >= 40102)))
#define X86ASM_SSSE3
#endif
#if ((defined(COMPILER_MSVC) && (COMPILER_MSVC >= COMPILER_MSVC_VS2010SP1)) || (defined(COMPILER_GCC) && (COMPILER_GCC >= 40400)))
#define X86ASM_AVX
#define X86ASM_XOP
#endif
#if ((defined(COMPILER_MSVC) && (COMPILER_MSVC >= COMPILER_MSVC_VS2012)) || (defined(COMPILER_GCC) && (COMPILER_GCC >= 40700)))
#define X86ASM_AVX2
#endif
#endif
#if defined(CPU_X86_64) && defined(COMPILER_GCC)
#define X86_64ASM
#define X86_64ASM_SSE2
#if (COMPILER_GCC >= 40102)
#define X86_64ASM_SSSE3
#endif
#if (COMPILER_GCC >= 40400)
#define X86_64ASM_AVX
#define X86_64ASM_XOP
#endif
#if (COMPILER_GCC >= 40700)
#define X86_64ASM_AVX2
#endif
#endif
#if defined(COMPILER_MSVC) && (defined(CPU_X86_FORCE_INTRINSICS) || defined(CPU_X86_64))
#define X86_INTRINSIC
#if defined(CPU_X86_64) || defined(X86ASM_SSE)
#define X86_INTRINSIC_SSE
#endif
#if defined(CPU_X86_64) || defined(X86ASM_SSE2)
#define X86_INTRINSIC_SSE2
#endif
#if (COMPILER_MSVC >= COMPILER_MSVC_VS2005)
#define X86_INTRINSIC_SSSE3
#endif
#if (COMPILER_MSVC >= COMPILER_MSVC_VS2010SP1)
#define X86_INTRINSIC_AVX
#define X86_INTRINSIC_XOP
#endif
#if (COMPILER_MSVC >= COMPILER_MSVC_VS2012)
#define X86_INTRINSIC_AVX2
#endif
#endif
#if defined(COMPILER_GCC) && defined(CPU_X86_FORCE_INTRINSICS)
#define X86_INTRINSIC
#if defined(__SSE__)
#define X86_INTRINSIC_SSE
#endif
#if defined(__SSE2__)
#define X86_INTRINSIC_SSE2
#endif
#if defined(__SSSE3__)
#define X86_INTRINSIC_SSSE3
#endif
#if defined(__AVX__)
#define X86_INTRINSIC_AVX
#endif
#if defined(__XOP__)
#define X86_INTRINSIC_XOP
#endif
#if defined(__AVX2__)
#define X86_INTRINSIC_AVX2
#endif
#endif
/* only use simd on windows (or SSE2 on gcc)! */
#if defined(CPU_X86_FORCE_INTRINSICS) || defined(X86_INTRINSIC)
#if defined(X86_INTRINSIC_SSE)
#include <mmintrin.h>
#include <xmmintrin.h>
typedef __m64 qmm;
typedef __m128 xmm;
typedef __m128d xmmd;
#endif
#if defined(X86_INTRINSIC_SSE2)
#include <emmintrin.h>
typedef __m128i xmmi;
#endif
#if defined(X86_INTRINSIC_SSSE3)
#include <tmmintrin.h>
#endif
#if defined(X86_INTRINSIC_AVX)
#include <immintrin.h>
#endif
#if defined(X86_INTRINSIC_XOP)
#if defined(COMPILER_MSVC)
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#endif
#if defined(X86_INTRINSIC_AVX2)
typedef __m256i ymmi;
#endif
#endif
#if defined(X86_INTRINSIC_SSE2)
typedef union packedelem8_t {
uint8_t u[16];
xmmi v;
} packedelem8;
typedef union packedelem32_t {
uint32_t u[4];
xmmi v;
} packedelem32;
typedef union packedelem64_t {
uint64_t u[2];
xmmi v;
} packedelem64;
#else
typedef union packedelem8_t {
uint8_t u[16];
uint32_t dw[4];
} packedelem8;
typedef union packedelem32_t {
uint32_t u[4];
uint8_t b[16];
} packedelem32;
typedef union packedelem64_t {
uint64_t u[2];
uint8_t b[16];
} packedelem64;
#endif
#if defined(X86_INTRINSIC_SSSE3)
static const packedelem8 ALIGN(16) ssse3_rotl16_32bit = {{2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13}};
static const packedelem8 ALIGN(16) ssse3_rotl8_32bit = {{3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14}};
#endif
/*
x86 inline asm for gcc/msvc. usage:
asm_naked_fn_proto(return_type, name) (type parm1, type parm2..)
asm_naked_fn(name)
a1(..)
a2(.., ..)
a3(.., .., ..)
64-bit OR 0 parameters: a1(ret)
32-bit AND n parameters: aret(4n), e.g. aret(16) for 4 parameters
asm_naked_fn_end(name)
*/
#if defined(X86ASM) || defined(X86_64ASM)
#if defined(COMPILER_MSVC)
#pragma warning(disable : 4731) /* frame pointer modified by inline assembly */
#define a1(x) __asm {x}
#define a2(x, y) __asm {x, y}
#define a3(x, y, z) __asm {x, y, z}
#define a4(x, y, z, w) __asm {x, y, z, w}
#define aj(x) __asm {x}
#define asm_align8 a1(ALIGN 8)
#define asm_align16 a1(ALIGN 16)
#define asm_calling_convention STDCALL
#define aret(n) a1(ret n)
#define asm_naked_fn_proto(type, fn) static NAKED type asm_calling_convention fn
#define asm_naked_fn(fn) {
#define asm_naked_fn_end(fn) }
#elif defined(COMPILER_GCC)
#define GNU_AS1(x) #x ";\n"
#define GNU_AS2(x, y) #x ", " #y ";\n"
#define GNU_AS3(x, y, z) #x ", " #y ", " #z ";\n"
#define GNU_AS4(x, y, z, w) #x ", " #y ", " #z ", " #w ";\n"
#define GNU_ASFN(x) "\n_" #x ":\n" #x ":\n"
#define GNU_ASJ(x) ".att_syntax prefix\n" #x "\n.intel_syntax noprefix\n"
#define a1(x) GNU_AS1(x)
#define a2(x, y) GNU_AS2(x, y)
#define a3(x, y, z) GNU_AS3(x, y, z)
#define a4(x, y, z, w) GNU_AS4(x, y, z, w)
#define aj(x) GNU_ASJ(x)
#define asm_align8 ".p2align 3,,7"
#define asm_align16 ".p2align 4,,15"
#if defined(OS_WINDOWS)
#define asm_calling_convention CDECL
#define aret(n) a1(ret)
#if defined(X86_64ASM)
#define asm_naked_fn(fn) ; __asm__ ( \
".text\n" \
asm_align16 GNU_ASFN(fn) \
"subq $136, %rsp;" \
"movdqa %xmm6, 0(%rsp);" \
"movdqa %xmm7, 16(%rsp);" \
"movdqa %xmm8, 32(%rsp);" \
"movdqa %xmm9, 48(%rsp);" \
"movdqa %xmm10, 64(%rsp);" \
"movdqa %xmm11, 80(%rsp);" \
"movdqa %xmm12, 96(%rsp);" \
"movq %rdi, 112(%rsp);" \
"movq %rsi, 120(%rsp);" \
"movq %rcx, %rdi;" \
"movq %rdx, %rsi;" \
"movq %r8, %rdx;" \
"movq %r9, %rcx;" \
"call 1f;" \
"movdqa 0(%rsp), %xmm6;" \
"movdqa 16(%rsp), %xmm7;" \
"movdqa 32(%rsp), %xmm8;" \
"movdqa 48(%rsp), %xmm9;" \
"movdqa 64(%rsp), %xmm10;" \
"movdqa 80(%rsp), %xmm11;" \
"movdqa 96(%rsp), %xmm12;" \
"movq 112(%rsp), %rdi;" \
"movq 120(%rsp), %rsi;" \
"addq $136, %rsp;" \
"ret;" \
".intel_syntax noprefix;" \
".p2align 4,,15;" \
"1:;"
#else
#define asm_naked_fn(fn) ; __asm__ (".intel_syntax noprefix;\n.text\n" asm_align16 GNU_ASFN(fn)
#endif
#else
#define asm_calling_convention STDCALL
#define aret(n) a1(ret n)
#define asm_naked_fn(fn) ; __asm__ (".intel_syntax noprefix;\n.text\n" asm_align16 GNU_ASFN(fn)
#endif
#define asm_naked_fn_proto(type, fn) extern type asm_calling_convention fn
#define asm_naked_fn_end(fn) ".att_syntax prefix;\n" );
#define asm_gcc() __asm__ __volatile__(".intel_syntax noprefix;\n"
#define asm_gcc_parms() ".att_syntax prefix;"
#define asm_gcc_trashed() __asm__ __volatile__("" :::
#define asm_gcc_end() );
#else
need x86 asm
#endif
#endif /* X86ASM || X86_64ASM */
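/*
   illustrative sketch (hypothetical function, not part of the original
   source): with the macros above, a zero-parameter leaf function that
   returns a constant would be written roughly as

     asm_naked_fn_proto(uint32_t, example_return_42)(void)
     asm_naked_fn(example_return_42)
         a2(mov eax, 42)
         a1(ret)
     asm_naked_fn_end(example_return_42)

   has_cpuid() below is a real instance of the same pattern.
*/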
#if defined(CPU_X86) || defined(CPU_X86_64)
typedef enum cpu_flags_x86_t {
cpu_mmx = 1 << 0,
cpu_sse = 1 << 1,
cpu_sse2 = 1 << 2,
cpu_sse3 = 1 << 3,
cpu_ssse3 = 1 << 4,
cpu_sse4_1 = 1 << 5,
cpu_sse4_2 = 1 << 6,
cpu_avx = 1 << 7,
cpu_xop = 1 << 8,
cpu_avx2 = 1 << 9
} cpu_flags_x86;
typedef enum cpu_vendors_x86_t {
cpu_nobody,
cpu_intel,
cpu_amd
} cpu_vendors_x86;
typedef struct x86_regs_t {
uint32_t eax, ebx, ecx, edx;
} x86_regs;
#if defined(X86ASM)
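/* detects CPUID support by toggling the ID bit (bit 21) of EFLAGS; if the
   toggled value sticks, the CPUID instruction is available (only needed on
   32-bit x86, since x86-64 always has CPUID) */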
asm_naked_fn_proto(int, has_cpuid)(void)
asm_naked_fn(has_cpuid)
a1(pushfd)
a1(pop eax)
a2(mov ecx, eax)
a2(xor eax, 0x200000)
a1(push eax)
a1(popfd)
a1(pushfd)
a1(pop eax)
a2(xor eax, ecx)
a2(shr eax, 21)
a2(and eax, 1)
a1(push ecx)
a1(popfd)
a1(ret)
asm_naked_fn_end(has_cpuid)
#endif /* X86ASM */
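/* executes CPUID leaf `flags` (with ecx zeroed for sub-leaf 0) and stores
   eax/ebx/ecx/edx into *regs; ebx/rbx is saved and restored around the
   cpuid because it may be reserved as the PIC register */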
static void NOINLINE
get_cpuid(x86_regs *regs, uint32_t flags) {
#if defined(COMPILER_MSVC)
__cpuid((int *)regs, (int)flags);
#else
#if defined(CPU_X86_64)
#define cpuid_bx rbx
#else
#define cpuid_bx ebx
#endif
asm_gcc()
a1(push cpuid_bx)
a2(xor ecx, ecx)
a1(cpuid)
a2(mov [%1 + 0], eax)
a2(mov [%1 + 4], ebx)
a2(mov [%1 + 8], ecx)
a2(mov [%1 + 12], edx)
a1(pop cpuid_bx)
asm_gcc_parms() : "+a"(flags) : "S"(regs) : "%ecx", "%edx", "cc"
asm_gcc_end()
#endif
}
#if defined(X86ASM_AVX) || defined(X86_64ASM_AVX)
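/* reads the extended control register selected by `flags` via xgetbv;
   in XCR0, bits 1 and 2 (mask 0x6) indicate that the OS saves SSE and AVX
   state, which detect_cpu() requires before reporting cpu_avx */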
static uint64_t NOINLINE
get_xgetbv(uint32_t flags) {
#if defined(COMPILER_MSVC)
return _xgetbv(flags);
#else
uint32_t lo, hi;
asm_gcc()
a1(xgetbv)
asm_gcc_parms() : "+c"(flags), "=a" (lo), "=d" (hi)
asm_gcc_end()
return ((uint64_t)lo | ((uint64_t)hi << 32));
#endif
}
#endif // AVX support
#if defined(SCRYPT_TEST_SPEED)
size_t cpu_detect_mask = (size_t)-1;
#endif
static size_t
detect_cpu(void) {
union { uint8_t s[12]; uint32_t i[3]; } vendor_string;
cpu_vendors_x86 vendor = cpu_nobody;
x86_regs regs;
uint32_t max_level, max_ext_level;
size_t cpu_flags = 0;
#if defined(X86ASM_AVX) || defined(X86_64ASM_AVX)
uint64_t xgetbv_flags;
#endif
#if defined(CPU_X86)
if (!has_cpuid())
return cpu_flags;
#endif
get_cpuid(&regs, 0);
max_level = regs.eax;
vendor_string.i[0] = regs.ebx;
vendor_string.i[1] = regs.edx;
vendor_string.i[2] = regs.ecx;
if (scrypt_verify(vendor_string.s, (const uint8_t *)"GenuineIntel", 12))
vendor = cpu_intel;
else if (scrypt_verify(vendor_string.s, (const uint8_t *)"AuthenticAMD", 12))
vendor = cpu_amd;
if (max_level & 0x00000500) {
/* "Intel P5 pre-B0" */
cpu_flags |= cpu_mmx;
return cpu_flags;
}
if (max_level < 1)
return cpu_flags;
get_cpuid(&regs, 1);
#if defined(X86ASM_AVX) || defined(X86_64ASM_AVX)
/* ecx bit 27 = OSXSAVE: xsave/xrstor is enabled by the OS and xgetbv is available */
if (regs.ecx & (1 << 27)) {
xgetbv_flags = get_xgetbv(0);
if ((regs.ecx & (1 << 28)) && (xgetbv_flags & 0x6)) cpu_flags |= cpu_avx;
}
#endif
if (regs.ecx & (1 << 20)) cpu_flags |= cpu_sse4_2;
if (regs.ecx & (1 << 19)) cpu_flags |= cpu_sse4_1;
if (regs.ecx & (1 << 9)) cpu_flags |= cpu_ssse3;
if (regs.ecx & (1 << 0)) cpu_flags |= cpu_sse3;
if (regs.edx & (1 << 26)) cpu_flags |= cpu_sse2;
if (regs.edx & (1 << 25)) cpu_flags |= cpu_sse;
if (regs.edx & (1 << 23)) cpu_flags |= cpu_mmx;
if (cpu_flags & cpu_avx) {
if (max_level >= 7) {
get_cpuid(&regs, 7);
if (regs.ebx & (1 << 5)) cpu_flags |= cpu_avx2;
}
get_cpuid(&regs, 0x80000000);
max_ext_level = regs.eax;
if (max_ext_level >= 0x80000001) {
get_cpuid(&regs, 0x80000001);
if (regs.ecx & (1 << 11)) cpu_flags |= cpu_xop;
}
}
#if defined(SCRYPT_TEST_SPEED)
cpu_flags &= cpu_detect_mask;
#endif
return cpu_flags;
}
#if defined(SCRYPT_TEST_SPEED)
static const char *
get_top_cpuflag_desc(size_t flag) {
if (flag & cpu_avx2) return "AVX2";
else if (flag & cpu_xop) return "XOP";
else if (flag & cpu_avx) return "AVX";
else if (flag & cpu_sse4_2) return "SSE4.2";
else if (flag & cpu_sse4_1) return "SSE4.1";
else if (flag & cpu_ssse3) return "SSSE3";
else if (flag & cpu_sse2) return "SSE2";
else if (flag & cpu_sse) return "SSE";
else if (flag & cpu_mmx) return "MMX";
else return "Basic";
}
#endif
/* enable the highest system-wide option */
#if defined(SCRYPT_CHOOSE_COMPILETIME)
#if !defined(__AVX2__)
#undef X86_64ASM_AVX2
#undef X86ASM_AVX2
#undef X86_INTRINSIC_AVX2
#endif
#if !defined(__XOP__)
#undef X86_64ASM_XOP
#undef X86ASM_XOP
#undef X86_INTRINSIC_XOP
#endif
#if !defined(__AVX__)
#undef X86_64ASM_AVX
#undef X86ASM_AVX
#undef X86_INTRINSIC_AVX
#endif
#if !defined(__SSSE3__)
#undef X86_64ASM_SSSE3
#undef X86ASM_SSSE3
#undef X86_INTRINSIC_SSSE3
#endif
#if !defined(__SSE2__)
#undef X86_64ASM_SSE2
#undef X86ASM_SSE2
#undef X86_INTRINSIC_SSE2
#endif
#endif
#endif /* defined(CPU_X86) || defined(CPU_X86_64) */
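/*
   usage sketch (illustrative): callers pick the best implementation from
   the flags returned by detect_cpu(), e.g.

     size_t flags = detect_cpu();
     if (flags & cpu_avx2)
         use_avx2_path();       (hypothetical helpers, for illustration)
     else if (flags & cpu_sse2)
         use_sse2_path();

   scrypt_getROMix() later in this commit follows exactly this pattern.
*/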


@ -0,0 +1,307 @@
/* determine os */
#if defined(_WIN32) || defined(_WIN64) || defined(__TOS_WIN__) || defined(__WINDOWS__)
#include <windows.h>
#include <wincrypt.h>
#define OS_WINDOWS
#elif defined(sun) || defined(__sun) || defined(__SVR4) || defined(__svr4__)
#include <sys/mman.h>
#include <sys/time.h>
#include <fcntl.h>
#define OS_SOLARIS
#else
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/param.h> /* need this to define BSD */
#include <unistd.h>
#include <fcntl.h>
#define OS_NIX
#if defined(__linux__)
#include <endian.h>
#define OS_LINUX
#elif defined(BSD)
#define OS_BSD
#if defined(MACOS_X) || (defined(__APPLE__) && defined(__MACH__))
#define OS_OSX
#elif defined(macintosh) || defined(Macintosh)
#define OS_MAC
#elif defined(__OpenBSD__)
#define OS_OPENBSD
#endif
#endif
#endif
/* determine compiler */
#if defined(_MSC_VER)
#define COMPILER_MSVC_VS6 120000000
#define COMPILER_MSVC_VS6PP 121000000
#define COMPILER_MSVC_VS2002 130000000
#define COMPILER_MSVC_VS2003 131000000
#define COMPILER_MSVC_VS2005 140050727
#define COMPILER_MSVC_VS2008 150000000
#define COMPILER_MSVC_VS2008SP1 150030729
#define COMPILER_MSVC_VS2010 160000000
#define COMPILER_MSVC_VS2010SP1 160040219
#define COMPILER_MSVC_VS2012RC 170000000
#define COMPILER_MSVC_VS2012 170050727
#if _MSC_FULL_VER > 100000000
#define COMPILER_MSVC (_MSC_FULL_VER)
#else
#define COMPILER_MSVC (_MSC_FULL_VER * 10)
#endif
#if ((_MSC_VER == 1200) && defined(_mm_free))
#undef COMPILER_MSVC
#define COMPILER_MSVC COMPILER_MSVC_VS6PP
#endif
#pragma warning(disable : 4127) /* conditional expression is constant */
#pragma warning(disable : 4100) /* unreferenced formal parameter */
#define _CRT_SECURE_NO_WARNINGS
#include <float.h>
#include <stdlib.h> /* _rotl */
#include <intrin.h>
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef signed int int32_t;
typedef unsigned __int64 uint64_t;
typedef signed __int64 int64_t;
#define ROTL32(a,b) _rotl(a,b)
#define ROTR32(a,b) _rotr(a,b)
#define ROTL64(a,b) _rotl64(a,b)
#define ROTR64(a,b) _rotr64(a,b)
#undef NOINLINE
#define NOINLINE __declspec(noinline)
#undef NORETURN
#define NORETURN
#undef INLINE
#define INLINE __forceinline
#undef FASTCALL
#define FASTCALL __fastcall
#undef CDECL
#define CDECL __cdecl
#undef STDCALL
#define STDCALL __stdcall
#undef NAKED
#define NAKED __declspec(naked)
#define ALIGN(n) __declspec(align(n))
#endif
#if defined(__ICC)
#define COMPILER_INTEL
#endif
#if defined(__GNUC__)
#if (__GNUC__ >= 3)
#define COMPILER_GCC_PATCHLEVEL __GNUC_PATCHLEVEL__
#else
#define COMPILER_GCC_PATCHLEVEL 0
#endif
#define COMPILER_GCC (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + COMPILER_GCC_PATCHLEVEL)
#define ROTL32(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
#define ROTR32(a,b) (((a) >> (b)) | ((a) << (32 - (b))))
#define ROTL64(a,b) (((a) << (b)) | ((a) >> (64 - (b))))
#define ROTR64(a,b) (((a) >> (b)) | ((a) << (64 - (b))))
#undef NOINLINE
#if (COMPILER_GCC >= 30000)
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif
#undef NORETURN
#if (COMPILER_GCC >= 30000)
#define NORETURN __attribute__((noreturn))
#else
#define NORETURN
#endif
#undef INLINE
#if (COMPILER_GCC >= 30000)
#define INLINE __attribute__((always_inline))
#else
#define INLINE inline
#endif
#undef FASTCALL
#if (COMPILER_GCC >= 30400)
#define FASTCALL __attribute__((fastcall))
#else
#define FASTCALL
#endif
#undef CDECL
#define CDECL __attribute__((cdecl))
#undef STDCALL
#define STDCALL __attribute__((stdcall))
#define ALIGN(n) __attribute__((aligned(n)))
#include <stdint.h>
#endif
#if defined(__MINGW32__) || defined(__MINGW64__)
#define COMPILER_MINGW
#endif
#if defined(__PATHCC__)
#define COMPILER_PATHCC
#endif
#define OPTIONAL_INLINE
#if defined(OPTIONAL_INLINE)
#undef OPTIONAL_INLINE
#define OPTIONAL_INLINE INLINE
#else
#define OPTIONAL_INLINE
#endif
#define CRYPTO_FN NOINLINE STDCALL
/* determine cpu */
#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(_M_X64)
#define CPU_X86_64
#elif defined(__i586__) || defined(__i686__) || (defined(_M_IX86) && (_M_IX86 >= 500))
#define CPU_X86 500
#elif defined(__i486__) || (defined(_M_IX86) && (_M_IX86 >= 400))
#define CPU_X86 400
#elif defined(__i386__) || (defined(_M_IX86) && (_M_IX86 >= 300)) || defined(__X86__) || defined(_X86_) || defined(__I86__)
#define CPU_X86 300
#elif defined(__ia64__) || defined(_IA64) || defined(__IA64__) || defined(_M_IA64) || defined(__ia64)
#define CPU_IA64
#endif
#if defined(__sparc__) || defined(__sparc) || defined(__sparcv9)
#define CPU_SPARC
#if defined(__sparcv9)
#define CPU_SPARC64
#endif
#endif
#if defined(CPU_X86_64) || defined(CPU_IA64) || defined(CPU_SPARC64) || defined(__64BIT__) || defined(__LP64__) || defined(_LP64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
#define CPU_64BITS
#undef FASTCALL
#define FASTCALL
#undef CDECL
#define CDECL
#undef STDCALL
#define STDCALL
#endif
#if defined(powerpc) || defined(__PPC__) || defined(__ppc__) || defined(_ARCH_PPC) || defined(__powerpc__) || defined(__powerpc) || defined(POWERPC) || defined(_M_PPC)
#define CPU_PPC
#if defined(_ARCH_PWR7)
#define CPU_POWER7
#elif defined(__64BIT__)
#define CPU_PPC64
#else
#define CPU_PPC32
#endif
#endif
#if defined(__hppa__) || defined(__hppa)
#define CPU_HPPA
#endif
#if defined(__alpha__) || defined(__alpha) || defined(_M_ALPHA)
#define CPU_ALPHA
#endif
/* endian */
#if ((defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && (__BYTE_ORDER == __LITTLE_ENDIAN)) || \
(defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && (BYTE_ORDER == LITTLE_ENDIAN)) || \
(defined(CPU_X86) || defined(CPU_X86_64)) || \
(defined(vax) || defined(MIPSEL) || defined(_MIPSEL)))
#define CPU_LE
#elif ((defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && (__BYTE_ORDER == __BIG_ENDIAN)) || \
(defined(BYTE_ORDER) && defined(BIG_ENDIAN) && (BYTE_ORDER == BIG_ENDIAN)) || \
(defined(CPU_SPARC) || defined(CPU_PPC) || defined(mc68000) || defined(sel)) || defined(_MIPSEB))
#define CPU_BE
#else
/* unknown endian! */
#endif
#define U8TO32_BE(p) \
(((uint32_t)((p)[0]) << 24) | ((uint32_t)((p)[1]) << 16) | \
((uint32_t)((p)[2]) << 8) | ((uint32_t)((p)[3]) ))
#define U8TO32_LE(p) \
(((uint32_t)((p)[0]) ) | ((uint32_t)((p)[1]) << 8) | \
((uint32_t)((p)[2]) << 16) | ((uint32_t)((p)[3]) << 24))
#define U32TO8_BE(p, v) \
(p)[0] = (uint8_t)((v) >> 24); (p)[1] = (uint8_t)((v) >> 16); \
(p)[2] = (uint8_t)((v) >> 8); (p)[3] = (uint8_t)((v) );
#define U32TO8_LE(p, v) \
(p)[0] = (uint8_t)((v) ); (p)[1] = (uint8_t)((v) >> 8); \
(p)[2] = (uint8_t)((v) >> 16); (p)[3] = (uint8_t)((v) >> 24);
#define U8TO64_BE(p) \
(((uint64_t)U8TO32_BE(p) << 32) | (uint64_t)U8TO32_BE((p) + 4))
#define U8TO64_LE(p) \
(((uint64_t)U8TO32_LE(p)) | ((uint64_t)U8TO32_LE((p) + 4) << 32))
#define U64TO8_BE(p, v) \
U32TO8_BE((p), (uint32_t)((v) >> 32)); \
U32TO8_BE((p) + 4, (uint32_t)((v) ));
#define U64TO8_LE(p, v) \
U32TO8_LE((p), (uint32_t)((v) )); \
U32TO8_LE((p) + 4, (uint32_t)((v) >> 32));
#define U32_SWAP(v) { \
(v) = (((v) << 8) & 0xFF00FF00 ) | (((v) >> 8) & 0xFF00FF ); \
(v) = ((v) << 16) | ((v) >> 16); \
}
#define U64_SWAP(v) { \
(v) = (((v) << 8) & 0xFF00FF00FF00FF00ull ) | (((v) >> 8) & 0x00FF00FF00FF00FFull ); \
(v) = (((v) << 16) & 0xFFFF0000FFFF0000ull ) | (((v) >> 16) & 0x0000FFFF0000FFFFull ); \
(v) = ((v) << 32) | ((v) >> 32); \
}
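/* worked example (illustrative): for uint8_t p[4] = {0x01,0x02,0x03,0x04},
   U8TO32_LE(p) == 0x04030201 and U8TO32_BE(p) == 0x01020304 */

/* constant-time comparison: returns 1 when x and y match over len bytes,
   0 otherwise, with no data-dependent branches */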
static int
scrypt_verify(const uint8_t *x, const uint8_t *y, size_t len) {
uint32_t differentbits = 0;
while (len--)
differentbits |= (*x++ ^ *y++);
return (1 & ((differentbits - 1) >> 8));
}
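/* zeroes len bytes in a way the compiler cannot optimize away
   (rep stosb on x86/x86-64, a volatile byte loop elsewhere) */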
static void
scrypt_ensure_zero(void *p, size_t len) {
#if ((defined(CPU_X86) || defined(CPU_X86_64)) && defined(COMPILER_MSVC))
__stosb((unsigned char *)p, 0, len);
#elif (defined(CPU_X86) && defined(COMPILER_GCC))
__asm__ __volatile__(
"pushl %%edi;\n"
"pushl %%ecx;\n"
"rep stosb;\n"
"popl %%ecx;\n"
"popl %%edi;\n"
:: "a"(0), "D"(p), "c"(len) : "cc", "memory"
);
#elif (defined(CPU_X86_64) && defined(COMPILER_GCC))
__asm__ __volatile__(
"pushq %%rdi;\n"
"pushq %%rcx;\n"
"rep stosb;\n"
"popq %%rcx;\n"
"popq %%rdi;\n"
:: "a"(0), "D"(p), "c"(len) : "cc", "memory"
);
#else
volatile uint8_t *b = (volatile uint8_t *)p;
size_t i;
for (i = 0; i < len; i++)
b[i] = 0;
#endif
}
#include "scrypt-jane-portable-x86.h"
#if !defined(asm_calling_convention)
#define asm_calling_convention
#endif


@ -0,0 +1,74 @@
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
/* function type returned by scrypt_getROMix, used with cpu detection */
typedef void (FASTCALL *scrypt_ROMixfn)(scrypt_mix_word_t *X/*[chunkWords]*/, scrypt_mix_word_t *Y/*[chunkWords]*/, scrypt_mix_word_t *V/*[chunkWords * N]*/, uint32_t N, uint32_t r);
#endif
/* romix pre/post nop function */
static void asm_calling_convention
scrypt_romix_nop(scrypt_mix_word_t *blocks, size_t nblocks) {
(void)blocks; (void)nblocks;
}
/* romix pre/post endian conversion function */
static void asm_calling_convention
scrypt_romix_convert_endian(scrypt_mix_word_t *blocks, size_t nblocks) {
#if !defined(CPU_LE)
static const union { uint8_t b[2]; uint16_t w; } endian_test = {{1,0}};
size_t i;
if (endian_test.w == 0x100) {
nblocks *= SCRYPT_BLOCK_WORDS;
for (i = 0; i < nblocks; i++) {
SCRYPT_WORD_ENDIAN_SWAP(blocks[i]);
}
}
#else
(void)blocks; (void)nblocks;
#endif
}
/* chunkmix test function */
typedef void (asm_calling_convention *chunkmixfn)(scrypt_mix_word_t *Bout/*[chunkWords]*/, scrypt_mix_word_t *Bin/*[chunkWords]*/, scrypt_mix_word_t *Bxor/*[chunkWords]*/, uint32_t r);
typedef void (asm_calling_convention *blockfixfn)(scrypt_mix_word_t *blocks, size_t nblocks);
static int
scrypt_test_mix_instance(chunkmixfn mixfn, blockfixfn prefn, blockfixfn postfn, const uint8_t expected[16]) {
/* r = 2, (2 * r) = 4 blocks in a chunk, 4 * SCRYPT_BLOCK_WORDS total */
const uint32_t r = 2, blocks = 2 * r, words = blocks * SCRYPT_BLOCK_WORDS;
#if (defined(X86ASM_AVX2) || defined(X86_64ASM_AVX2) || defined(X86_INTRINSIC_AVX2))
scrypt_mix_word_t ALIGN(32) chunk[2][4 * SCRYPT_BLOCK_WORDS], v;
#else
scrypt_mix_word_t ALIGN(16) chunk[2][4 * SCRYPT_BLOCK_WORDS], v;
#endif
uint8_t final[16];
size_t i;
for (i = 0; i < words; i++) {
v = (scrypt_mix_word_t)i;
v = (v << 8) | v;
v = (v << 16) | v;
chunk[0][i] = v;
}
prefn(chunk[0], blocks);
mixfn(chunk[1], chunk[0], NULL, r);
postfn(chunk[1], blocks);
/* grab the last 16 bytes of the final block */
for (i = 0; i < 16; i += sizeof(scrypt_mix_word_t)) {
SCRYPT_WORDTO8_LE(final + i, chunk[1][words - (16 / sizeof(scrypt_mix_word_t)) + (i / sizeof(scrypt_mix_word_t))]);
}
return scrypt_verify(expected, final, 16);
}
/* returns a pointer to item i, where item is len scrypt_mix_word_t's long */
static scrypt_mix_word_t *
scrypt_item(scrypt_mix_word_t *base, scrypt_mix_word_t i, scrypt_mix_word_t len) {
return base + (i * len);
}
/* returns a pointer to block i */
static scrypt_mix_word_t *
scrypt_block(scrypt_mix_word_t *base, scrypt_mix_word_t i) {
return base + (i * SCRYPT_BLOCK_WORDS);
}


@ -0,0 +1,122 @@
#if !defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_HAVE_ROMIX)
#if defined(SCRYPT_CHOOSE_COMPILETIME)
#undef SCRYPT_ROMIX_FN
#define SCRYPT_ROMIX_FN scrypt_ROMix
#endif
#undef SCRYPT_HAVE_ROMIX
#define SCRYPT_HAVE_ROMIX
#if !defined(SCRYPT_CHUNKMIX_FN)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_basic
/*
Bout = ChunkMix(Bin)
2*r: number of blocks in the chunk
*/
static void asm_calling_convention
SCRYPT_CHUNKMIX_FN(scrypt_mix_word_t *Bout/*[chunkWords]*/, scrypt_mix_word_t *Bin/*[chunkWords]*/, scrypt_mix_word_t *Bxor/*[chunkWords]*/, uint32_t r) {
#if (defined(X86ASM_AVX2) || defined(X86_64ASM_AVX2) || defined(X86_INTRINSIC_AVX2))
scrypt_mix_word_t ALIGN(32) X[SCRYPT_BLOCK_WORDS], *block;
#else
scrypt_mix_word_t ALIGN(16) X[SCRYPT_BLOCK_WORDS], *block;
#endif
uint32_t i, j, blocksPerChunk = r * 2, half = 0;
/* 1: X = B_{2r - 1} */
block = scrypt_block(Bin, blocksPerChunk - 1);
for (i = 0; i < SCRYPT_BLOCK_WORDS; i++)
X[i] = block[i];
if (Bxor) {
block = scrypt_block(Bxor, blocksPerChunk - 1);
for (i = 0; i < SCRYPT_BLOCK_WORDS; i++)
X[i] ^= block[i];
}
/* 2: for i = 0 to 2r - 1 do */
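/* 'half' alternates between 0 and r so that even-indexed outputs land in
   the first half of Bout and odd-indexed outputs in the second half */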
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
block = scrypt_block(Bin, i);
for (j = 0; j < SCRYPT_BLOCK_WORDS; j++)
X[j] ^= block[j];
if (Bxor) {
block = scrypt_block(Bxor, i);
for (j = 0; j < SCRYPT_BLOCK_WORDS; j++)
X[j] ^= block[j];
}
SCRYPT_MIX_FN(X);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
block = scrypt_block(Bout, (i / 2) + half);
for (j = 0; j < SCRYPT_BLOCK_WORDS; j++)
block[j] = X[j];
}
}
#endif
/*
X = ROMix(X)
X: chunk to mix
Y: scratch chunk
N: number of rounds
V[N]: array of chunks to randomly index in to
2*r: number of blocks in a chunk
*/
static void NOINLINE FASTCALL
SCRYPT_ROMIX_FN(scrypt_mix_word_t *X/*[chunkWords]*/, scrypt_mix_word_t *Y/*[chunkWords]*/, scrypt_mix_word_t *V/*[N * chunkWords]*/, uint32_t N, uint32_t r) {
uint32_t i, j, chunkWords = (uint32_t)(SCRYPT_BLOCK_WORDS * r * 2);
scrypt_mix_word_t *block = V;
SCRYPT_ROMIX_TANGLE_FN(X, r * 2);
/* 1: X = B */
/* implicit */
/* 2: for i = 0 to N - 1 do */
memcpy(block, X, chunkWords * sizeof(scrypt_mix_word_t));
for (i = 0; i < N - 1; i++, block += chunkWords) {
/* 3: V_i = X */
/* 4: X = H(X) */
SCRYPT_CHUNKMIX_FN(block + chunkWords, block, NULL, r);
}
SCRYPT_CHUNKMIX_FN(X, block, NULL, r);
/* 6: for i = 0 to N - 1 do */
for (i = 0; i < N; i += 2) {
/* 7: j = Integerify(X) % N */
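/* N is a power of two, so '& (N - 1)' is equivalent to '% N' */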
j = X[chunkWords - SCRYPT_BLOCK_WORDS] & (N - 1);
/* 8: X = H(Y ^ V_j) */
SCRYPT_CHUNKMIX_FN(Y, X, scrypt_item(V, j, chunkWords), r);
/* 7: j = Integerify(Y) % N */
j = Y[chunkWords - SCRYPT_BLOCK_WORDS] & (N - 1);
/* 8: X = H(Y ^ V_j) */
SCRYPT_CHUNKMIX_FN(X, Y, scrypt_item(V, j, chunkWords), r);
}
/* 10: B' = X */
/* implicit */
SCRYPT_ROMIX_UNTANGLE_FN(X, r * 2);
}
#endif /* !defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_HAVE_ROMIX) */
#undef SCRYPT_CHUNKMIX_FN
#undef SCRYPT_ROMIX_FN
#undef SCRYPT_MIX_FN
#undef SCRYPT_ROMIX_TANGLE_FN
#undef SCRYPT_ROMIX_UNTANGLE_FN


@ -0,0 +1,27 @@
#if defined(SCRYPT_CHACHA)
#include "scrypt-jane-chacha.h"
#elif defined(SCRYPT_SALSA)
#include "scrypt-jane-salsa.h"
#elif defined(SCRYPT_SALSA64)
#include "scrypt-jane-salsa64.h"
#else
#define SCRYPT_MIX_BASE "ERROR"
typedef uint32_t scrypt_mix_word_t;
#define SCRYPT_WORDTO8_LE U32TO8_LE
#define SCRYPT_WORD_ENDIAN_SWAP U32_SWAP
#define SCRYPT_BLOCK_BYTES 64
#define SCRYPT_BLOCK_WORDS (SCRYPT_BLOCK_BYTES / sizeof(scrypt_mix_word_t))
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
static void FASTCALL scrypt_ROMix_error(scrypt_mix_word_t *X/*[chunkWords]*/, scrypt_mix_word_t *Y/*[chunkWords]*/, scrypt_mix_word_t *V/*[chunkWords * N]*/, uint32_t N, uint32_t r) {}
static scrypt_ROMixfn scrypt_getROMix(void) { return scrypt_ROMix_error; }
#else
static void FASTCALL scrypt_ROMix(scrypt_mix_word_t *X, scrypt_mix_word_t *Y, scrypt_mix_word_t *V, uint32_t N, uint32_t r) {}
#endif
static int scrypt_test_mix(void) { return 0; }
#error must define a mix function!
#endif
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
#undef SCRYPT_MIX
#define SCRYPT_MIX SCRYPT_MIX_BASE
#endif


@ -0,0 +1,134 @@
#define SCRYPT_MIX_BASE "Salsa20/8"
typedef uint32_t scrypt_mix_word_t;
#define SCRYPT_WORDTO8_LE U32TO8_LE
#define SCRYPT_WORD_ENDIAN_SWAP U32_SWAP
#define SCRYPT_BLOCK_BYTES 64
#define SCRYPT_BLOCK_WORDS (SCRYPT_BLOCK_BYTES / sizeof(scrypt_mix_word_t))
/* must have these here in case SCRYPT_BLOCK_BYTES is ever != 64 */
#include "scrypt-jane-romix-basic.h"
#include "scrypt-jane-mix_salsa-xop.h"
#include "scrypt-jane-mix_salsa-avx.h"
#include "scrypt-jane-mix_salsa-sse2.h"
#include "scrypt-jane-mix_salsa.h"
#if defined(SCRYPT_SALSA_XOP)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_xop
#define SCRYPT_ROMIX_FN scrypt_ROMix_xop
#define SCRYPT_ROMIX_TANGLE_FN salsa_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_SALSA_AVX)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_avx
#define SCRYPT_ROMIX_FN scrypt_ROMix_avx
#define SCRYPT_ROMIX_TANGLE_FN salsa_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_SALSA_SSE2)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_sse2
#define SCRYPT_ROMIX_FN scrypt_ROMix_sse2
#define SCRYPT_MIX_FN salsa_core_sse2
#define SCRYPT_ROMIX_TANGLE_FN salsa_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
/* cpu agnostic */
#define SCRYPT_ROMIX_FN scrypt_ROMix_basic
#define SCRYPT_MIX_FN salsa_core_basic
#define SCRYPT_ROMIX_TANGLE_FN scrypt_romix_convert_endian
#define SCRYPT_ROMIX_UNTANGLE_FN scrypt_romix_convert_endian
#include "scrypt-jane-romix-template.h"
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
static scrypt_ROMixfn
scrypt_getROMix(void) {
size_t cpuflags = detect_cpu();
#if defined(SCRYPT_SALSA_XOP)
if (cpuflags & cpu_xop)
return scrypt_ROMix_xop;
else
#endif
#if defined(SCRYPT_SALSA_AVX)
if (cpuflags & cpu_avx)
return scrypt_ROMix_avx;
else
#endif
#if defined(SCRYPT_SALSA_SSE2)
if (cpuflags & cpu_sse2)
return scrypt_ROMix_sse2;
else
#endif
return scrypt_ROMix_basic;
}
#endif
#if defined(SCRYPT_TEST_SPEED)
static size_t
available_implementations(void) {
size_t cpuflags = detect_cpu();
size_t flags = 0;
#if defined(SCRYPT_SALSA_XOP)
if (cpuflags & cpu_xop)
flags |= cpu_xop;
#endif
#if defined(SCRYPT_SALSA_AVX)
if (cpuflags & cpu_avx)
flags |= cpu_avx;
#endif
#if defined(SCRYPT_SALSA_SSE2)
if (cpuflags & cpu_sse2)
flags |= cpu_sse2;
#endif
return flags;
}
#endif
static int
scrypt_test_mix(void) {
static const uint8_t expected[16] = {
0x41,0x1f,0x2e,0xa3,0xab,0xa3,0x1a,0x34,0x87,0x1d,0x8a,0x1c,0x76,0xa0,0x27,0x66,
};
int ret = 1;
size_t cpuflags = detect_cpu();
#if defined(SCRYPT_SALSA_XOP)
if (cpuflags & cpu_xop)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_xop, salsa_core_tangle_sse2, salsa_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA_AVX)
if (cpuflags & cpu_avx)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_avx, salsa_core_tangle_sse2, salsa_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA_SSE2)
if (cpuflags & cpu_sse2)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_sse2, salsa_core_tangle_sse2, salsa_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA_BASIC)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_basic, scrypt_romix_convert_endian, scrypt_romix_convert_endian, expected);
#endif
return ret;
}


@ -0,0 +1,183 @@
#define SCRYPT_MIX_BASE "Salsa64/8"
typedef uint64_t scrypt_mix_word_t;
#define SCRYPT_WORDTO8_LE U64TO8_LE
#define SCRYPT_WORD_ENDIAN_SWAP U64_SWAP
#define SCRYPT_BLOCK_BYTES 128
#define SCRYPT_BLOCK_WORDS (SCRYPT_BLOCK_BYTES / sizeof(scrypt_mix_word_t))
/* must have these here in case SCRYPT_BLOCK_BYTES is ever != 64 */
#include "scrypt-jane-romix-basic.h"
#include "scrypt-jane-mix_salsa64-avx2.h"
#include "scrypt-jane-mix_salsa64-xop.h"
#include "scrypt-jane-mix_salsa64-avx.h"
#include "scrypt-jane-mix_salsa64-ssse3.h"
#include "scrypt-jane-mix_salsa64-sse2.h"
#include "scrypt-jane-mix_salsa64.h"
#if defined(SCRYPT_SALSA64_AVX2)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_avx2
#define SCRYPT_ROMIX_FN scrypt_ROMix_avx2
#define SCRYPT_ROMIX_TANGLE_FN salsa64_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa64_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_SALSA64_XOP)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_xop
#define SCRYPT_ROMIX_FN scrypt_ROMix_xop
#define SCRYPT_ROMIX_TANGLE_FN salsa64_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa64_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_SALSA64_AVX)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_avx
#define SCRYPT_ROMIX_FN scrypt_ROMix_avx
#define SCRYPT_ROMIX_TANGLE_FN salsa64_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa64_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_SALSA64_SSSE3)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_ssse3
#define SCRYPT_ROMIX_FN scrypt_ROMix_ssse3
#define SCRYPT_ROMIX_TANGLE_FN salsa64_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa64_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_SALSA64_SSE2)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_sse2
#define SCRYPT_ROMIX_FN scrypt_ROMix_sse2
#define SCRYPT_ROMIX_TANGLE_FN salsa64_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa64_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
/* cpu agnostic */
#define SCRYPT_ROMIX_FN scrypt_ROMix_basic
#define SCRYPT_MIX_FN salsa64_core_basic
#define SCRYPT_ROMIX_TANGLE_FN scrypt_romix_convert_endian
#define SCRYPT_ROMIX_UNTANGLE_FN scrypt_romix_convert_endian
#include "scrypt-jane-romix-template.h"
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
static scrypt_ROMixfn
scrypt_getROMix(void) {
size_t cpuflags = detect_cpu();
#if defined(SCRYPT_SALSA64_AVX2)
if (cpuflags & cpu_avx2)
return scrypt_ROMix_avx2;
else
#endif
#if defined(SCRYPT_SALSA64_XOP)
if (cpuflags & cpu_xop)
return scrypt_ROMix_xop;
else
#endif
#if defined(SCRYPT_SALSA64_AVX)
if (cpuflags & cpu_avx)
return scrypt_ROMix_avx;
else
#endif
#if defined(SCRYPT_SALSA64_SSSE3)
if (cpuflags & cpu_ssse3)
return scrypt_ROMix_ssse3;
else
#endif
#if defined(SCRYPT_SALSA64_SSE2)
if (cpuflags & cpu_sse2)
return scrypt_ROMix_sse2;
else
#endif
return scrypt_ROMix_basic;
}
#endif
#if defined(SCRYPT_TEST_SPEED)
static size_t
available_implementations(void) {
size_t cpuflags = detect_cpu();
size_t flags = 0;
#if defined(SCRYPT_SALSA64_AVX2)
if (cpuflags & cpu_avx2)
flags |= cpu_avx2;
#endif
#if defined(SCRYPT_SALSA64_XOP)
if (cpuflags & cpu_xop)
flags |= cpu_xop;
#endif
#if defined(SCRYPT_SALSA64_AVX)
if (cpuflags & cpu_avx)
flags |= cpu_avx;
#endif
#if defined(SCRYPT_SALSA64_SSSE3)
if (cpuflags & cpu_ssse3)
flags |= cpu_ssse3;
#endif
#if defined(SCRYPT_SALSA64_SSE2)
if (cpuflags & cpu_sse2)
flags |= cpu_sse2;
#endif
return flags;
}
#endif
static int
scrypt_test_mix(void) {
static const uint8_t expected[16] = {
0xf8,0x92,0x9b,0xf8,0xcc,0x1d,0xce,0x2e,0x13,0x82,0xac,0x96,0xb2,0x6c,0xee,0x2c,
};
int ret = 1;
size_t cpuflags = detect_cpu();
#if defined(SCRYPT_SALSA64_AVX2)
if (cpuflags & cpu_avx2)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_avx2, salsa64_core_tangle_sse2, salsa64_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA64_XOP)
if (cpuflags & cpu_xop)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_xop, salsa64_core_tangle_sse2, salsa64_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA64_AVX)
if (cpuflags & cpu_avx)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_avx, salsa64_core_tangle_sse2, salsa64_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA64_SSSE3)
if (cpuflags & cpu_ssse3)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_ssse3, salsa64_core_tangle_sse2, salsa64_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA64_SSE2)
if (cpuflags & cpu_sse2)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_sse2, salsa64_core_tangle_sse2, salsa64_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA64_BASIC)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_basic, scrypt_romix_convert_endian, scrypt_romix_convert_endian, expected);
#endif
return ret;
}


@ -0,0 +1,261 @@
typedef struct scrypt_test_setting_t {
const char *pw, *salt;
uint8_t Nfactor, rfactor, pfactor;
} scrypt_test_setting;
static const scrypt_test_setting post_settings[] = {
{"", "", 3, 0, 0},
{"password", "NaCl", 9, 3, 4},
{0, 0, 0, 0, 0}
};
#if defined(SCRYPT_SHA256)
#if defined(SCRYPT_SALSA)
/* sha256 + salsa20/8, the only 'official' test vectors! */
static const uint8_t post_vectors[][64] = {
{0x77,0xd6,0x57,0x62,0x38,0x65,0x7b,0x20,0x3b,0x19,0xca,0x42,0xc1,0x8a,0x04,0x97,
0xf1,0x6b,0x48,0x44,0xe3,0x07,0x4a,0xe8,0xdf,0xdf,0xfa,0x3f,0xed,0xe2,0x14,0x42,
0xfc,0xd0,0x06,0x9d,0xed,0x09,0x48,0xf8,0x32,0x6a,0x75,0x3a,0x0f,0xc8,0x1f,0x17,
0xe8,0xd3,0xe0,0xfb,0x2e,0x0d,0x36,0x28,0xcf,0x35,0xe2,0x0c,0x38,0xd1,0x89,0x06},
{0xfd,0xba,0xbe,0x1c,0x9d,0x34,0x72,0x00,0x78,0x56,0xe7,0x19,0x0d,0x01,0xe9,0xfe,
0x7c,0x6a,0xd7,0xcb,0xc8,0x23,0x78,0x30,0xe7,0x73,0x76,0x63,0x4b,0x37,0x31,0x62,
0x2e,0xaf,0x30,0xd9,0x2e,0x22,0xa3,0x88,0x6f,0xf1,0x09,0x27,0x9d,0x98,0x30,0xda,
0xc7,0x27,0xaf,0xb9,0x4a,0x83,0xee,0x6d,0x83,0x60,0xcb,0xdf,0xa2,0xcc,0x06,0x40}
};
#elif defined(SCRYPT_CHACHA)
static const uint8_t post_vectors[][64] = {
{0xef,0x8f,0x44,0x8f,0xc3,0xef,0x78,0x13,0xb2,0x26,0xa7,0x2a,0x40,0xa1,0x98,0x7f,
0xc8,0x7f,0x0d,0x5f,0x40,0x66,0xa2,0x05,0x07,0x4f,0xc7,0xac,0x3b,0x47,0x07,0x0c,
0xf5,0x20,0x46,0x76,0x20,0x7b,0xee,0x51,0x6d,0x5f,0xfa,0x9c,0x27,0xac,0xa9,0x36,
0x62,0xbd,0xde,0x0b,0xa3,0xc0,0x66,0x84,0xde,0x82,0xd0,0x1a,0xb4,0xd1,0xb5,0xfe},
{0xf1,0x94,0xf7,0x5f,0x15,0x12,0x10,0x4d,0x6e,0xfb,0x04,0x8c,0x35,0xc4,0x51,0xb6,
0x11,0x04,0xa7,0x9b,0xb0,0x46,0xaf,0x7b,0x47,0x39,0xf0,0xac,0xb2,0x8a,0xfa,0x45,
0x09,0x86,0x8f,0x10,0x4b,0xc6,0xee,0x00,0x11,0x38,0x73,0x7a,0x6a,0xd8,0x25,0x67,
0x85,0xa4,0x10,0x4e,0xa9,0x2f,0x15,0xfe,0xcf,0x63,0xe1,0xe8,0xcf,0xab,0xe8,0xbd}
};
#elif defined(SCRYPT_SALSA64)
static const uint8_t post_vectors[][64] = {
{0xf4,0x87,0x29,0xf4,0xc3,0x31,0x8c,0xe8,0xdf,0xe5,0xd8,0x73,0xff,0xca,0x32,0xcf,
0xd8,0xac,0xe7,0xf7,0x15,0xda,0x84,0x41,0x60,0x23,0x26,0x4a,0xc8,0x3e,0xee,0xa6,
0xa5,0x6e,0x52,0xd6,0x64,0x55,0x16,0x31,0x3e,0x66,0x7b,0x65,0xd5,0xe2,0xc9,0x95,
0x1b,0xf0,0x81,0x40,0xb7,0x2f,0xff,0xa6,0xe6,0x02,0xcc,0x63,0x08,0x4a,0x74,0x31},
{0x7a,0xd8,0xad,0x02,0x9c,0xa5,0xf4,0x42,0x6a,0x29,0xd2,0xb5,0x53,0xf1,0x6d,0x1d,
0x25,0xc8,0x70,0x48,0x80,0xb9,0xa3,0xf6,0x94,0xf8,0xfa,0xb8,0x52,0x42,0xcd,0x14,
0x26,0x46,0x28,0x06,0xc7,0xf6,0x1f,0xa7,0x89,0x6d,0xc5,0xa0,0x36,0xcc,0xde,0xcb,
0x73,0x0b,0xa4,0xe2,0xd3,0xd1,0x44,0x06,0x35,0x08,0xe0,0x35,0x5b,0xf8,0xd7,0xe7}
};
#endif
#elif defined(SCRYPT_SHA512)
#if defined(SCRYPT_SALSA)
static const uint8_t post_vectors[][64] = {
{0xae,0x54,0xe7,0x74,0xe4,0x51,0x6b,0x0f,0xe1,0xe7,0x28,0x03,0x17,0xe4,0x8c,0xfa,
0x2f,0x66,0x55,0x7f,0xdc,0x3b,0x40,0xab,0x47,0x84,0xc9,0x63,0x36,0x07,0x9d,0xe5,
0x86,0x43,0x95,0x89,0xb6,0xc0,0x6c,0x72,0x64,0x00,0xc1,0x2a,0xd7,0x69,0x21,0x92,
0x8e,0xba,0xa4,0x59,0x9f,0x00,0x14,0x3a,0x7c,0x12,0x58,0x91,0x09,0xa0,0x32,0xfe},
{0xc5,0xb3,0xd6,0xea,0x0a,0x4b,0x1e,0xcc,0x40,0x00,0xe5,0x98,0x5c,0xdc,0x06,0x06,
0x78,0x34,0x92,0x16,0xcf,0xe4,0x9f,0x03,0x96,0x2d,0x41,0x35,0x00,0x9b,0xff,0x74,
0x60,0x19,0x6e,0xe6,0xa6,0x46,0xf7,0x37,0xcb,0xfa,0xd0,0x9f,0x80,0x72,0x2e,0x85,
0x13,0x3e,0x1a,0x91,0x90,0x53,0xa1,0x33,0x85,0x51,0xdc,0x62,0x1c,0x0e,0x4d,0x30}
};
#elif defined(SCRYPT_CHACHA)
static const uint8_t post_vectors[][64] = {
{0xe2,0x05,0x7c,0x44,0xf9,0x55,0x9f,0x64,0xbe,0xd5,0x7f,0x85,0x69,0xc7,0x8c,0x7f,
0x2b,0x91,0xd6,0x9a,0x6c,0xf8,0x57,0x55,0x61,0x25,0x3d,0xee,0xb8,0xd5,0x8c,0xdc,
0x2d,0xd5,0x53,0x84,0x8c,0x06,0xaa,0x37,0x77,0xa6,0xf0,0xf1,0x35,0xfe,0xb5,0xcb,
0x61,0xd7,0x2c,0x67,0xf3,0x7e,0x8a,0x1b,0x04,0xa3,0xa3,0x43,0xa2,0xb2,0x29,0xf2},
{0x82,0xda,0x29,0xb2,0x08,0x27,0xfc,0x78,0x22,0xc4,0xb8,0x7e,0xbc,0x36,0xcf,0xcd,
0x17,0x4b,0xa1,0x30,0x16,0x4a,0x25,0x70,0xc7,0xcb,0xe0,0x2b,0x56,0xd3,0x16,0x4e,
0x85,0xb6,0x84,0xe7,0x9b,0x7f,0x8b,0xb5,0x94,0x33,0xcf,0x33,0x44,0x65,0xc8,0xa1,
0x46,0xf9,0xf5,0xfc,0x74,0x29,0x7e,0xd5,0x46,0xec,0xbd,0x95,0xc1,0x80,0x24,0xe4}
};
#elif defined(SCRYPT_SALSA64)
static const uint8_t post_vectors[][64] = {
{0xa6,0xcb,0x77,0x9a,0x64,0x1f,0x95,0x02,0x53,0xe7,0x5c,0x78,0xdb,0xa3,0x43,0xff,
0xbe,0x10,0x4c,0x7b,0xe4,0xe1,0x91,0xcf,0x67,0x69,0x5a,0x2c,0x12,0xd6,0x99,0x49,
0x92,0xfd,0x5a,0xaa,0x12,0x4c,0x2e,0xf6,0x95,0x46,0x8f,0x5e,0x77,0x62,0x16,0x29,
0xdb,0xe7,0xab,0x02,0x2b,0x9c,0x35,0x03,0xf8,0xd4,0x04,0x7d,0x2d,0x73,0x85,0xf1},
{0x54,0xb7,0xca,0xbb,0xaf,0x0f,0xb0,0x5f,0xb7,0x10,0x63,0x48,0xb3,0x15,0xd8,0xb5,
0x62,0x64,0x89,0x6a,0x59,0xc6,0x0f,0x86,0x96,0x38,0xf0,0xcf,0xd4,0x62,0x90,0x61,
0x7d,0xce,0xd6,0x13,0x85,0x67,0x4a,0xf5,0x32,0x03,0x74,0x30,0x0b,0x5a,0x2f,0x86,
0x82,0x6e,0x0c,0x3e,0x40,0x7a,0xde,0xbe,0x42,0x6e,0x80,0x2b,0xaf,0xdb,0xcc,0x94}
};
#endif
#elif defined(SCRYPT_BLAKE512)
#if defined(SCRYPT_SALSA)
static const uint8_t post_vectors[][64] = {
{0x4a,0x48,0xb3,0xfa,0xdc,0xb0,0xb8,0xdb,0x54,0xee,0xf3,0x5c,0x27,0x65,0x6c,0x20,
0xab,0x61,0x9a,0x5b,0xd5,0x1d,0xd9,0x95,0xab,0x88,0x0e,0x4d,0x1e,0x71,0x2f,0x11,
0x43,0x2e,0xef,0x23,0xca,0x8a,0x49,0x3b,0x11,0x38,0xa5,0x28,0x61,0x2f,0xb7,0x89,
0x5d,0xef,0x42,0x4c,0xc1,0x74,0xea,0x8a,0x56,0xbe,0x4a,0x82,0x76,0x15,0x1a,0x87},
{0x96,0x24,0xbf,0x40,0xeb,0x03,0x8e,0xfe,0xc0,0xd5,0xa4,0x81,0x85,0x7b,0x09,0x88,
0x52,0xb5,0xcb,0xc4,0x48,0xe1,0xb9,0x1d,0x3f,0x8b,0x3a,0xc6,0x38,0x32,0xc7,0x55,
0x30,0x28,0x7a,0x42,0xa9,0x5d,0x54,0x33,0x62,0xf3,0xd9,0x3c,0x96,0x40,0xd1,0x80,
0xe4,0x0e,0x7e,0xf0,0x64,0x53,0xfe,0x7b,0xd7,0x15,0xba,0xad,0x16,0x80,0x01,0xb5}
};
#elif defined(SCRYPT_CHACHA)
static const uint8_t post_vectors[][64] = {
{0x45,0x42,0x22,0x31,0x26,0x13,0x5f,0x94,0xa4,0x00,0x04,0x47,0xe8,0x50,0x6d,0xd6,
0xdd,0xd5,0x08,0xd4,0x90,0x64,0xe0,0x59,0x70,0x46,0xff,0xfc,0x29,0xb3,0x6a,0xc9,
0x4d,0x45,0x97,0x95,0xa8,0xf0,0x53,0xe7,0xee,0x4b,0x6b,0x5d,0x1e,0xa5,0xb2,0x58,
0x4b,0x93,0xc9,0x89,0x4c,0xa8,0xab,0x03,0x74,0x38,0xbd,0x54,0x97,0x6b,0xab,0x4a},
{0x4b,0x4a,0x63,0x96,0x73,0x34,0x9f,0x39,0x64,0x51,0x0e,0x2e,0x3b,0x07,0xd5,0x1c,
0xd2,0xf7,0xce,0x60,0xab,0xac,0x89,0xa4,0x16,0x0c,0x58,0x82,0xb3,0xd3,0x25,0x5b,
0xd5,0x62,0x32,0xf4,0x86,0x5d,0xb2,0x4b,0xbf,0x8e,0xc6,0xc0,0xac,0x40,0x48,0xb4,
0x69,0x08,0xba,0x40,0x4b,0x07,0x2a,0x13,0x9c,0x98,0x3b,0x8b,0x20,0x0c,0xac,0x9e}
};
#elif defined(SCRYPT_SALSA64)
static const uint8_t post_vectors[][64] = {
{0xcb,0x4b,0xc2,0xd1,0xf4,0x77,0x32,0x3c,0x42,0x9d,0xf7,0x7d,0x1f,0x22,0x64,0xa4,
0xe2,0x88,0x30,0x2d,0x54,0x9d,0xb6,0x26,0x89,0x25,0x30,0xc3,0x3d,0xdb,0xba,0x99,
0xe9,0x8e,0x1e,0x5e,0x57,0x66,0x75,0x7c,0x24,0xda,0x00,0x6f,0x79,0xf7,0x47,0xf5,
0xea,0x40,0x70,0x37,0xd2,0x91,0xc7,0x4d,0xdf,0x46,0xb6,0x3e,0x95,0x7d,0xcb,0xc1},
{0x25,0xc2,0xcb,0x7f,0xc8,0x50,0xb7,0x0b,0x11,0x9e,0x1d,0x10,0xb2,0xa8,0x35,0x23,
0x91,0x39,0xfb,0x45,0xf2,0xbf,0xe4,0xd0,0x84,0xec,0x72,0x33,0x6d,0x09,0xed,0x41,
0x9a,0x7e,0x4f,0x10,0x73,0x97,0x22,0x76,0x58,0x93,0x39,0x24,0xdf,0xd2,0xaa,0x2f,
0x6b,0x2b,0x64,0x48,0xa5,0xb7,0xf5,0x56,0x77,0x02,0xa7,0x71,0x46,0xe5,0x0e,0x8d},
};
#endif
#elif defined(SCRYPT_BLAKE256)
#if defined(SCRYPT_SALSA)
static const uint8_t post_vectors[][64] = {
{0xf1,0xf1,0x91,0x1a,0x81,0xe6,0x9f,0xc1,0xce,0x43,0xab,0xb1,0x1a,0x02,0x1e,0x16,
0x08,0xc6,0xf9,0x00,0x50,0x1b,0x6d,0xf1,0x31,0x06,0x95,0x48,0x5d,0xf7,0x6c,0x00,
0xa2,0x4c,0xb1,0x0e,0x52,0x66,0x94,0x7e,0x84,0xfc,0xa5,0x34,0xfd,0xf0,0xe9,0x57,
0x85,0x2d,0x8c,0x05,0x5c,0x0f,0x04,0xd4,0x8d,0x3e,0x13,0x52,0x3d,0x90,0x2d,0x2c},
{0xd5,0x42,0xd2,0x7b,0x06,0xae,0x63,0x90,0x9e,0x30,0x00,0x0e,0xd8,0xa4,0x3a,0x0b,
0xee,0x4a,0xef,0xb2,0xc4,0x95,0x0d,0x72,0x07,0x70,0xcc,0xa3,0xf9,0x1e,0xc2,0x75,
0xcf,0xaf,0xe1,0x44,0x1c,0x8c,0xe2,0x3e,0x0c,0x81,0xf3,0x92,0xe1,0x13,0xe6,0x4f,
0x2d,0x27,0xc3,0x87,0xe5,0xb6,0xf9,0xd7,0x02,0x04,0x37,0x64,0x78,0x36,0x6e,0xb3}
};
#elif defined(SCRYPT_CHACHA)
static const uint8_t post_vectors[][64] = {
{0xad,0x1b,0x4b,0xca,0xe3,0x26,0x1a,0xfd,0xb7,0x77,0x8c,0xde,0x8d,0x26,0x14,0xe1,
0x54,0x38,0x42,0xf3,0xb3,0x66,0x29,0xf9,0x90,0x04,0xf1,0x82,0x7c,0x5a,0x6f,0xa8,
0x7d,0xd6,0x08,0x0d,0x8b,0x78,0x04,0xad,0x31,0xea,0xd4,0x87,0x2d,0xf7,0x74,0x9a,
0xe5,0xce,0x97,0xef,0xa3,0xbb,0x90,0x46,0x7c,0xf4,0x51,0x38,0xc7,0x60,0x53,0x21},
{0x39,0xbb,0x56,0x3d,0x0d,0x7b,0x74,0x82,0xfe,0x5a,0x78,0x3d,0x66,0xe8,0x3a,0xdf,
0x51,0x6f,0x3e,0xf4,0x86,0x20,0x8d,0xe1,0x81,0x22,0x02,0xf7,0x0d,0xb5,0x1a,0x0f,
0xfc,0x59,0xb6,0x60,0xc9,0xdb,0x38,0x0b,0x5b,0x95,0xa5,0x94,0xda,0x42,0x2d,0x90,
0x47,0xeb,0x73,0x31,0x9f,0x20,0xf6,0x81,0xc2,0xef,0x33,0x77,0x51,0xd8,0x2c,0xe4}
};
#elif defined(SCRYPT_SALSA64)
static const uint8_t post_vectors[][64] = {
{0x9e,0xf2,0x60,0x7c,0xbd,0x7c,0x19,0x5c,0x79,0xc6,0x1b,0x7e,0xb0,0x65,0x1b,0xc3,
0x70,0x0d,0x89,0xfc,0x72,0xb2,0x03,0x72,0x15,0xcb,0x8e,0x8c,0x49,0x50,0x4c,0x27,
0x99,0xda,0x47,0x32,0x5e,0xb4,0xa2,0x07,0x83,0x51,0x6b,0x06,0x37,0x60,0x42,0xc4,
0x59,0x49,0x99,0xdd,0xc0,0xd2,0x08,0x94,0x7f,0xe3,0x9e,0x4e,0x43,0x8e,0x5b,0xba},
{0x86,0x6f,0x3b,0x11,0xb8,0xca,0x4b,0x6e,0xa7,0x6f,0xc2,0xc9,0x33,0xb7,0x8b,0x9f,
0xa3,0xb9,0xf5,0xb5,0x62,0xa6,0x17,0x66,0xe4,0xc3,0x9d,0x9b,0xca,0x51,0xb0,0x2f,
0xda,0x09,0xc1,0x77,0xed,0x8b,0x89,0xc2,0x69,0x5a,0x34,0x05,0x4a,0x1f,0x4d,0x76,
0xcb,0xd5,0xa4,0x78,0xfa,0x1b,0xb9,0x5b,0xbc,0x3d,0xce,0x04,0x63,0x99,0xad,0x54}
};
#endif
#elif defined(SCRYPT_SKEIN512)
#if defined(SCRYPT_SALSA)
static const uint8_t post_vectors[][64] = {
{0xe4,0x36,0xa0,0x9a,0xdb,0xf0,0xd1,0x45,0x56,0xda,0x25,0x53,0x00,0xf9,0x2c,0x69,
0xa4,0xc2,0xa5,0x8e,0x1a,0x85,0xfa,0x53,0xbd,0x55,0x3d,0x11,0x2a,0x44,0x13,0x87,
0x8f,0x81,0x88,0x13,0x1e,0x49,0xa8,0xc4,0xc5,0xcd,0x1f,0xe1,0x5f,0xf5,0xcb,0x2f,
0x8b,0xab,0x57,0x38,0x59,0xeb,0x6b,0xac,0x3b,0x73,0x10,0xa6,0xe1,0xfe,0x17,0x3e},
{0x6d,0x61,0xde,0x43,0xa9,0x38,0x53,0x5f,0xd8,0xf2,0x6d,0xf3,0xe4,0xd6,0xd8,0x5e,
0x81,0x89,0xd0,0x0b,0x86,0x16,0xb1,0x91,0x65,0x76,0xd8,0xc1,0xf7,0x3b,0xca,0x8b,
0x35,0x07,0x58,0xba,0x77,0xdf,0x11,0x6c,0xbc,0x58,0xee,0x11,0x59,0xf2,0xfe,0xcb,
0x51,0xdc,0xcd,0x35,0x2e,0x46,0x22,0xa0,0xaa,0x55,0x60,0x7c,0x91,0x15,0xb8,0x00}
};
#elif defined(SCRYPT_CHACHA)
static const uint8_t post_vectors[][64] = {
{0xd1,0x12,0x6d,0x64,0x10,0x0e,0x98,0x6c,0xbe,0x70,0x21,0xd9,0xc6,0x04,0x62,0xa4,
0x29,0x13,0x9a,0x3c,0xf8,0xe9,0x1e,0x87,0x9f,0x88,0xf4,0x98,0x01,0x41,0x8e,0xce,
0x60,0xf7,0xbe,0x17,0x0a,0xec,0xd6,0x30,0x80,0xcf,0x6b,0x1e,0xcf,0x95,0xa0,0x4d,
0x37,0xed,0x3a,0x09,0xd1,0xeb,0x0c,0x80,0x82,0x22,0x8e,0xd3,0xb1,0x7f,0xd6,0xa8},
{0x5c,0x5c,0x05,0xe2,0x75,0xa5,0xa4,0xec,0x81,0x97,0x9c,0x5b,0xd7,0x26,0xb3,0x16,
0xb4,0x02,0x8c,0x56,0xe6,0x32,0x57,0x33,0x47,0x19,0x06,0x6c,0xde,0x68,0x41,0x37,
0x5b,0x7d,0xa7,0xb3,0x73,0xeb,0x82,0xca,0x0f,0x86,0x2e,0x6b,0x47,0xa2,0x70,0x39,
0x35,0xfd,0x2d,0x2e,0x7b,0xc3,0x68,0xbb,0x52,0x42,0x19,0x3b,0x78,0x96,0xe7,0xc8}
};
#elif defined(SCRYPT_SALSA64)
static const uint8_t post_vectors[][64] = {
{0xd2,0xad,0x32,0x05,0xee,0x80,0xe3,0x44,0x70,0xc6,0x34,0xde,0x05,0xb6,0xcf,0x60,
0x89,0x98,0x70,0xc0,0xb8,0xf5,0x54,0xf1,0xa6,0xb2,0xc8,0x76,0x34,0xec,0xc4,0x59,
0x8e,0x64,0x42,0xd0,0xa9,0xed,0xe7,0x19,0xb2,0x8a,0x11,0xc6,0xa6,0xbf,0xa7,0xa9,
0x4e,0x44,0x32,0x7e,0x12,0x91,0x9d,0xfe,0x52,0x48,0xa8,0x27,0xb3,0xfc,0xb1,0x89},
{0xd6,0x67,0xd2,0x3e,0x30,0x1e,0x9d,0xe2,0x55,0x68,0x17,0x3d,0x2b,0x75,0x5a,0xe5,
0x04,0xfb,0x3d,0x0e,0x86,0xe0,0xaa,0x1d,0xd4,0x72,0xda,0xb0,0x79,0x41,0xb7,0x99,
0x68,0xe5,0xd9,0x55,0x79,0x7d,0xc3,0xd1,0xa6,0x56,0xc1,0xbe,0x0b,0x6c,0x62,0x23,
0x66,0x67,0x91,0x47,0x99,0x13,0x6b,0xe3,0xda,0x59,0x55,0x18,0x67,0x8f,0x2e,0x3b}
};
#endif
#elif defined(SCRYPT_KECCAK512)
#if defined(SCRYPT_SALSA)
static const uint8_t post_vectors[][64] = {
{0xc2,0x7b,0xbe,0x1d,0xf1,0x99,0xd8,0xe7,0x1b,0xac,0xe0,0x9d,0xeb,0x5a,0xfe,0x21,
0x71,0xff,0x41,0x51,0x4f,0xbe,0x41,0x01,0x15,0xe2,0xb7,0xb9,0x55,0x15,0x25,0xa1,
0x40,0x4c,0x66,0x29,0x32,0xb7,0xc9,0x62,0x60,0x88,0xe0,0x99,0x39,0xae,0xce,0x25,
0x3c,0x11,0x89,0xdd,0xc6,0x14,0xd7,0x3e,0xa3,0x6d,0x07,0x2e,0x56,0xa0,0xff,0x97},
{0x3c,0x91,0x12,0x4a,0x37,0x7d,0xd6,0x96,0xd2,0x9b,0x5d,0xea,0xb8,0xb9,0x82,0x4e,
0x4f,0x6b,0x60,0x4c,0x59,0x01,0xe5,0x73,0xfd,0xf6,0xb8,0x9a,0x5a,0xd3,0x7c,0x7a,
0xd2,0x4f,0x8e,0x74,0xc1,0x90,0x88,0xa0,0x3f,0x55,0x75,0x79,0x10,0xd0,0x09,0x79,
0x0f,0x6c,0x74,0x0c,0x05,0x08,0x3c,0x8c,0x94,0x7b,0x30,0x56,0xca,0xdf,0xdf,0x34}
};
#elif defined(SCRYPT_CHACHA)
static const uint8_t post_vectors[][64] = {
{0x77,0xcb,0x70,0xbf,0xae,0xd4,0x4c,0x5b,0xbc,0xd3,0xec,0x8a,0x82,0x43,0x8d,0xb3,
0x7f,0x1f,0xfb,0x70,0x36,0x32,0x4d,0xa6,0xb7,0x13,0x37,0x77,0x30,0x0c,0x3c,0xfb,
0x2c,0x20,0x8f,0x2a,0xf4,0x47,0x4d,0x69,0x8e,0xae,0x2d,0xad,0xba,0x35,0xe9,0x2f,
0xe6,0x99,0x7a,0xf8,0xcf,0x70,0x78,0xbb,0x0c,0x72,0x64,0x95,0x8b,0x36,0x77,0x3d},
{0xc6,0x43,0x17,0x16,0x87,0x09,0x5f,0x12,0xed,0x21,0xe2,0xb4,0xad,0x55,0xa1,0xa1,
0x49,0x50,0x90,0x70,0xab,0x81,0x83,0x7a,0xcd,0xdf,0x23,0x52,0x19,0xc0,0xa2,0xd8,
0x8e,0x98,0xeb,0xf0,0x37,0xab,0xad,0xfd,0x1c,0x04,0x97,0x18,0x42,0x85,0xf7,0x4b,
0x18,0x2c,0x55,0xd3,0xa9,0xe6,0x89,0xfb,0x58,0x0a,0xb2,0x37,0xb9,0xf8,0xfb,0xc5}
};
#elif defined(SCRYPT_SALSA64)
static const uint8_t post_vectors[][64] = {
{0xc7,0x34,0x95,0x02,0x5e,0x31,0x0d,0x1f,0x10,0x38,0x9c,0x3f,0x04,0x53,0xed,0x05,
0x27,0x38,0xc1,0x3f,0x6a,0x0f,0xc5,0xa3,0x9b,0x73,0x8a,0x28,0x7e,0x5d,0x3c,0xdc,
0x9d,0x5a,0x09,0xbf,0x8c,0x0a,0xad,0xe4,0x73,0x52,0xe3,0x6d,0xaa,0xd1,0x8b,0xbf,
0xa3,0xb7,0xf0,0x58,0xad,0x22,0x24,0xc9,0xaa,0x96,0xb7,0x5d,0xfc,0x5f,0xb0,0xcf},
{0x76,0x22,0xfd,0xe8,0xa2,0x79,0x8e,0x9d,0x43,0x8c,0x7a,0xba,0x78,0xb7,0x84,0xf1,
0xc8,0xee,0x3b,0xae,0x31,0x89,0xbf,0x7e,0xd0,0x4b,0xc1,0x2d,0x58,0x5d,0x84,0x6b,
0xec,0x86,0x56,0xe0,0x87,0x94,0x7f,0xbc,0xf9,0x48,0x92,0xef,0x54,0x7f,0x23,0x8d,
0x4f,0x8b,0x0a,0x75,0xa7,0x39,0x0e,0x46,0x6e,0xee,0x58,0xc8,0xfa,0xea,0x90,0x53}
};
#endif
#elif defined(SCRYPT_KECCAK256)
#if defined(SCRYPT_SALSA)
static const uint8_t post_vectors[][64] = {
{0x2e,0x96,0xd8,0x87,0x45,0xcd,0xd6,0xc8,0xf6,0xd2,0x87,0x33,0x50,0xc7,0x04,0xe5,
0x3c,0x4b,0x48,0x44,0x57,0xc1,0x74,0x09,0x76,0x02,0xaa,0xd3,0x7b,0xf3,0xbf,0xed,
0x4b,0x72,0xd7,0x1b,0x49,0x6b,0xe0,0x44,0x83,0xee,0x8f,0xaf,0xa1,0xb5,0x33,0xa9,
0x9e,0x86,0xab,0xe2,0x9f,0xcf,0x68,0x6e,0x7e,0xbd,0xf5,0x7a,0x83,0x4b,0x1c,0x10},
{0x42,0x7e,0xf9,0x4b,0x72,0x61,0xda,0x2d,0xb3,0x27,0x0e,0xe1,0xd9,0xde,0x5f,0x3e,
0x64,0x2f,0xd6,0xda,0x90,0x59,0xce,0xbf,0x02,0x5b,0x32,0xf7,0x6d,0x94,0x51,0x7b,
0xb6,0xa6,0x0d,0x99,0x3e,0x7f,0x39,0xbe,0x1b,0x1d,0x6c,0x97,0x12,0xd8,0xb7,0xfd,
0x5b,0xb5,0xf3,0x73,0x5a,0x89,0xb2,0xdd,0xcc,0x3d,0x74,0x2e,0x3d,0x9e,0x3c,0x22}
};
#elif defined(SCRYPT_CHACHA)
static const uint8_t post_vectors[][64] = {
{0x76,0x1d,0x5b,0x8f,0xa9,0xe1,0xa6,0x01,0xcb,0xc5,0x7a,0x5f,0x02,0x23,0xb6,0x82,
0x57,0x79,0x60,0x2f,0x05,0x7f,0xb8,0x0a,0xcb,0x5e,0x54,0x11,0x49,0x2e,0xdd,0x85,
0x83,0x30,0x67,0xb3,0x24,0x5c,0xce,0xfc,0x32,0xcf,0x12,0xc3,0xff,0xe0,0x79,0x36,
0x74,0x17,0xa6,0x3e,0xcd,0xa0,0x7e,0xcb,0x37,0xeb,0xcb,0xb6,0xe1,0xb9,0xf5,0x15},
{0xf5,0x66,0xa7,0x4c,0xe4,0xdc,0x18,0x56,0x2f,0x3e,0x86,0x4d,0x92,0xa5,0x5c,0x5a,
0x8f,0xc3,0x6b,0x32,0xdb,0xe5,0x72,0x50,0x84,0xfc,0x6e,0x5d,0x15,0x77,0x3d,0xca,
0xc5,0x2b,0x20,0x3c,0x78,0x37,0x80,0x78,0x23,0x56,0x91,0xa0,0xce,0xa4,0x06,0x5a,
0x7f,0xe3,0xbf,0xab,0x51,0x57,0x32,0x2c,0x0a,0xf0,0xc5,0x6f,0xf4,0xcb,0xff,0x42}
};
#elif defined(SCRYPT_SALSA64)
static const uint8_t post_vectors[][64] = {
{0xb0,0xb7,0x10,0xb5,0x1f,0x2b,0x7f,0xaf,0x9d,0x95,0x5f,0x4c,0x2d,0x98,0x7c,0xc1,
0xbc,0x37,0x2f,0x50,0x8d,0xb2,0x9f,0xfd,0x48,0x0d,0xe0,0x44,0x19,0xdf,0x28,0x6c,
0xab,0xbf,0x1e,0x17,0x26,0xcc,0x57,0x95,0x18,0x17,0x83,0x4c,0x12,0x48,0xd9,0xee,
0x4b,0x00,0x29,0x06,0x31,0x01,0x6b,0x8c,0x26,0x39,0xbf,0xe4,0xe4,0xd4,0x6a,0x26},
{0xa0,0x40,0xb2,0xf2,0x11,0xb6,0x5f,0x3d,0x4c,0x1e,0xef,0x59,0xd4,0x98,0xdb,0x14,
0x01,0xff,0xe3,0x34,0xd7,0x19,0xcd,0xeb,0xde,0x52,0x1c,0xf4,0x86,0x43,0xc9,0xe2,
0xfb,0xf9,0x4f,0x0a,0xbb,0x1f,0x5c,0x6a,0xdf,0xb9,0x28,0xfa,0xac,0xc4,0x48,0xed,
0xcc,0xd2,0x2e,0x25,0x5f,0xf3,0x56,0x1d,0x2d,0x23,0x22,0xc1,0xbc,0xff,0x78,0x80}
};
#endif
#else
static const uint8_t post_vectors[][64] = {{0}};
#endif

13
bf/scrypt-jane/example.c Normal file

@ -0,0 +1,13 @@
#include <stdio.h>
#include "scrypt-jane.h"
int main(void) {
unsigned char digest[16];
size_t i;
scrypt((const unsigned char *)"pw", 2, (const unsigned char *)"salt", 4, 0, 0, 0, digest, 16);
for (i = 0; i < sizeof(digest); i++)
printf("%02x, ", digest[i]);
printf("\n");
return 0;
}


@ -0,0 +1,121 @@
#define SCRYPT_TEST_SPEED
#include "scrypt-jane.c"
/* ticks - not tested on anything other than x86 */
static uint64_t
get_ticks(void) {
#if defined(CPU_X86) || defined(CPU_X86_64)
#if defined(COMPILER_INTEL)
return _rdtsc();
#elif defined(COMPILER_MSVC)
return __rdtsc();
#elif defined(COMPILER_GCC)
uint32_t lo, hi;
__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
return ((uint64_t)lo | ((uint64_t)hi << 32));
#else
need rdtsc for this compiler
#endif
#elif defined(OS_SOLARIS)
return (uint64_t)gethrtime();
#elif defined(CPU_SPARC) && !defined(OS_OPENBSD)
uint64_t t;
__asm__ __volatile__("rd %%tick, %0" : "=r" (t));
return t;
#elif defined(CPU_PPC)
uint32_t lo = 0, hi = 0;
__asm__ __volatile__("mftbu %0; mftb %1" : "=r" (hi), "=r" (lo));
return ((uint64_t)lo | ((uint64_t)hi << 32));
#elif defined(CPU_IA64)
uint64_t t;
__asm__ __volatile__("mov %0=ar.itc" : "=r" (t));
return t;
#elif defined(OS_NIX)
struct timeval t2;
uint64_t t;
gettimeofday(&t2, NULL);
t = ((uint64_t)t2.tv_usec << 32) | (uint64_t)t2.tv_sec;
return t;
#else
need ticks for this platform
#endif
}
#define timeit(x,minvar) { \
ticks = get_ticks(); \
x; \
ticks = get_ticks() - ticks; \
if (ticks < minvar) \
minvar = ticks; \
}
#define maxticks 0xffffffffffffffffull
typedef struct scrypt_speed_settings_t {
const char *desc;
uint8_t Nfactor, rfactor, pfactor;
} scrypt_speed_settings;
/* scrypt_r_32kb is set to a 32kb chunk, so (1 << (scrypt_r_32kb - 5)) = 1kb chunk */
static const scrypt_speed_settings settings[] = {
{"scrypt high volume ( ~4mb)", 11, scrypt_r_32kb - 5, 0},
{"scrypt interactive (~16mb)", 13, scrypt_r_32kb - 5, 0},
{"scrypt non-interactive (~ 1gb)", 19, scrypt_r_32kb - 5, 0},
{0}
};
int main(void) {
const scrypt_speed_settings *s;
uint8_t password[64], salt[24], digest[64];
uint64_t minticks, ticks;
size_t i, passes;
size_t cpuflags, topbit;
for (i = 0; i < sizeof(password); i++)
password[i] = (uint8_t)i;
for (i = 0; i < sizeof(salt); i++)
salt[i] = 255 - (uint8_t)i;
/* warm up a little */
scrypt(password, sizeof(password), salt, sizeof(salt), 15, 3, 4, digest, sizeof(digest));
cpuflags = available_implementations();
topbit = 0;
for (i = cpuflags; i != 0; i >>= 1)
topbit++;
topbit = ((size_t)1 << topbit);
while (1) {
#if defined(SCRYPT_CHOOSE_COMPILETIME)
printf("speed test for scrypt[%s,%s]\n", SCRYPT_HASH, SCRYPT_MIX);
#else
printf("speed test for scrypt[%s,%s,%s]\n", SCRYPT_HASH, SCRYPT_MIX, get_top_cpuflag_desc(cpuflags));
#endif
cpu_detect_mask = cpuflags;
for (i = 0; settings[i].desc; i++) {
s = &settings[i];
minticks = maxticks;
for (passes = 0; passes < 16; passes++)
timeit(scrypt(password, sizeof(password), salt, sizeof(salt), s->Nfactor, s->rfactor, s->pfactor, digest, sizeof(digest)), minticks)
printf("%s, %.0f ticks\n", s->desc, (double)minticks);
}
#if defined(SCRYPT_CHOOSE_COMPILETIME)
break;
#else
while (topbit && ((cpuflags & topbit) == 0))
topbit >>= 1;
cpuflags &= ~topbit;
/* (cpuflags == 0) is the basic/portable version, don't bother timing it */
if (!cpuflags)
break;
#endif
}
printf("\n\n");
return 0;
}


@ -0,0 +1,12 @@
#define SCRYPT_TEST
#include "scrypt-jane.c"
int main(void) {
int res = scrypt_power_on_self_test();
printf("%s: test %s\n", SCRYPT_MIX, (res & 1) ? "ok" : "FAILED");
printf("%s: test %s\n", SCRYPT_HASH, (res & 2) ? "ok" : "FAILED");
printf("scrypt: test vectors %s\n", (res & 4) ? "ok" : "FAILED");
return ((res & 7) == 7) ? 0 : 1;
}


@ -0,0 +1,193 @@
/*
scrypt-jane by Andrew M, https://github.com/floodyberry/scrypt-jane
Public Domain or MIT License, whichever is easier
*/
#include <string.h>
#include "scrypt-jane.h"
#include "code/scrypt-jane-portable.h"
#include "code/scrypt-jane-hash.h"
#include "code/scrypt-jane-romix.h"
#include "code/scrypt-jane-test-vectors.h"
#define scrypt_maxNfactor 30 /* (1 << (30 + 1)) = ~2 billion */
#if (SCRYPT_BLOCK_BYTES == 64)
#define scrypt_r_32kb 8 /* (1 << 8) = 256 * 2 blocks in a chunk * 64 bytes = Max of 32kb in a chunk */
#elif (SCRYPT_BLOCK_BYTES == 128)
#define scrypt_r_32kb 7 /* (1 << 7) = 128 * 2 blocks in a chunk * 128 bytes = Max of 32kb in a chunk */
#elif (SCRYPT_BLOCK_BYTES == 256)
#define scrypt_r_32kb 6 /* (1 << 6) = 64 * 2 blocks in a chunk * 256 bytes = Max of 32kb in a chunk */
#elif (SCRYPT_BLOCK_BYTES == 512)
#define scrypt_r_32kb 5 /* (1 << 5) = 32 * 2 blocks in a chunk * 512 bytes = Max of 32kb in a chunk */
#endif
#define scrypt_maxrfactor scrypt_r_32kb /* 32kb */
#define scrypt_maxpfactor 25 /* (1 << 25) = ~33 million */
#include <stdio.h>
//#include <malloc.h>
static void NORETURN
scrypt_fatal_error_default(const char *msg) {
fprintf(stderr, "%s\n", msg);
exit(1);
}
static scrypt_fatal_errorfn scrypt_fatal_error = scrypt_fatal_error_default;
void
scrypt_set_fatal_error(scrypt_fatal_errorfn fn) {
scrypt_fatal_error = fn;
}
static int
scrypt_power_on_self_test(void) {
const scrypt_test_setting *t;
uint8_t test_digest[64];
uint32_t i;
int res = 7, scrypt_valid;
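/* res bit 0 = mix function ok, bit 1 = hash function ok, bit 2 = test vectors ok */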
if (!scrypt_test_mix()) {
#if !defined(SCRYPT_TEST)
scrypt_fatal_error("scrypt: mix function power-on-self-test failed");
#endif
res &= ~1;
}
if (!scrypt_test_hash()) {
#if !defined(SCRYPT_TEST)
scrypt_fatal_error("scrypt: hash function power-on-self-test failed");
#endif
res &= ~2;
}
for (i = 0, scrypt_valid = 1; post_settings[i].pw; i++) {
t = post_settings + i;
scrypt((uint8_t *)t->pw, strlen(t->pw), (uint8_t *)t->salt, strlen(t->salt), t->Nfactor, t->rfactor, t->pfactor, test_digest, sizeof(test_digest));
scrypt_valid &= scrypt_verify(post_vectors[i], test_digest, sizeof(test_digest));
}
if (!scrypt_valid) {
#if !defined(SCRYPT_TEST)
scrypt_fatal_error("scrypt: scrypt power-on-self-test failed");
#endif
res &= ~4;
}
return res;
}
typedef struct scrypt_aligned_alloc_t {
uint8_t *mem, *ptr;
} scrypt_aligned_alloc;
#if defined(SCRYPT_TEST_SPEED)
static uint8_t *mem_base = (uint8_t *)0;
static size_t mem_bump = 0;
/* allocations are assumed to be multiples of 64 bytes and total allocations not to exceed ~1.01gb */
static scrypt_aligned_alloc
scrypt_alloc(uint64_t size) {
scrypt_aligned_alloc aa;
if (!mem_base) {
mem_base = (uint8_t *)malloc((1024 * 1024 * 1024) + (1024 * 1024) + (SCRYPT_BLOCK_BYTES - 1));
if (!mem_base)
scrypt_fatal_error("scrypt: out of memory");
mem_base = (uint8_t *)(((size_t)mem_base + (SCRYPT_BLOCK_BYTES - 1)) & ~(SCRYPT_BLOCK_BYTES - 1));
}
aa.mem = mem_base + mem_bump;
aa.ptr = aa.mem;
mem_bump += (size_t)size;
return aa;
}
static void
scrypt_free(scrypt_aligned_alloc *aa) {
mem_bump = 0;
}
#else
static scrypt_aligned_alloc
scrypt_alloc(uint64_t size) {
static const size_t max_alloc = (size_t)-1;
scrypt_aligned_alloc aa;
size += (SCRYPT_BLOCK_BYTES - 1);
if (size > max_alloc)
scrypt_fatal_error("scrypt: not enough address space on this CPU to allocate required memory");
aa.mem = (uint8_t *)malloc((size_t)size);
aa.ptr = (uint8_t *)(((size_t)aa.mem + (SCRYPT_BLOCK_BYTES - 1)) & ~(SCRYPT_BLOCK_BYTES - 1));
//fprintf(stderr, "scrypt_alloc(%zu)\n", size);
if (!aa.mem)
scrypt_fatal_error("scrypt: out of memory");
return aa;
}
static void
scrypt_free(scrypt_aligned_alloc *aa) {
free(aa->mem);
}
#endif
void
scrypt(const uint8_t *password, size_t password_len, const uint8_t *salt, size_t salt_len, uint8_t Nfactor, uint8_t rfactor, uint8_t pfactor, uint8_t *out, size_t bytes) {
static scrypt_aligned_alloc YX, V;
uint8_t *X, *Y;
uint32_t N, r, p, chunk_bytes, i;
static int last_Nfactor = -1, last_rfactor = -1, last_pfactor = -1;
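/* note: V and YX are static and reused across calls; they are only
   reallocated when Nfactor/rfactor/pfactor change, which makes this
   scrypt() non-reentrant and not thread-safe */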
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
scrypt_ROMixfn scrypt_ROMix = scrypt_getROMix();
#endif
#if !defined(SCRYPT_TEST)
static int power_on_self_test = 0;
if (!power_on_self_test) {
power_on_self_test = 1;
if (!scrypt_power_on_self_test())
scrypt_fatal_error("scrypt: power on self test failed");
}
#endif
if (Nfactor > scrypt_maxNfactor)
scrypt_fatal_error("scrypt: N out of range");
if (rfactor > scrypt_maxrfactor)
scrypt_fatal_error("scrypt: r out of range");
if (pfactor > scrypt_maxpfactor)
scrypt_fatal_error("scrypt: p out of range");
N = (1 << (Nfactor + 1));
r = (1 << rfactor);
p = (1 << pfactor);
//fprintf(stderr, "scrypt(%u,%u,%u) %p %p %p %p\n", N, r, p, &V, V.mem, &YX, YX.mem);
chunk_bytes = SCRYPT_BLOCK_BYTES * r * 2;
if (Nfactor != last_Nfactor || rfactor != last_rfactor || pfactor != last_pfactor) {
//fprintf(stderr, "need to reallocate\n");
if (last_Nfactor >= 0 || last_rfactor >= 0 || last_pfactor >= 0) {
//fprintf(stderr, "freeing old V and YX\n");
scrypt_free(&V);
scrypt_free(&YX);
}
V = scrypt_alloc((uint64_t)N * chunk_bytes);
YX = scrypt_alloc((p + 1) * chunk_bytes);
last_Nfactor = Nfactor;
last_rfactor = rfactor;
last_pfactor = pfactor;
}
/* 1: X = PBKDF2(password, salt) */
Y = YX.ptr;
X = Y + chunk_bytes;
scrypt_pbkdf2(password, password_len, salt, salt_len, 1, X, chunk_bytes * p);
/* 2: X = ROMix(X) */
for (i = 0; i < p; i++)
scrypt_ROMix((scrypt_mix_word_t *)(X + (chunk_bytes * i)), (scrypt_mix_word_t *)Y, (scrypt_mix_word_t *)V.ptr, N, r);
/* 3: Out = PBKDF2(password, X) */
scrypt_pbkdf2(password, password_len, X, chunk_bytes * p, 1, out, bytes);
//scrypt_ensure_zero(YX.ptr, (p + 1) * chunk_bytes);
}


@ -0,0 +1,27 @@
#ifndef SCRYPT_JANE_H
#define SCRYPT_JANE_H
/*
Nfactor: Increases CPU & Memory Hardness
N = (1 << (Nfactor + 1)): How many times to mix a chunk and how many temporary chunks are used
rfactor: Increases Memory Hardness
r = (1 << rfactor): How large a chunk is
pfactor: Increases CPU Hardness
p = (1 << pfactor): Number of times to mix the main chunk
A block is the basic mixing unit (salsa/chacha block = 64 bytes)
A chunk is (2 * r) blocks
~Memory used = (N + 2) * ((2 * r) * block size)
*/
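/*
   worked example (illustrative): Nfactor=13, rfactor=3, pfactor=0 with
   64-byte blocks gives N = 1 << 14 = 16384, r = 8, p = 1, so memory used
   is about (16384 + 2) * (2 * 8 * 64) bytes, i.e. roughly 16mb (the
   "scrypt interactive" setting in the speed test)
*/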
#include <stdlib.h>
typedef void (*scrypt_fatal_errorfn)(const char *msg);
void scrypt_set_fatal_error(scrypt_fatal_errorfn fn);
void scrypt(const unsigned char *password, size_t password_len, const unsigned char *salt, size_t salt_len, unsigned char Nfactor, unsigned char rfactor, unsigned char pfactor, unsigned char *out, size_t bytes);
#endif /* SCRYPT_JANE_H */


@ -0,0 +1,38 @@
#!/bin/sh
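# usage: run with no argument to build for the default target width,
# or pass 32 / 64 to force -m32 / -m64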
test() {
sleep 0.25 # mingw is stupid and will occasionally not have permission to overwrite scrypt_speed
gcc scrypt-jane-speed.c -O3 -DSCRYPT_$1 -DSCRYPT_$2 $3 -o scrypt_speed 2>/dev/null
local RC=$?
if [ $RC -ne 0 ]; then
echo "$1/$2: failed to compile "
return
fi
./scrypt_speed
}
testhash() {
test $1 SALSA $2
test $1 CHACHA $2
test $1 SALSA64 $2
}
testhashes() {
testhash SHA256 $1
testhash SHA512 $1
testhash BLAKE256 $1
testhash BLAKE512 $1
testhash SKEIN512 $1
testhash KECCAK256 $1
testhash KECCAK512 $1
}
if [ -z "$1" ]; then
testhashes
elif [ $1 -eq 32 ]; then
testhashes -m32
elif [ $1 -eq 64 ]; then
testhashes -m64
fi
rm -f scrypt_speed

44
bf/scrypt-jane/test.sh Normal file

@ -0,0 +1,44 @@
#!/bin/sh
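# usage: run with no argument to build for the default target width,
# or pass 32 / 64 to force -m32 / -m64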
test() {
sleep 0.25 # mingw is stupid and will occasionally not have permission to overwrite scrypt_test
gcc scrypt-jane-test.c -O3 -DSCRYPT_$1 -DSCRYPT_$2 $3 -o scrypt_test 2>/dev/null
local RC=$?
if [ $RC -ne 0 ]; then
echo "$1/$2: failed to compile "
return
fi
./scrypt_test >/dev/null
local RC=$?
if [ $RC -ne 0 ]; then
echo "$1/$2: validation failed"
return
fi
echo "$1/$2: OK"
}
testhash() {
test $1 SALSA $2
test $1 CHACHA $2
test $1 SALSA64 $2
}
testhashes() {
testhash SHA256 $1
testhash SHA512 $1
testhash BLAKE256 $1
testhash BLAKE512 $1
testhash SKEIN512 $1
testhash KECCAK256 $1
testhash KECCAK512 $1
}
if [ -z "$1" ]; then
testhashes
elif [ $1 -eq 32 ]; then
testhashes -m32
elif [ $1 -eq 64 ]; then
testhashes -m64
fi
rm -f scrypt_test

41
bf/secp256k1/.gitignore vendored Normal file
@@ -0,0 +1,41 @@
bench_inv
bench_ecdh
bench_sign
bench_verify
bench_schnorr_verify
bench_recover
bench_internal
tests
gen_context
*.exe
*.so
*.a
!.gitignore
Makefile
configure
.libs/
Makefile.in
aclocal.m4
autom4te.cache/
config.log
config.status
*.tar.gz
*.la
libtool
.deps/
.dirstamp
build-aux/
*.lo
*.o
*~
src/libsecp256k1-config.h
src/libsecp256k1-config.h.in
src/ecmult_static_context.h
m4/libtool.m4
m4/ltoptions.m4
m4/ltsugar.m4
m4/ltversion.m4
m4/lt~obsolete.m4
src/stamp-h1
libsecp256k1.pc

62
bf/secp256k1/.travis.yml Normal file
@@ -0,0 +1,62 @@
language: c
sudo: false
addons:
apt:
packages: libgmp-dev
compiler:
- clang
- gcc
env:
global:
- FIELD=auto BIGNUM=auto SCALAR=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no SCHNORR=no
matrix:
- SCALAR=32bit
- SCALAR=32bit FIELD=32bit ECDH=yes
- SCALAR=64bit
- FIELD=64bit
- FIELD=64bit ENDOMORPHISM=yes
- FIELD=64bit ENDOMORPHISM=yes ECDH=yes
- FIELD=64bit ASM=x86_64
- FIELD=64bit ENDOMORPHISM=yes ASM=x86_64
- FIELD=32bit SCHNORR=yes
- FIELD=32bit ENDOMORPHISM=yes
- BIGNUM=no
- BIGNUM=no ENDOMORPHISM=yes SCHNORR=yes
- BIGNUM=no STATICPRECOMPUTATION=no
- BUILD=distcheck
- EXTRAFLAGS=CFLAGS=-DDETERMINISTIC
matrix:
fast_finish: true
include:
- compiler: clang
env: HOST=i686-linux-gnu ENDOMORPHISM=yes
addons:
apt:
packages:
- gcc-multilib
- libgmp-dev:i386
- compiler: clang
env: HOST=i686-linux-gnu
addons:
apt:
packages:
- gcc-multilib
- compiler: gcc
env: HOST=i686-linux-gnu ENDOMORPHISM=yes
addons:
apt:
packages:
- gcc-multilib
- compiler: gcc
env: HOST=i686-linux-gnu
addons:
apt:
packages:
- gcc-multilib
- libgmp-dev:i386
before_script: ./autogen.sh
script:
- if [ -n "$HOST" ]; then export USE_HOST="--host=$HOST"; fi
- if [ "x$HOST" = "xi686-linux-gnu" ]; then export CC="$CC -m32"; fi
- ./configure --enable-endomorphism=$ENDOMORPHISM --with-field=$FIELD --with-bignum=$BIGNUM --with-scalar=$SCALAR --enable-ecmult-static-precomputation=$STATICPRECOMPUTATION --enable-module-ecdh=$ECDH --enable-module-schnorr=$SCHNORR $EXTRAFLAGS $USE_HOST && make -j2 $BUILD
os: linux

19
bf/secp256k1/COPYING Normal file
@@ -0,0 +1,19 @@
Copyright (c) 2013 Pieter Wuille
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

108
bf/secp256k1/Makefile.am Normal file
@@ -0,0 +1,108 @@
ACLOCAL_AMFLAGS = -I build-aux/m4
lib_LTLIBRARIES = libsecp256k1.la
include_HEADERS = include/secp256k1.h
noinst_HEADERS =
noinst_HEADERS += src/scalar.h
noinst_HEADERS += src/scalar_4x64.h
noinst_HEADERS += src/scalar_8x32.h
noinst_HEADERS += src/scalar_impl.h
noinst_HEADERS += src/scalar_4x64_impl.h
noinst_HEADERS += src/scalar_8x32_impl.h
noinst_HEADERS += src/group.h
noinst_HEADERS += src/group_impl.h
noinst_HEADERS += src/num_gmp.h
noinst_HEADERS += src/num_gmp_impl.h
noinst_HEADERS += src/ecdsa.h
noinst_HEADERS += src/ecdsa_impl.h
noinst_HEADERS += src/eckey.h
noinst_HEADERS += src/eckey_impl.h
noinst_HEADERS += src/ecmult.h
noinst_HEADERS += src/ecmult_impl.h
noinst_HEADERS += src/ecmult_const.h
noinst_HEADERS += src/ecmult_const_impl.h
noinst_HEADERS += src/ecmult_gen.h
noinst_HEADERS += src/ecmult_gen_impl.h
noinst_HEADERS += src/num.h
noinst_HEADERS += src/num_impl.h
noinst_HEADERS += src/field_10x26.h
noinst_HEADERS += src/field_10x26_impl.h
noinst_HEADERS += src/field_5x52.h
noinst_HEADERS += src/field_5x52_impl.h
noinst_HEADERS += src/field_5x52_int128_impl.h
noinst_HEADERS += src/field_5x52_asm_impl.h
noinst_HEADERS += src/java/org_bitcoin_NativeSecp256k1.h
noinst_HEADERS += src/util.h
noinst_HEADERS += src/testrand.h
noinst_HEADERS += src/testrand_impl.h
noinst_HEADERS += src/hash.h
noinst_HEADERS += src/hash_impl.h
noinst_HEADERS += src/field.h
noinst_HEADERS += src/field_impl.h
noinst_HEADERS += src/bench.h
pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = libsecp256k1.pc
libsecp256k1_la_SOURCES = src/secp256k1.c
libsecp256k1_la_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES)
libsecp256k1_la_LIBADD = $(SECP_LIBS)
noinst_PROGRAMS =
if USE_BENCHMARK
noinst_PROGRAMS += bench_verify bench_recover bench_sign bench_internal
bench_verify_SOURCES = src/bench_verify.c
bench_verify_LDADD = libsecp256k1.la $(SECP_LIBS)
bench_verify_LDFLAGS = -static
bench_recover_SOURCES = src/bench_recover.c
bench_recover_LDADD = libsecp256k1.la $(SECP_LIBS)
bench_recover_LDFLAGS = -static
bench_sign_SOURCES = src/bench_sign.c
bench_sign_LDADD = libsecp256k1.la $(SECP_LIBS)
bench_sign_LDFLAGS = -static
bench_internal_SOURCES = src/bench_internal.c
bench_internal_LDADD = $(SECP_LIBS)
bench_internal_LDFLAGS = -static
bench_internal_CPPFLAGS = $(SECP_INCLUDES)
endif
if USE_TESTS
noinst_PROGRAMS += tests
tests_SOURCES = src/tests.c
tests_CPPFLAGS = -DVERIFY -I$(top_srcdir)/src $(SECP_INCLUDES) $(SECP_TEST_INCLUDES)
tests_LDADD = $(SECP_LIBS) $(SECP_TEST_LIBS)
tests_LDFLAGS = -static
TESTS = tests
endif
if USE_ECMULT_STATIC_PRECOMPUTATION
CPPFLAGS_FOR_BUILD +=-I$(top_srcdir)/
CFLAGS_FOR_BUILD += -Wall -Wextra -Wno-unused-function
gen_context_OBJECTS = gen_context.o
gen_context_BIN = gen_context$(BUILD_EXEEXT)
gen_%.o: src/gen_%.c
$(CC_FOR_BUILD) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@
$(gen_context_BIN): $(gen_context_OBJECTS)
$(CC_FOR_BUILD) $^ -o $@
$(libsecp256k1_la_OBJECTS): src/ecmult_static_context.h
$(tests_OBJECTS): src/ecmult_static_context.h
src/ecmult_static_context.h: $(gen_context_BIN)
./$(gen_context_BIN)
CLEANFILES = $(gen_context_BIN) src/ecmult_static_context.h
endif
EXTRA_DIST = autogen.sh src/gen_context.c src/basic-config.h
if ENABLE_MODULE_ECDH
include src/modules/ecdh/Makefile.am.include
endif
if ENABLE_MODULE_SCHNORR
include src/modules/schnorr/Makefile.am.include
endif

61
bf/secp256k1/README.md Normal file
@@ -0,0 +1,61 @@
libsecp256k1
============
[![Build Status](https://travis-ci.org/bitcoin/secp256k1.svg?branch=master)](https://travis-ci.org/bitcoin/secp256k1)
Optimized C library for EC operations on curve secp256k1.
This library is a work in progress and is being used to research best practices. Use at your own risk.
Features:
* secp256k1 ECDSA signing/verification and key generation.
* Adding/multiplying private/public keys.
* Serialization/parsing of private keys, public keys, signatures.
* Constant time, constant memory access signing and pubkey generation.
* Derandomized DSA (via RFC6979 or with a caller provided function.)
* Very efficient implementation.
Implementation details
----------------------
* General
* No runtime heap allocation.
* Extensive testing infrastructure.
* Structured to facilitate review and analysis.
* Intended to be portable to any system with a C89 compiler and uint64_t support.
* Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.")
* Field operations
* Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1).
* Using 5 52-bit limbs (including hand-optimized assembly for x86_64, by Diederik Huys).
* Using 10 26-bit limbs.
* Field inverses and square roots using a sliding window over blocks of 1s (by Peter Dettman).
* Scalar operations
* Optimized implementation without data-dependent branches of arithmetic modulo the curve's order.
* Using 4 64-bit limbs (relying on __int128 support in the compiler).
* Using 8 32-bit limbs.
* Group operations
* Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7).
* Use addition between points in Jacobian and affine coordinates where possible.
* Use a unified addition/doubling formula where necessary to avoid data-dependent branches.
* Point/x comparison without a field inversion by comparison in the Jacobian coordinate space.
* Point multiplication for verification (a*P + b*G).
* Use wNAF notation for point multiplicands.
* Use a much larger window for multiples of G, using precomputed multiples.
* Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
* Optionally (off by default) use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
* Point multiplication for signing
* Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
* Access the table with branch-free conditional moves so memory access is uniform.
* No data-dependent branches
* The precomputed tables add and eventually subtract points for which no known scalar (private key) is known, preventing even an attacker with control over the private key from controlling the data used internally.
Build steps
-----------
libsecp256k1 is built using autotools:
    $ ./autogen.sh
    $ ./configure
    $ make
    $ ./tests
    $ sudo make install  # optional
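A minimal sketch of signing and verifying (not part of this commit), written against the include/secp256k1.h interface shipped in this tree; the fixed secret key and message hash are placeholders for illustration, and a real application must use a securely generated random key:

    #include <stdio.h>
    #include <string.h>
    #include "include/secp256k1.h"

    int main(void) {
        secp256k1_context_t *ctx;
        secp256k1_pubkey_t pubkey;
        secp256k1_ecdsa_signature_t sig;
        unsigned char seckey[32], msg32[32];
        int i;

        for (i = 0; i < 32; i++) seckey[i] = (unsigned char)(i + 1); /* toy key */
        memset(msg32, 0xab, sizeof(msg32)); /* stands in for a real message hash */

        ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        if (!secp256k1_ec_seckey_verify(ctx, seckey)) return 1;
        /* NULL noncefp selects secp256k1_nonce_function_default (RFC6979) */
        if (!secp256k1_ecdsa_sign(ctx, msg32, &sig, seckey, NULL, NULL)) return 1;
        if (!secp256k1_ec_pubkey_create(ctx, &pubkey, seckey)) return 1;
        printf("verify: %d\n", secp256k1_ecdsa_verify(ctx, msg32, &sig, &pubkey));
        secp256k1_context_destroy(ctx);
        return 0;
    }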

3
bf/secp256k1/TODO Normal file
@@ -0,0 +1,3 @@
* Unit tests for fieldelem/groupelem, including ones intended to
trigger fieldelem's boundary cases.
* Complete constant-time operations for signing/keygen

3
bf/secp256k1/autogen.sh Executable file
@@ -0,0 +1,3 @@
#!/bin/sh
set -e
autoreconf -if --warnings=all

366
bf/secp256k1/configure.ac Normal file
@@ -0,0 +1,366 @@
AC_PREREQ([2.60])
AC_INIT([libsecp256k1],[0.1])
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_MACRO_DIR([build-aux/m4])
AC_CANONICAL_HOST
AH_TOP([#ifndef LIBSECP256K1_CONFIG_H])
AH_TOP([#define LIBSECP256K1_CONFIG_H])
AH_BOTTOM([#endif /*LIBSECP256K1_CONFIG_H*/])
AM_INIT_AUTOMAKE([foreign subdir-objects])
LT_INIT
dnl make the compilation flags quiet unless V=1 is used
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
PKG_PROG_PKG_CONFIG
AC_PATH_TOOL(AR, ar)
AC_PATH_TOOL(RANLIB, ranlib)
AC_PATH_TOOL(STRIP, strip)
AX_PROG_CC_FOR_BUILD
if test "x$CFLAGS" = "x"; then
CFLAGS="-O3 -g"
fi
AM_PROG_CC_C_O
AC_PROG_CC_C89
if test x"$ac_cv_prog_cc_c89" = x"no"; then
AC_MSG_ERROR([c89 compiler support required])
fi
case $host in
*mingw*)
use_pkgconfig=no
;;
*)
use_pkgconfig=yes
;;
esac
case $host_os in
*darwin*)
if test x$cross_compiling != xyes; then
AC_PATH_PROG([BREW],brew,)
if test x$BREW != x; then
dnl These Homebrew packages may be keg-only, meaning that they won't be found
dnl in expected paths because they may conflict with system files. Ask
dnl Homebrew where each one is located, then adjust paths accordingly.
openssl_prefix=`$BREW --prefix openssl 2>/dev/null`
gmp_prefix=`$BREW --prefix gmp 2>/dev/null`
if test x$openssl_prefix != x; then
PKG_CONFIG_PATH="$openssl_prefix/lib/pkgconfig:$PKG_CONFIG_PATH"
export PKG_CONFIG_PATH
fi
if test x$gmp_prefix != x; then
GMP_CPPFLAGS="-I$gmp_prefix/include"
GMP_LIBS="-L$gmp_prefix/lib"
fi
else
AC_PATH_PROG([PORT],port,)
dnl if homebrew isn't installed and macports is, add the macports default paths
dnl as a last resort.
if test x$PORT != x; then
CPPFLAGS="$CPPFLAGS -isystem /opt/local/include"
LDFLAGS="$LDFLAGS -L/opt/local/lib"
fi
fi
fi
;;
esac
CFLAGS="$CFLAGS -W"
warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wno-unused-function -Wno-long-long -Wno-overlength-strings"
saved_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $warn_CFLAGS"
AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
[ AC_MSG_RESULT([yes]) ],
[ AC_MSG_RESULT([no])
CFLAGS="$saved_CFLAGS"
])
AC_ARG_ENABLE(benchmark,
AS_HELP_STRING([--enable-benchmark],[compile benchmark (default is no)]),
[use_benchmark=$enableval],
[use_benchmark=no])
AC_ARG_ENABLE(tests,
AS_HELP_STRING([--enable-tests],[compile tests (default is yes)]),
[use_tests=$enableval],
[use_tests=yes])
AC_ARG_ENABLE(endomorphism,
AS_HELP_STRING([--enable-endomorphism],[enable endomorphism (default is no)]),
[use_endomorphism=$enableval],
[use_endomorphism=no])
AC_ARG_ENABLE(ecmult_static_precomputation,
AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing (default is yes)]),
[use_ecmult_static_precomputation=$enableval],
[use_ecmult_static_precomputation=yes])
AC_ARG_ENABLE(module_ecdh,
AS_HELP_STRING([--enable-module-ecdh],[enable ECDH shared secret computation (default is no)]),
[enable_module_ecdh=$enableval],
[enable_module_ecdh=no])
AC_ARG_ENABLE(module_schnorr,
AS_HELP_STRING([--enable-module-schnorr],[enable Schnorr signature module (default is no)]),
[enable_module_schnorr=$enableval],
[enable_module_schnorr=no])
AC_ARG_WITH([field], [AS_HELP_STRING([--with-field=64bit|32bit|auto],
[Specify Field Implementation. Default is auto])],[req_field=$withval], [req_field=auto])
AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto],
[Specify Bignum Implementation. Default is auto])],[req_bignum=$withval], [req_bignum=auto])
AC_ARG_WITH([scalar], [AS_HELP_STRING([--with-scalar=64bit|32bit|auto],
[Specify scalar implementation. Default is auto])],[req_scalar=$withval], [req_scalar=auto])
AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|no|auto],
[Specify assembly optimizations to use. Default is auto])],[req_asm=$withval], [req_asm=auto])
AC_CHECK_TYPES([__int128])
AC_MSG_CHECKING([for __builtin_expect])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[void myfunc() {__builtin_expect(0,0);}]])],
[ AC_MSG_RESULT([yes]);AC_DEFINE(HAVE_BUILTIN_EXPECT,1,[Define this symbol if __builtin_expect is available]) ],
[ AC_MSG_RESULT([no])
])
if test x"$req_asm" = x"auto"; then
SECP_64BIT_ASM_CHECK
if test x"$has_64bit_asm" = x"yes"; then
set_asm=x86_64
fi
if test x"$set_asm" = x; then
set_asm=no
fi
else
set_asm=$req_asm
case $set_asm in
x86_64)
SECP_64BIT_ASM_CHECK
if test x"$has_64bit_asm" != x"yes"; then
AC_MSG_ERROR([x86_64 assembly optimization requested but not available])
fi
;;
no)
;;
*)
AC_MSG_ERROR([invalid assembly optimization selection])
;;
esac
fi
if test x"$req_field" = x"auto"; then
if test x"set_asm" = x"x86_64"; then
set_field=64bit
fi
if test x"$set_field" = x; then
SECP_INT128_CHECK
if test x"$has_int128" = x"yes"; then
set_field=64bit
fi
fi
if test x"$set_field" = x; then
set_field=32bit
fi
else
set_field=$req_field
case $set_field in
64bit)
if test x"$set_asm" != x"x86_64"; then
SECP_INT128_CHECK
if test x"$has_int128" != x"yes"; then
AC_MSG_ERROR([64bit field explicitly requested but neither __int128 support nor x86_64 assembly available])
fi
fi
;;
32bit)
;;
*)
AC_MSG_ERROR([invalid field implementation selection])
;;
esac
fi
if test x"$req_scalar" = x"auto"; then
SECP_INT128_CHECK
if test x"$has_int128" = x"yes"; then
set_scalar=64bit
fi
if test x"$set_scalar" = x; then
set_scalar=32bit
fi
else
set_scalar=$req_scalar
case $set_scalar in
64bit)
SECP_INT128_CHECK
if test x"$has_int128" != x"yes"; then
AC_MSG_ERROR([64bit scalar explicitly requested but __int128 support not available])
fi
;;
32bit)
;;
*)
AC_MSG_ERROR([invalid scalar implementation selected])
;;
esac
fi
if test x"$req_bignum" = x"auto"; then
SECP_GMP_CHECK
if test x"$has_gmp" = x"yes"; then
set_bignum=gmp
fi
if test x"$set_bignum" = x; then
set_bignum=no
fi
else
set_bignum=$req_bignum
case $set_bignum in
gmp)
SECP_GMP_CHECK
if test x"$has_gmp" != x"yes"; then
AC_MSG_ERROR([gmp bignum explicitly requested but libgmp not available])
fi
;;
no)
;;
*)
AC_MSG_ERROR([invalid bignum implementation selection])
;;
esac
fi
# select assembly optimization
case $set_asm in
x86_64)
AC_DEFINE(USE_ASM_X86_64, 1, [Define this symbol to enable x86_64 assembly optimizations])
;;
no)
;;
*)
AC_MSG_ERROR([invalid assembly optimizations])
;;
esac
# select field implementation
case $set_field in
64bit)
AC_DEFINE(USE_FIELD_5X52, 1, [Define this symbol to use the FIELD_5X52 implementation])
;;
32bit)
AC_DEFINE(USE_FIELD_10X26, 1, [Define this symbol to use the FIELD_10X26 implementation])
;;
*)
AC_MSG_ERROR([invalid field implementation])
;;
esac
# select bignum implementation
case $set_bignum in
gmp)
AC_DEFINE(HAVE_LIBGMP, 1, [Define this symbol if libgmp is installed])
AC_DEFINE(USE_NUM_GMP, 1, [Define this symbol to use the gmp implementation for num])
AC_DEFINE(USE_FIELD_INV_NUM, 1, [Define this symbol to use the num-based field inverse implementation])
AC_DEFINE(USE_SCALAR_INV_NUM, 1, [Define this symbol to use the num-based scalar inverse implementation])
;;
no)
AC_DEFINE(USE_NUM_NONE, 1, [Define this symbol to use no num implementation])
AC_DEFINE(USE_FIELD_INV_BUILTIN, 1, [Define this symbol to use the native field inverse implementation])
AC_DEFINE(USE_SCALAR_INV_BUILTIN, 1, [Define this symbol to use the native scalar inverse implementation])
;;
*)
AC_MSG_ERROR([invalid bignum implementation])
;;
esac
#select scalar implementation
case $set_scalar in
64bit)
AC_DEFINE(USE_SCALAR_4X64, 1, [Define this symbol to use the 4x64 scalar implementation])
;;
32bit)
AC_DEFINE(USE_SCALAR_8X32, 1, [Define this symbol to use the 8x32 scalar implementation])
;;
*)
AC_MSG_ERROR([invalid scalar implementation])
;;
esac
if test x"$use_tests" = x"yes"; then
SECP_OPENSSL_CHECK
if test x"$has_openssl_ec" = x"yes"; then
AC_DEFINE(ENABLE_OPENSSL_TESTS, 1, [Define this symbol if OpenSSL EC functions are available])
SECP_TEST_INCLUDES="$SSL_CFLAGS $CRYPTO_CFLAGS"
SECP_TEST_LIBS="$CRYPTO_LIBS"
case $host in
*mingw*)
SECP_TEST_LIBS="$SECP_TEST_LIBS -lgdi32"
;;
esac
fi
fi
if test x"$set_bignum" = x"gmp"; then
SECP_LIBS="$SECP_LIBS $GMP_LIBS"
SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS"
fi
if test x"$use_endomorphism" = x"yes"; then
AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization])
fi
if test x"$use_ecmult_static_precomputation" = x"yes"; then
AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table])
fi
if test x"$enable_module_ecdh" = x"yes"; then
AC_DEFINE(ENABLE_MODULE_ECDH, 1, [Define this symbol to enable the ECDH module])
fi
if test x"$enable_module_schnorr" = x"yes"; then
AC_DEFINE(ENABLE_MODULE_SCHNORR, 1, [Define this symbol to enable the Schnorr signature module])
fi
AC_C_BIGENDIAN()
AC_MSG_NOTICE([Using assembly optimizations: $set_asm])
AC_MSG_NOTICE([Using field implementation: $set_field])
AC_MSG_NOTICE([Using bignum implementation: $set_bignum])
AC_MSG_NOTICE([Using scalar implementation: $set_scalar])
AC_MSG_NOTICE([Using endomorphism optimizations: $use_endomorphism])
AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh])
AC_MSG_NOTICE([Building Schnorr signatures module: $enable_module_schnorr])
AC_CONFIG_HEADERS([src/libsecp256k1-config.h])
AC_CONFIG_FILES([Makefile libsecp256k1.pc])
AC_SUBST(SECP_INCLUDES)
AC_SUBST(SECP_LIBS)
AC_SUBST(SECP_TEST_LIBS)
AC_SUBST(SECP_TEST_INCLUDES)
AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"])
AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"])
AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$use_ecmult_static_precomputation" = x"yes"])
AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"])
AM_CONDITIONAL([ENABLE_MODULE_SCHNORR], [test x"$enable_module_schnorr" = x"yes"])
dnl make sure nothing new is exported so that we don't break the cache
PKGCONFIG_PATH_TEMP="$PKG_CONFIG_PATH"
unset PKG_CONFIG_PATH
PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP"
AC_OUTPUT

@@ -0,0 +1,453 @@
#ifndef _SECP256K1_
# define _SECP256K1_
# ifdef __cplusplus
extern "C" {
# endif
# if !defined(SECP256K1_GNUC_PREREQ)
# if defined(__GNUC__)&&defined(__GNUC_MINOR__)
# define SECP256K1_GNUC_PREREQ(_maj,_min) \
((__GNUC__<<16)+__GNUC_MINOR__>=((_maj)<<16)+(_min))
# else
# define SECP256K1_GNUC_PREREQ(_maj,_min) 0
# endif
# endif
# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) )
# if SECP256K1_GNUC_PREREQ(2,7)
# define SECP256K1_INLINE __inline__
# elif (defined(_MSC_VER))
# define SECP256K1_INLINE __inline
# else
# define SECP256K1_INLINE
# endif
# else
# define SECP256K1_INLINE inline
# endif
/** Warning attributes
* NONNULL is not used if SECP256K1_BUILD is set to avoid the compiler optimizing out
* some paranoid null checks. */
# if defined(__GNUC__) && SECP256K1_GNUC_PREREQ(3, 4)
# define SECP256K1_WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__))
# else
# define SECP256K1_WARN_UNUSED_RESULT
# endif
# if !defined(SECP256K1_BUILD) && defined(__GNUC__) && SECP256K1_GNUC_PREREQ(3, 4)
# define SECP256K1_ARG_NONNULL(_x) __attribute__ ((__nonnull__(_x)))
# else
# define SECP256K1_ARG_NONNULL(_x)
# endif
/** Opaque data structure that holds context information (precomputed tables etc.).
* Only functions that take a pointer to a non-const context require exclusive
* access to it. Multiple functions that take a pointer to a const context may
* run simultaneously.
*/
typedef struct secp256k1_context_struct secp256k1_context_t;
/** Flags to pass to secp256k1_context_create. */
# define SECP256K1_CONTEXT_VERIFY (1 << 0)
# define SECP256K1_CONTEXT_SIGN (1 << 1)
/** Create a secp256k1 context object.
* Returns: a newly created context object.
* In: flags: which parts of the context to initialize.
*/
secp256k1_context_t* secp256k1_context_create(
int flags
) SECP256K1_WARN_UNUSED_RESULT;
/** Copies a secp256k1 context object.
* Returns: a newly created context object.
* In: ctx: an existing context to copy
*/
secp256k1_context_t* secp256k1_context_clone(
const secp256k1_context_t* ctx
) SECP256K1_WARN_UNUSED_RESULT;
/** Destroy a secp256k1 context object.
* The context pointer may not be used afterwards.
*/
void secp256k1_context_destroy(
secp256k1_context_t* ctx
) SECP256K1_ARG_NONNULL(1);
/** Set a callback function to be called when an illegal argument is passed to
* an API call. The philosophy is that these shouldn't be dealt with through a
* specific return value, as calling code should not have branches to deal with
* the case that this code itself is broken.
* On the other hand, during debug stage, one would want to be informed about
* such mistakes, and the default (crashing) may be inadvisable.
* When this callback is triggered, the API function called is guaranteed not
* to cause a crash, though its return value and output arguments are
* undefined.
*/
void secp256k1_context_set_illegal_callback(
secp256k1_context_t* ctx,
void (*fun)(const char* message, void* data),
void* data
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Set a callback function to be called when an internal consistency check
* fails. The default is crashing.
* This can only trigger in case of a hardware failure, miscompilation,
* memory corruption, serious bug in the library, or other error that can
* otherwise result in undefined behaviour. It will not trigger due to mere
* incorrect usage of the API (see secp256k1_context_set_illegal_callback
* for that). After this callback returns, anything may happen, including
* crashing.
*/
void secp256k1_context_set_error_callback(
secp256k1_context_t* ctx,
void (*fun)(const char* message, void* data),
void* data
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Data type to hold a parsed and valid public key.
This data type should be considered opaque to the user, and only created
through API functions. It is not guaranteed to be compatible between
different implementations. If you need to convert to a format suitable
for storage or transmission, use secp256k1_ec_pubkey_serialize and
secp256k1_ec_pubkey_parse.
*/
typedef struct {
unsigned char data[64];
} secp256k1_pubkey_t;
/** Parse a variable-length public key into the pubkey object.
* Returns: 1 if the public key was fully valid.
* 0 if the public key could not be parsed or is invalid.
* In: ctx: a secp256k1 context object.
* input: pointer to a serialized public key
* inputlen: length of the array pointed to by input
* Out: pubkey: pointer to a pubkey object. If 1 is returned, it is set to a
* parsed version of input. If not, its value is undefined.
* This function supports parsing compressed (33 bytes, header byte 0x02 or
* 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header
* byte 0x06 or 0x07) format public keys.
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_parse(
const secp256k1_context_t* ctx,
secp256k1_pubkey_t* pubkey,
const unsigned char *input,
int inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Serialize a pubkey object into a serialized byte sequence.
* Returns: 1 always.
* In: ctx: a secp256k1 context object.
* pubkey: a pointer to a secp256k1_pubkey_t containing an initialized
* public key.
* compressed: whether to serialize in compressed format.
* Out: output: a pointer to a 65-byte (if compressed==0) or 33-byte (if
* compressed==1) byte array to place the serialized key in.
* outputlen: a pointer to an integer which will contain the serialized
* size.
*/
int secp256k1_ec_pubkey_serialize(
const secp256k1_context_t* ctx,
unsigned char *output,
int *outputlen,
const secp256k1_pubkey_t* pubkey,
int compressed
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Data type to hold a parsed ECDSA signature, optionally supporting pubkey
* recovery.
This data type should be considered opaque to the user, and only created
through API functions. It is not guaranteed to be compatible between
different implementations. If you need to convert to a format suitable
for storage or transmission, use secp256k1_ecdsa_signature_serialize_* and
secp256k1_ecdsa_signature_parse_* functions. */
typedef struct {
unsigned char data[65];
} secp256k1_ecdsa_signature_t;
/** Parse a DER ECDSA signature.
* Returns: 1 when the signature could be parsed, 0 otherwise.
* In: ctx: a secp256k1 context object
* input: a pointer to the signature to be parsed
* inputlen: the length of the array pointed to by input
* Out: sig: a pointer to a signature object
*
* Note that this function also supports some violations of DER.
*
* The resulting signature object will not support pubkey recovery.
*/
int secp256k1_ecdsa_signature_parse_der(
const secp256k1_context_t* ctx,
secp256k1_ecdsa_signature_t* sig,
const unsigned char *input,
int inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Parse a compact ECDSA signature (64 bytes + recovery id).
* Returns: 1 when the signature could be parsed, 0 otherwise
* In: ctx: a secp256k1 context object
* input64: a pointer to a 64-byte compact signature
* recid: the recovery id (0, 1, 2 or 3, or -1 for unknown)
* Out: sig: a pointer to a signature object
*
* If recid is not -1, the resulting signature object will support pubkey
* recovery.
*/
int secp256k1_ecdsa_signature_parse_compact(
const secp256k1_context_t* ctx,
secp256k1_ecdsa_signature_t* sig,
const unsigned char *input64,
int recid
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Serialize an ECDSA signature in DER format.
* Returns: 1 if enough space was available to serialize, 0 otherwise
* In: ctx: a secp256k1 context object
* sig: a pointer to an initialized signature object
* Out: output: a pointer to an array to store the DER serialization
* In/Out: outputlen: a pointer to a length integer. Initially, this integer
* should be set to the length of output. After the call
* it will be set to the length of the serialization (even
* if 0 was returned).
*/
int secp256k1_ecdsa_signature_serialize_der(
const secp256k1_context_t* ctx,
unsigned char *output,
int *outputlen,
const secp256k1_ecdsa_signature_t* sig
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Serialize an ECDSA signature in compact format (64 bytes + recovery id).
* Returns: 1
* In: ctx: a secp256k1 context object
* sig: a pointer to an initialized signature object (cannot be NULL)
* Out: output64: a pointer to a 64-byte array of the compact signature (cannot be NULL)
* recid: a pointer to an integer to hold the recovery id (can be NULL).
*
* If recid is not NULL, the signature must support pubkey recovery.
*/
int secp256k1_ecdsa_signature_serialize_compact(
const secp256k1_context_t* ctx,
unsigned char *output64,
int *recid,
const secp256k1_ecdsa_signature_t* sig
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4);
/** Verify an ECDSA signature.
* Returns: 1: correct signature
* 0: incorrect or unparseable signature
* In: ctx: a secp256k1 context object, initialized for verification.
* msg32: the 32-byte message hash being verified (cannot be NULL)
* sig: the signature being verified (cannot be NULL)
* pubkey: pointer to an initialized public key to verify with (cannot be NULL)
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_verify(
const secp256k1_context_t* ctx,
const unsigned char *msg32,
const secp256k1_ecdsa_signature_t *sig,
const secp256k1_pubkey_t *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** A pointer to a function to deterministically generate a nonce.
* Returns: 1 if a nonce was successfully generated. 0 will cause signing to fail.
* In: msg32: the 32-byte message hash being signed (will not be NULL)
* key32: pointer to a 32-byte secret key (will not be NULL)
* algo16: pointer to a 16-byte array describing the signature
* algorithm (will be NULL for ECDSA for compatibility).
* attempt: how many iterations we have tried to find a nonce.
* This will almost always be 0, but different attempt values
* are required to result in a different nonce.
* data: Arbitrary data pointer that is passed through.
* Out: nonce32: pointer to a 32-byte array to be filled by the function.
* Except for test cases, this function should compute some cryptographic hash of
* the message, the key and the attempt.
*/
typedef int (*secp256k1_nonce_function_t)(
unsigned char *nonce32,
const unsigned char *msg32,
const unsigned char *key32,
const unsigned char *algo16,
unsigned int attempt,
const void *data
);
/** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function.
* If a data pointer is passed, it is assumed to be a pointer to 32 bytes of
* extra entropy.
*/
extern const secp256k1_nonce_function_t secp256k1_nonce_function_rfc6979;
/** A default safe nonce generation function (currently equal to secp256k1_nonce_function_rfc6979). */
extern const secp256k1_nonce_function_t secp256k1_nonce_function_default;
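/* Editor's illustration, not part of this header: a hypothetical callback
 * matching the secp256k1_nonce_function_t typedef above. It returns a
 * predictable nonce and exists only to show the calling convention; using a
 * predictable nonce for real signatures leaks the secret key. */
static int demo_nonce_function(unsigned char *nonce32, const unsigned char *msg32,
                               const unsigned char *key32, const unsigned char *algo16,
                               unsigned int attempt, const void *data) {
    unsigned int i;
    (void)msg32; (void)key32; (void)algo16; (void)data;
    for (i = 0; i < 32; i++) nonce32[i] = (unsigned char)(i + attempt);
    return 1; /* 1 = nonce produced; returning 0 makes signing fail */
}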
/** Create an ECDSA signature.
* Returns: 1: signature created
* 0: the nonce generation function failed, or the private key was invalid.
* In: ctx: pointer to a context object, initialized for signing (cannot be NULL)
* msg32: the 32-byte message hash being signed (cannot be NULL)
* seckey: pointer to a 32-byte secret key (cannot be NULL)
* noncefp:pointer to a nonce generation function. If NULL, secp256k1_nonce_function_default is used
* ndata: pointer to arbitrary data used by the nonce generation function (can be NULL)
* Out: sig: pointer to an array where the signature will be placed (cannot be NULL)
*
* The resulting signature will support pubkey recovery.
*
* The sig always has an s value in the lower half of the range (from 0x1
* to 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0,
* inclusive), unlike many other implementations.
* With ECDSA a third party can forge a second distinct signature
* of the same message given a single initial signature without knowing
* the key by setting s to its additive inverse mod-order, 'flipping' the
* sign of the random point R which is not included in the signature.
* Since the forgery is of the same message this isn't universally
* problematic, but in systems where message malleability or uniqueness
* of signatures is important this can cause issues. This forgery can be
* blocked by all verifiers forcing signers to use a canonical form. The
* lower-S form reduces the size of signatures slightly on average when
* variable length encodings (such as DER) are used and is cheap to
* verify, making it a good choice. Security of always using lower-S is
* assured because anyone can trivially modify a signature after the
* fact to enforce this property. Adjusting it inside the signing
* function avoids the need to re-serialize or have curve specific
* constants outside of the library. By always using a canonical form
* even in applications where it isn't needed it becomes possible to
* impose a requirement later if a need is discovered.
* No other forms of ECDSA malleability are known and none seem likely,
* but there is no formal proof that ECDSA, even with this additional
* restriction, is free of other malleability. Commonly used serialization
* schemes will also accept various non-unique encodings, so care should
* be taken when this property is required for an application.
*/
int secp256k1_ecdsa_sign(
const secp256k1_context_t* ctx,
const unsigned char *msg32,
secp256k1_ecdsa_signature_t *sig,
const unsigned char *seckey,
secp256k1_nonce_function_t noncefp,
const void *ndata
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Recover an ECDSA public key from a signature.
* Returns: 1: public key successfully recovered (which guarantees a correct signature).
* 0: otherwise.
* In: ctx: pointer to a context object, initialized for verification (cannot be NULL)
* msg32: the 32-byte message hash assumed to be signed (cannot be NULL)
* sig64: pointer to initialized signature that supports pubkey recovery (cannot be NULL)
* Out: pubkey: pointer to the recovered public key (cannot be NULL)
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_recover(
const secp256k1_context_t* ctx,
const unsigned char *msg32,
const secp256k1_ecdsa_signature_t *sig,
secp256k1_pubkey_t *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Verify an ECDSA secret key.
* Returns: 1: secret key is valid
* 0: secret key is invalid
* In: ctx: pointer to a context object (cannot be NULL)
* seckey: pointer to a 32-byte secret key (cannot be NULL)
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_seckey_verify(
const secp256k1_context_t* ctx,
const unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Compute the public key for a secret key.
* In: ctx: pointer to a context object, initialized for signing (cannot be NULL)
* seckey: pointer to a 32-byte private key (cannot be NULL)
* Out: pubkey: pointer to the created public key (cannot be NULL)
* Returns: 1: secret was valid, public key stored
* 0: secret was invalid, try again
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_create(
const secp256k1_context_t* ctx,
secp256k1_pubkey_t *pubkey,
const unsigned char *seckey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Export a private key in DER format.
* In: ctx: pointer to a context object, initialized for signing (cannot be NULL)
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_export(
const secp256k1_context_t* ctx,
const unsigned char *seckey,
unsigned char *privkey,
int *privkeylen,
int compressed
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Import a private key in DER format. */
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_import(
const secp256k1_context_t* ctx,
unsigned char *seckey,
const unsigned char *privkey,
int privkeylen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Tweak a private key by adding tweak to it. */
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_add(
const secp256k1_context_t* ctx,
unsigned char *seckey,
const unsigned char *tweak
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Tweak a public key by adding tweak times the generator to it.
* In: ctx: pointer to a context object, initialized for verification (cannot be NULL)
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_add(
const secp256k1_context_t* ctx,
secp256k1_pubkey_t *pubkey,
const unsigned char *tweak
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Tweak a private key by multiplying it with tweak. */
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_mul(
const secp256k1_context_t* ctx,
unsigned char *seckey,
const unsigned char *tweak
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Tweak a public key by multiplying it with tweak.
* In: ctx: pointer to a context object, initialized for verification (cannot be NULL)
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul(
const secp256k1_context_t* ctx,
secp256k1_pubkey_t *pubkey,
const unsigned char *tweak
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Updates the context randomization.
* Returns: 1: randomization successfully updated
* 0: error
* In: ctx: pointer to a context object (cannot be NULL)
* seed32: pointer to a 32-byte random seed (NULL resets to initial state)
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize(
secp256k1_context_t* ctx,
const unsigned char *seed32
) SECP256K1_ARG_NONNULL(1);
/** Add a number of public keys together.
* Returns: 1: the sum of the public keys is valid.
* 0: the sum of the public keys is not valid.
* In: ctx: pointer to a context object
* out: pointer to pubkey for placing the resulting public key
* (cannot be NULL)
* n: the number of public keys to add together (must be at least 1)
* ins: pointer to array of pointers to public keys (cannot be NULL)
* Use secp256k1_ec_pubkey_compress and secp256k1_ec_pubkey_decompress if the
* uncompressed format is needed.
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_combine(
const secp256k1_context_t* ctx,
secp256k1_pubkey_t *out,
int n,
const secp256k1_pubkey_t * const * ins
) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4);
# ifdef __cplusplus
}
# endif
#endif
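A small round-trip sketch for the pubkey serialize/parse pair declared above (not part of this commit; the toy secret key and include path are illustrative assumptions):
#include <stdio.h>
#include "include/secp256k1.h"

int main(void) {
    secp256k1_context_t *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
    secp256k1_pubkey_t pub, parsed;
    unsigned char seckey[32], out[65];
    int outlen = 0, i;

    for (i = 0; i < 32; i++) seckey[i] = (unsigned char)(i + 1); /* toy key */
    if (!secp256k1_ec_pubkey_create(ctx, &pub, seckey)) return 1;

    /* compressed==1 -> 33-byte encoding, header byte 0x02 or 0x03 */
    secp256k1_ec_pubkey_serialize(ctx, out, &outlen, &pub, 1);
    printf("serialized %d bytes, header 0x%02x\n", outlen, out[0]);

    /* parse accepts compressed, uncompressed, and hybrid encodings */
    if (!secp256k1_ec_pubkey_parse(ctx, &parsed, out, outlen)) return 1;
    secp256k1_context_destroy(ctx);
    return 0;
}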

@@ -0,0 +1,30 @@
#ifndef _SECP256K1_ECDH_
# define _SECP256K1_ECDH_
# include "secp256k1.h"
# ifdef __cplusplus
extern "C" {
# endif
/** Compute an EC Diffie-Hellman secret in constant time
* Returns: 1: exponentiation was successful
* 0: scalar was invalid (zero or overflow)
* In: ctx: pointer to a context object (cannot be NULL)
* point: pointer to a public point
* scalar: a 32-byte scalar with which to multiply the point
* Out: result: a 32-byte array which will be populated by an ECDH
* secret computed from the point and scalar
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdh(
const secp256k1_context_t* ctx,
unsigned char *result,
const secp256k1_pubkey_t *point,
const unsigned char *scalar
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
# ifdef __cplusplus
}
# endif
#endif
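A minimal two-party sketch of the call above (not part of this commit; include paths and the fixed toy keys are assumptions for illustration, and real keys must come from a CSPRNG):
#include <stdio.h>
#include <string.h>
#include "secp256k1.h"
#include "secp256k1_ecdh.h"

int main(void) {
    secp256k1_context_t *ctx;
    secp256k1_pubkey_t alice_pub, bob_pub;
    unsigned char alice_sec[32], bob_sec[32];
    unsigned char secret_a[32], secret_b[32];
    int i;

    ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    /* Fixed toy keys for illustration only. */
    for (i = 0; i < 32; i++) {
        alice_sec[i] = (unsigned char)(i + 1);
        bob_sec[i] = (unsigned char)(101 - i);
    }
    if (!secp256k1_ec_pubkey_create(ctx, &alice_pub, alice_sec)) return 1;
    if (!secp256k1_ec_pubkey_create(ctx, &bob_pub, bob_sec)) return 1;

    /* Each side combines its own secret scalar with the other's public point. */
    if (!secp256k1_ecdh(ctx, secret_a, &bob_pub, alice_sec)) return 1;
    if (!secp256k1_ecdh(ctx, secret_b, &alice_pub, bob_sec)) return 1;

    printf("shared secrets match: %d\n", memcmp(secret_a, secret_b, 32) == 0);
    secp256k1_context_destroy(ctx);
    return 0;
}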

@@ -0,0 +1,173 @@
#ifndef _SECP256K1_SCHNORR_
# define _SECP256K1_SCHNORR_
# include "secp256k1.h"
# ifdef __cplusplus
extern "C" {
# endif
/** Create a signature using a custom EC-Schnorr-SHA256 construction. It
* produces non-malleable 64-byte signatures which support public key recovery,
* batch validation, and multiparty signing.
* Returns: 1: signature created
* 0: the nonce generation function failed, or the private key was
* invalid.
* In: ctx: pointer to a context object, initialized for signing
* (cannot be NULL)
* msg32: the 32-byte message hash being signed (cannot be NULL)
* seckey: pointer to a 32-byte secret key (cannot be NULL)
* noncefp:pointer to a nonce generation function. If NULL,
* secp256k1_nonce_function_default is used
* ndata: pointer to arbitrary data used by the nonce generation
* function (can be NULL)
* Out: sig64: pointer to a 64-byte array where the signature will be
* placed (cannot be NULL)
*/
int secp256k1_schnorr_sign(
const secp256k1_context_t* ctx,
const unsigned char *msg32,
unsigned char *sig64,
const unsigned char *seckey,
secp256k1_nonce_function_t noncefp,
const void *ndata
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Verify a signature created by secp256k1_schnorr_sign.
* Returns: 1: correct signature
* 0: incorrect signature
* In: ctx: a secp256k1 context object, initialized for verification.
* msg32: the 32-byte message hash being verified (cannot be NULL)
* sig64: the 64-byte signature being verified (cannot be NULL)
* pubkey: the public key to verify with (cannot be NULL)
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_verify(
const secp256k1_context_t* ctx,
const unsigned char *msg32,
const unsigned char *sig64,
const secp256k1_pubkey_t *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Recover an EC public key from a Schnorr signature created using
* secp256k1_schnorr_sign.
* Returns: 1: public key successfully recovered (which guarantees a correct
* signature).
* 0: otherwise.
* In: ctx: pointer to a context object, initialized for
* verification (cannot be NULL)
* msg32: the 32-byte message hash assumed to be signed (cannot
* be NULL)
* sig64: signature as 64 byte array (cannot be NULL)
* Out: pubkey: pointer to a pubkey to set to the recovered public key
* (cannot be NULL).
*/
int secp256k1_schnorr_recover(
const secp256k1_context_t* ctx,
const unsigned char *msg32,
const unsigned char *sig64,
secp256k1_pubkey_t *pubkey
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Generate a nonce pair deterministically for use with
* secp256k1_schnorr_partial_sign.
* Returns: 1: valid nonce pair was generated.
* 0: otherwise (nonce generation function failed)
* In: ctx: pointer to a context object, initialized for signing
* (cannot be NULL)
* msg32: the 32-byte message hash assumed to be signed (cannot
* be NULL)
* sec32: the 32-byte private key (cannot be NULL)
* noncefp: pointer to a nonce generation function. If NULL,
* secp256k1_nonce_function_default is used
* noncedata: pointer to arbitrary data used by the nonce generation
* function (can be NULL)
* Out: pubnonce: public side of the nonce (cannot be NULL)
* privnonce32: private side of the nonce (32 byte) (cannot be NULL)
*
* Do not use the output as a private/public key pair for signing/validation.
*/
int secp256k1_schnorr_generate_nonce_pair(
const secp256k1_context_t* ctx,
const unsigned char *msg32,
const unsigned char *sec32,
secp256k1_nonce_function_t noncefp,
const void* noncedata,
secp256k1_pubkey_t *pubnonce,
unsigned char *privnonce32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(6) SECP256K1_ARG_NONNULL(7);
/** Produce a partial Schnorr signature, which can be combined using
* secp256k1_schnorr_partial_combine, to end up with a full signature that is
* verifiable using secp256k1_schnorr_verify.
* Returns: 1: signature created successfully.
* 0: no valid signature exists with this combination of keys, nonces
* and message (chance around 1 in 2^128)
* -1: invalid private key, nonce, or public nonces.
* In: ctx: pointer to context object, initialized for signing (cannot
* be NULL)
* msg32: pointer to 32-byte message to sign
* sec32: pointer to 32-byte private key
* secnonce32: pointer to 32-byte array containing our nonce
* pubnonce_others: pointer to pubkey containing the sum of the other's
* nonces (see secp256k1_ec_pubkey_combine)
* Out: sig64: pointer to 64-byte array to put partial signature in
*
* The intended procedure for creating a multiparty signature is:
* - Each signer S[i] with private key x[i] and public key Q[i] runs
* secp256k1_schnorr_generate_nonce_pair to produce a pair (k[i],R[i]) of
* private/public nonces.
* - All signers communicate their public nonces to each other (revealing your
* private nonce can lead to discovery of your private key, so it should be
* considered secret).
* - All signers combine all the public nonces they received (excluding their
* own) using secp256k1_ec_pubkey_combine to obtain an
* Rall[i] = sum(R[0..i-1,i+1..n]).
* - All signers produce a partial signature using
* secp256k1_schnorr_partial_sign, passing in their own private key x[i],
* their own private nonce k[i], and the sum of the others' public nonces
* Rall[i].
* - All signers communicate their partial signatures to each other.
* - Someone combines all partial signatures using
* secp256k1_schnorr_partial_combine, to obtain a full signature.
* - The resulting signature is validatable using secp256k1_schnorr_verify, with
* public key equal to the result of secp256k1_ec_pubkey_combine of the
* signers' public keys (sum(Q[0..n])).
*
* Note that secp256k1_schnorr_partial_combine and secp256k1_ec_pubkey_combine
* functions take their arguments in any order, and it is possible to
* pre-combine several inputs already with one call, and add more inputs later
* by calling the function again (they are commutative and associative).
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_partial_sign(
const secp256k1_context_t* ctx,
const unsigned char *msg32,
unsigned char *sig64,
const unsigned char *sec32,
const unsigned char *secnonce32,
const secp256k1_pubkey_t *pubnonce_others
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6);
/** Combine multiple Schnorr partial signatures.
* Returns: 1: the passed signatures were successfully combined.
* 0: the resulting signature is not valid (chance of 1 in 2^256)
* -1: some inputs were invalid, or the signatures were not created
* using the same set of nonces
* In: ctx: pointer to a context object
* n: the number of signatures to combine (at least 1)
* sig64sin: pointer to an array of n pointers to 64-byte input
* signatures
* Out: sig64: pointer to a 64-byte array to place the combined signature
* (cannot be NULL)
*/
SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_partial_combine(
const secp256k1_context_t* ctx,
unsigned char *sig64,
int n,
const unsigned char * const * sig64sin
) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4);
# ifdef __cplusplus
}
# endif
#endif
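A hypothetical end-to-end sketch of the two-signer flow described in the secp256k1_schnorr_partial_sign comment above (not part of this commit; toy keys and include paths are assumed, and error handling is reduced to early returns):
#include <stdio.h>
#include <string.h>
#include "secp256k1.h"
#include "secp256k1_schnorr.h"

int main(void) {
    secp256k1_context_t *ctx;
    secp256k1_pubkey_t pub[2], pubnonce[2], pub_all;
    const secp256k1_pubkey_t *pub_ptrs[2];
    unsigned char sec[2][32], secnonce[2][32];
    unsigned char part[2][64], sig64[64], msg32[32];
    const unsigned char *part_ptrs[2];
    int i, j;

    ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    memset(msg32, 0x42, sizeof(msg32));
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 32; j++) sec[i][j] = (unsigned char)(j + 1 + 40 * i); /* toy keys */
        if (!secp256k1_ec_pubkey_create(ctx, &pub[i], sec[i])) return 1;
        /* (k[i], R[i]): private/public nonce pair for signer i */
        if (!secp256k1_schnorr_generate_nonce_pair(ctx, msg32, sec[i], NULL, NULL,
                                                   &pubnonce[i], secnonce[i])) return 1;
        pub_ptrs[i] = &pub[i];
        part_ptrs[i] = part[i];
    }
    /* With two parties, "the sum of the others' public nonces" is just the peer's nonce. */
    if (secp256k1_schnorr_partial_sign(ctx, msg32, part[0], sec[0], secnonce[0], &pubnonce[1]) != 1) return 1;
    if (secp256k1_schnorr_partial_sign(ctx, msg32, part[1], sec[1], secnonce[1], &pubnonce[0]) != 1) return 1;
    if (secp256k1_schnorr_partial_combine(ctx, sig64, 2, part_ptrs) != 1) return 1;
    /* The combined signature verifies against the sum of the public keys. */
    if (!secp256k1_ec_pubkey_combine(ctx, &pub_all, 2, pub_ptrs)) return 1;
    printf("schnorr verify: %d\n", secp256k1_schnorr_verify(ctx, msg32, sig64, &pub_all));
    secp256k1_context_destroy(ctx);
    return 0;
}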

@@ -0,0 +1,13 @@
prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
Name: libsecp256k1
Description: Optimized C library for EC operations on curve secp256k1
URL: https://github.com/bitcoin/secp256k1
Version: @PACKAGE_VERSION@
Cflags: -I${includedir}
Libs.private: @SECP_LIBS@
Libs: -L${libdir} -lsecp256k1

0
bf/secp256k1/obj/.gitignore vendored Normal file

@@ -0,0 +1,32 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef _SECP256K1_BASIC_CONFIG_
#define _SECP256K1_BASIC_CONFIG_
#ifdef USE_BASIC_CONFIG
#undef USE_ASM_X86_64
#undef USE_ENDOMORPHISM
#undef USE_FIELD_10X26
#undef USE_FIELD_5X52
#undef USE_FIELD_INV_BUILTIN
#undef USE_FIELD_INV_NUM
#undef USE_NUM_GMP
#undef USE_NUM_NONE
#undef USE_SCALAR_4X64
#undef USE_SCALAR_8X32
#undef USE_SCALAR_INV_BUILTIN
#undef USE_SCALAR_INV_NUM
#define USE_NUM_NONE 1
#define USE_FIELD_INV_BUILTIN 1
#define USE_SCALAR_INV_BUILTIN 1
#define USE_FIELD_10X26 1
#define USE_SCALAR_8X32 1
#endif /* USE_BASIC_CONFIG */
#endif /* _SECP256K1_BASIC_CONFIG_ */

56
bf/secp256k1/src/bench.h Normal file
@@ -0,0 +1,56 @@
/**********************************************************************
* Copyright (c) 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef _SECP256K1_BENCH_H_
#define _SECP256K1_BENCH_H_
#include <stdio.h>
#include <math.h>
#include "sys/time.h"
static double gettimedouble(void) {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_usec * 0.000001 + tv.tv_sec;
}
void print_number(double x) {
double y = x;
int c = 0;
if (y < 0.0) y = -y;
while (y < 100.0) {
y *= 10.0;
c++;
}
printf("%.*f", c, x);
}
void run_benchmark(char *name, void (*benchmark)(void*), void (*setup)(void*), void (*teardown)(void*), void* data, int count, int iter) {
int i;
double min = HUGE_VAL;
double sum = 0.0;
double max = 0.0;
for (i = 0; i < count; i++) {
double begin, total;
if (setup) setup(data);
begin = gettimedouble();
benchmark(data);
total = gettimedouble() - begin;
if (teardown) teardown(data);
if (total < min) min = total;
if (total > max) max = total;
sum += total;
}
printf("%s: min ", name);
print_number(min * 1000000.0 / iter);
printf("us / avg ");
print_number((sum / count) * 1000000.0 / iter);
printf("us / max ");
print_number(max * 1000000.0 / iter);
printf("us\n");
}
#endif
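A tiny hypothetical driver for the harness above (not part of this commit), showing how count (outer timed runs) and iter (inner iterations per run) interact with the per-iteration times that run_benchmark prints:
#include "bench.h"

typedef struct { volatile double acc; } demo_t;

static void demo_setup(void* arg) { ((demo_t*)arg)->acc = 1.0; }

static void demo_run(void* arg) {
    demo_t *d = (demo_t*)arg;
    int i;
    for (i = 0; i < 1000000; i++) d->acc *= 1.0000001; /* the work being timed */
}

int main(void) {
    demo_t d;
    /* 10 timed runs; each run performs 1000000 iterations, so passing
     * iter=1000000 makes run_benchmark report time per iteration. */
    run_benchmark("demo_mul", demo_run, demo_setup, NULL, &d, 10, 1000000);
    return 0;
}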

@@ -0,0 +1,51 @@
/**********************************************************************
* Copyright (c) 2015 Pieter Wuille, Andrew Poelstra *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#include <string.h>
#include "include/secp256k1.h"
#include "include/secp256k1_ecdh.h"
#include "util.h"
#include "bench.h"
typedef struct {
secp256k1_context_t *ctx;
secp256k1_pubkey_t point;
unsigned char scalar[32];
} bench_ecdh_t;
static void bench_ecdh_setup(void* arg) {
int i;
bench_ecdh_t *data = (bench_ecdh_t*)arg;
const unsigned char point[] = {
0x03,
0x54, 0x94, 0xc1, 0x5d, 0x32, 0x09, 0x97, 0x06,
0xc2, 0x39, 0x5f, 0x94, 0x34, 0x87, 0x45, 0xfd,
0x75, 0x7c, 0xe3, 0x0e, 0x4e, 0x8c, 0x90, 0xfb,
0xa2, 0xba, 0xd1, 0x84, 0xf8, 0x83, 0xc6, 0x9f
};
data->ctx = secp256k1_context_create(0);
for (i = 0; i < 32; i++) data->scalar[i] = i + 1;
CHECK(secp256k1_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1);
}
static void bench_ecdh(void* arg) {
int i;
unsigned char res[32];
bench_ecdh_t *data = (bench_ecdh_t*)arg;
for (i = 0; i < 20000; i++) {
CHECK(secp256k1_ecdh(data->ctx, res, &data->point, data->scalar) == 1);
}
}
int main(void) {
bench_ecdh_t data;
run_benchmark("ecdh", bench_ecdh, bench_ecdh_setup, NULL, &data, 10, 20000);
return 0;
}

@@ -0,0 +1,331 @@
/**********************************************************************
* Copyright (c) 2014-2015 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#include <stdio.h>
#include "include/secp256k1.h"
#include "util.h"
#include "hash_impl.h"
#include "num_impl.h"
#include "field_impl.h"
#include "group_impl.h"
#include "scalar_impl.h"
#include "ecmult_const_impl.h"
#include "ecmult_impl.h"
#include "bench.h"
typedef struct {
secp256k1_scalar_t scalar_x, scalar_y;
secp256k1_fe_t fe_x, fe_y;
secp256k1_ge_t ge_x, ge_y;
secp256k1_gej_t gej_x, gej_y;
unsigned char data[64];
int wnaf[256];
} bench_inv_t;
void bench_setup(void* arg) {
bench_inv_t *data = (bench_inv_t*)arg;
static const unsigned char init_x[32] = {
0x02, 0x03, 0x05, 0x07, 0x0b, 0x0d, 0x11, 0x13,
0x17, 0x1d, 0x1f, 0x25, 0x29, 0x2b, 0x2f, 0x35,
0x3b, 0x3d, 0x43, 0x47, 0x49, 0x4f, 0x53, 0x59,
0x61, 0x65, 0x67, 0x6b, 0x6d, 0x71, 0x7f, 0x83
};
static const unsigned char init_y[32] = {
0x82, 0x83, 0x85, 0x87, 0x8b, 0x8d, 0x81, 0x83,
0x97, 0xad, 0xaf, 0xb5, 0xb9, 0xbb, 0xbf, 0xc5,
0xdb, 0xdd, 0xe3, 0xe7, 0xe9, 0xef, 0xf3, 0xf9,
0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3
};
secp256k1_scalar_set_b32(&data->scalar_x, init_x, NULL);
secp256k1_scalar_set_b32(&data->scalar_y, init_y, NULL);
secp256k1_fe_set_b32(&data->fe_x, init_x);
secp256k1_fe_set_b32(&data->fe_y, init_y);
CHECK(secp256k1_ge_set_xo_var(&data->ge_x, &data->fe_x, 0));
CHECK(secp256k1_ge_set_xo_var(&data->ge_y, &data->fe_y, 1));
secp256k1_gej_set_ge(&data->gej_x, &data->ge_x);
secp256k1_gej_set_ge(&data->gej_y, &data->ge_y);
memcpy(data->data, init_x, 32);
memcpy(data->data + 32, init_y, 32);
}
void bench_scalar_add(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 2000000; i++) {
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
void bench_scalar_negate(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 2000000; i++) {
secp256k1_scalar_negate(&data->scalar_x, &data->scalar_x);
}
}
void bench_scalar_sqr(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_scalar_sqr(&data->scalar_x, &data->scalar_x);
}
}
void bench_scalar_mul(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_scalar_mul(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
#ifdef USE_ENDOMORPHISM
void bench_scalar_split(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_scalar_t l, r;
secp256k1_scalar_split_lambda(&l, &r, &data->scalar_x);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
#endif
void bench_scalar_inverse(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 2000; i++) {
secp256k1_scalar_inverse(&data->scalar_x, &data->scalar_x);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
void bench_scalar_inverse_var(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 2000; i++) {
secp256k1_scalar_inverse_var(&data->scalar_x, &data->scalar_x);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
void bench_field_normalize(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 2000000; i++) {
secp256k1_fe_normalize(&data->fe_x);
}
}
void bench_field_normalize_weak(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 2000000; i++) {
secp256k1_fe_normalize_weak(&data->fe_x);
}
}
void bench_field_mul(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_fe_mul(&data->fe_x, &data->fe_x, &data->fe_y);
}
}
void bench_field_sqr(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_fe_sqr(&data->fe_x, &data->fe_x);
}
}
void bench_field_inverse(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_fe_inv(&data->fe_x, &data->fe_x);
secp256k1_fe_add(&data->fe_x, &data->fe_y);
}
}
void bench_field_inverse_var(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_fe_inv_var(&data->fe_x, &data->fe_x);
secp256k1_fe_add(&data->fe_x, &data->fe_y);
}
}
void bench_field_sqrt_var(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_fe_sqrt_var(&data->fe_x, &data->fe_x);
secp256k1_fe_add(&data->fe_x, &data->fe_y);
}
}
void bench_group_double_var(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_gej_double_var(&data->gej_x, &data->gej_x, NULL);
}
}
void bench_group_add_var(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_gej_add_var(&data->gej_x, &data->gej_x, &data->gej_y, NULL);
}
}
void bench_group_add_affine(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_gej_add_ge(&data->gej_x, &data->gej_x, &data->ge_y);
}
}
void bench_group_add_affine_var(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 200000; i++) {
secp256k1_gej_add_ge_var(&data->gej_x, &data->gej_x, &data->ge_y, NULL);
}
}
void bench_ecmult_wnaf(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar_x, WINDOW_A);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
void bench_wnaf_const(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
for (i = 0; i < 20000; i++) {
secp256k1_wnaf_const(data->wnaf, data->scalar_x, WINDOW_A);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
void bench_sha256(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
secp256k1_sha256_t sha;
for (i = 0; i < 20000; i++) {
secp256k1_sha256_initialize(&sha);
secp256k1_sha256_write(&sha, data->data, 32);
secp256k1_sha256_finalize(&sha, data->data);
}
}
void bench_hmac_sha256(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
secp256k1_hmac_sha256_t hmac;
for (i = 0; i < 20000; i++) {
secp256k1_hmac_sha256_initialize(&hmac, data->data, 32);
secp256k1_hmac_sha256_write(&hmac, data->data, 32);
secp256k1_hmac_sha256_finalize(&hmac, data->data);
}
}
void bench_rfc6979_hmac_sha256(void* arg) {
int i;
bench_inv_t *data = (bench_inv_t*)arg;
secp256k1_rfc6979_hmac_sha256_t rng;
for (i = 0; i < 20000; i++) {
secp256k1_rfc6979_hmac_sha256_initialize(&rng, data->data, 64);
secp256k1_rfc6979_hmac_sha256_generate(&rng, data->data, 32);
}
}
int have_flag(int argc, char** argv, char *flag) {
char** argm = argv + argc;
argv++;
/* No command line flags at all: treat every benchmark as selected. */
if (argv == argm) {
return 1;
}
while (argv != argm) {
if (strcmp(*argv, flag) == 0) return 1;
argv++;
}
return 0;
}
int main(int argc, char **argv) {
bench_inv_t data;
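/* run_benchmark comes from bench.h; from its use below the parameters are
 * (name, benchmark_fn, setup, teardown, data, runs, iters_per_run). The
 * timing/reporting details are an assumption, since bench.h is not shown
 * in this hunk. */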
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, 2000000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, 2000000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, 200000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, 200000);
#ifdef USE_ENDOMORPHISM
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, 20000);
#endif
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000);
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, 2000000);
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, 2000000);
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqr")) run_benchmark("field_sqr", bench_field_sqr, bench_setup, NULL, &data, 10, 200000);
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "mul")) run_benchmark("field_mul", bench_field_mul, bench_setup, NULL, &data, 10, 200000);
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse", bench_field_inverse, bench_setup, NULL, &data, 10, 20000);
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse_var", bench_field_inverse_var, bench_setup, NULL, &data, 10, 20000);
if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqrt")) run_benchmark("field_sqrt_var", bench_field_sqrt_var, bench_setup, NULL, &data, 10, 20000);
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "double")) run_benchmark("group_double_var", bench_group_double_var, bench_setup, NULL, &data, 10, 200000);
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, 200000);
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, 200000);
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, 200000);
if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, 20000);
if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, 20000);
if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "sha256")) run_benchmark("hash_sha256", bench_sha256, bench_setup, NULL, &data, 10, 20000);
if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "hmac")) run_benchmark("hash_hmac_sha256", bench_hmac_sha256, bench_setup, NULL, &data, 10, 20000);
if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "rng6979")) run_benchmark("hash_rfc6979_hmac_sha256", bench_rfc6979_hmac_sha256, bench_setup, NULL, &data, 10, 20000);
return 0;
}
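/* Usage sketch (binary name assumed from the conventional build of this
 * file, not confirmed by this diff):
 *   ./bench_internal              run every benchmark
 *   ./bench_internal scalar       only the scalar_* benchmarks
 *   ./bench_internal field mul    field benchmarks plus every *_mul one
 * With no flags at all, have_flag() returns 1, so everything runs. */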

55
bf/secp256k1/src/bench_recover.c Normal file

@ -0,0 +1,55 @@
/**********************************************************************
* Copyright (c) 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#include "include/secp256k1.h"
#include "util.h"
#include "bench.h"
typedef struct {
secp256k1_context_t *ctx;
unsigned char msg[32];
unsigned char sig[64];
} bench_recover_t;
void bench_recover(void* arg) {
int i;
bench_recover_t *data = (bench_recover_t*)arg;
secp256k1_pubkey_t pubkey;
unsigned char pubkeyc[33];
for (i = 0; i < 20000; i++) {
int j;
int pubkeylen = 33;
secp256k1_ecdsa_signature_t sig;
CHECK(secp256k1_ecdsa_signature_parse_compact(data->ctx, &sig, data->sig, i % 2));
CHECK(secp256k1_ecdsa_recover(data->ctx, data->msg, &sig, &pubkey));
CHECK(secp256k1_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, 1));
for (j = 0; j < 32; j++) {
data->sig[j + 32] = data->msg[j]; /* Move former message to S. */
data->msg[j] = data->sig[j]; /* Move former R to message. */
data->sig[j] = pubkeyc[j + 1]; /* Move recovered pubkey X coordinate to R (which must be a valid X coordinate). */
}
}
}
void bench_recover_setup(void* arg) {
int i;
bench_recover_t *data = (bench_recover_t*)arg;
for (i = 0; i < 32; i++) data->msg[i] = 1 + i;
for (i = 0; i < 64; i++) data->sig[i] = 65 + i;
}
int main(void) {
bench_recover_t data;
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, 20000);
secp256k1_context_destroy(data.ctx);
return 0;
}

69
bf/secp256k1/src/bench_schnorr_verify.c Normal file

@ -0,0 +1,69 @@
/**********************************************************************
* Copyright (c) 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#include <stdio.h>
#include <string.h>
#include "include/secp256k1.h"
#include "include/secp256k1_schnorr.h"
#include "util.h"
#include "bench.h"
typedef struct {
unsigned char key[32];
unsigned char sig[64];
unsigned char pubkey[33];
int pubkeylen;
} benchmark_schnorr_sig_t;
typedef struct {
secp256k1_context_t *ctx;
unsigned char msg[32];
benchmark_schnorr_sig_t sigs[64];
int numsigs;
} benchmark_schnorr_verify_t;
static void benchmark_schnorr_init(void* arg) {
int i, k;
benchmark_schnorr_verify_t* data = (benchmark_schnorr_verify_t*)arg;
for (i = 0; i < 32; i++) data->msg[i] = 1 + i;
for (k = 0; k < data->numsigs; k++) {
secp256k1_pubkey_t pubkey;
for (i = 0; i < 32; i++) data->sigs[k].key[i] = 33 + i + k;
secp256k1_schnorr_sign(data->ctx, data->msg, data->sigs[k].sig, data->sigs[k].key, NULL, NULL);
data->sigs[k].pubkeylen = 33;
CHECK(secp256k1_ec_pubkey_create(data->ctx, &pubkey, data->sigs[k].key));
CHECK(secp256k1_ec_pubkey_serialize(data->ctx, data->sigs[k].pubkey, &data->sigs[k].pubkeylen, &pubkey, 1));
}
}
static void benchmark_schnorr_verify(void* arg) {
int i;
benchmark_schnorr_verify_t* data = (benchmark_schnorr_verify_t*)arg;
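/* The signature is corrupted in place by XORing one byte with (i & 0xFF)
 * and restored at the end of the iteration, so the verify call below is
 * expected to succeed exactly when that mask is zero and to fail otherwise. */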
for (i = 0; i < 20000 / data->numsigs; i++) {
secp256k1_pubkey_t pubkey;
data->sigs[0].sig[(i >> 8) % 64] ^= (i & 0xFF);
CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->sigs[0].pubkey, data->sigs[0].pubkeylen));
CHECK(secp256k1_schnorr_verify(data->ctx, data->msg, data->sigs[0].sig, &pubkey) == ((i & 0xFF) == 0));
data->sigs[0].sig[(i >> 8) % 64] ^= (i & 0xFF);
}
}
int main(void) {
benchmark_schnorr_verify_t data;
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
data.numsigs = 1;
run_benchmark("schnorr_verify", benchmark_schnorr_verify, benchmark_schnorr_init, NULL, &data, 10, 20000);
secp256k1_context_destroy(data.ctx);
return 0;
}

52
bf/secp256k1/src/bench_sign.c Normal file

@ -0,0 +1,52 @@
/**********************************************************************
* Copyright (c) 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#include "include/secp256k1.h"
#include "util.h"
#include "bench.h"
typedef struct {
secp256k1_context_t* ctx;
unsigned char msg[32];
unsigned char key[32];
} bench_sign_t;
static void bench_sign_setup(void* arg) {
int i;
bench_sign_t *data = (bench_sign_t*)arg;
for (i = 0; i < 32; i++) data->msg[i] = i + 1;
for (i = 0; i < 32; i++) data->key[i] = i + 65;
}
static void bench_sign(void* arg) {
int i;
bench_sign_t *data = (bench_sign_t*)arg;
unsigned char sig[64];
for (i = 0; i < 20000; i++) {
int j;
int recid = 0;
secp256k1_ecdsa_signature_t signature;
CHECK(secp256k1_ecdsa_sign(data->ctx, data->msg, &signature, data->key, NULL, NULL));
CHECK(secp256k1_ecdsa_signature_serialize_compact(data->ctx, sig, &recid, &signature));
for (j = 0; j < 32; j++) {
data->msg[j] = sig[j]; /* Move former R to message. */
data->key[j] = sig[j + 32]; /* Move former S to key. */
}
}
}
int main(void) {
bench_sign_t data;
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
run_benchmark("ecdsa_sign", bench_sign, bench_sign_setup, NULL, &data, 10, 20000);
secp256k1_context_destroy(data.ctx);
return 0;
}

63
bf/secp256k1/src/bench_verify.c Normal file

@ -0,0 +1,63 @@
/**********************************************************************
* Copyright (c) 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#include <stdio.h>
#include <string.h>
#include "include/secp256k1.h"
#include "util.h"
#include "bench.h"
typedef struct {
secp256k1_context_t *ctx;
unsigned char msg[32];
unsigned char key[32];
unsigned char sig[72];
int siglen;
unsigned char pubkey[33];
int pubkeylen;
} benchmark_verify_t;
static void benchmark_verify(void* arg) {
int i;
benchmark_verify_t* data = (benchmark_verify_t*)arg;
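/* Corrupt three signature bytes with the loop counter, verify, then undo
 * the corruption: only i == 0 leaves the signature intact, so exactly one
 * iteration is expected to verify successfully. */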
for (i = 0; i < 20000; i++) {
secp256k1_pubkey_t pubkey;
secp256k1_ecdsa_signature_t sig;
data->sig[data->siglen - 1] ^= (i & 0xFF);
data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF);
data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF);
CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1);
CHECK(secp256k1_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1);
CHECK(secp256k1_ecdsa_verify(data->ctx, data->msg, &sig, &pubkey) == (i == 0));
data->sig[data->siglen - 1] ^= (i & 0xFF);
data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF);
data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF);
}
}
int main(void) {
int i;
secp256k1_pubkey_t pubkey;
secp256k1_ecdsa_signature_t sig;
benchmark_verify_t data;
data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
for (i = 0; i < 32; i++) data.msg[i] = 1 + i;
for (i = 0; i < 32; i++) data.key[i] = 33 + i;
data.siglen = 72;
CHECK(secp256k1_ecdsa_sign(data.ctx, data.msg, &sig, data.key, NULL, NULL));
CHECK(secp256k1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig));
CHECK(secp256k1_ec_pubkey_create(data.ctx, &pubkey, data.key));
data.pubkeylen = 33; /* in/out: buffer size on input, bytes written on output */
CHECK(secp256k1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, 1) == 1);
run_benchmark("ecdsa_verify", benchmark_verify, NULL, NULL, &data, 10, 20000);
secp256k1_context_destroy(data.ctx);
return 0;
}

20
bf/secp256k1/src/ecdsa.h Normal file

@ -0,0 +1,20 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef _SECP256K1_ECDSA_
#define _SECP256K1_ECDSA_
#include "scalar.h"
#include "group.h"
#include "ecmult.h"
static int secp256k1_ecdsa_sig_parse(secp256k1_scalar_t *r, secp256k1_scalar_t *s, const unsigned char *sig, int size);
static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, int *size, const secp256k1_scalar_t *r, const secp256k1_scalar_t *s);
static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context_t *ctx, const secp256k1_scalar_t* r, const secp256k1_scalar_t* s, const secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message);
static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context_t *ctx, secp256k1_scalar_t* r, secp256k1_scalar_t* s, const secp256k1_scalar_t *seckey, const secp256k1_scalar_t *message, const secp256k1_scalar_t *nonce, int *recid);
static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context_t *ctx, const secp256k1_scalar_t* r, const secp256k1_scalar_t* s, secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message, int recid);
#endif

264
bf/secp256k1/src/ecdsa_impl.h Normal file

@ -0,0 +1,264 @@
/**********************************************************************
* Copyright (c) 2013, 2014 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef _SECP256K1_ECDSA_IMPL_H_
#define _SECP256K1_ECDSA_IMPL_H_
#include "scalar.h"
#include "field.h"
#include "group.h"
#include "ecmult.h"
#include "ecmult_gen.h"
#include "ecdsa.h"
/** Group order for secp256k1 defined as 'n' in "Standards for Efficient Cryptography" (SEC2) 2.7.1
* sage: for t in xrange(1023, -1, -1):
* .. p = 2**256 - 2**32 - t
* .. if p.is_prime():
* .. print '%x'%p
* .. break
* 'fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f'
* sage: a = 0
* sage: b = 7
* sage: F = FiniteField (p)
* sage: '%x' % (EllipticCurve ([F (a), F (b)]).order())
* 'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141'
*/
static const secp256k1_fe_t secp256k1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST(
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL
);
/** Difference between field and order, i.e. the values 'p' and 'n' defined in
* "Standards for Efficient Cryptography" (SEC2) 2.7.1.
* sage: p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
* sage: a = 0
* sage: b = 7
* sage: F = FiniteField (p)
* sage: '%x' % (p - EllipticCurve ([F (a), F (b)]).order())
* '14551231950b75fc4402da1722fc9baee'
*/
static const secp256k1_fe_t secp256k1_ecdsa_const_p_minus_order = SECP256K1_FE_CONST(
0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL
);
static int secp256k1_ecdsa_sig_parse(secp256k1_scalar_t *rr, secp256k1_scalar_t *rs, const unsigned char *sig, int size) {
unsigned char ra[32] = {0}, sa[32] = {0};
const unsigned char *rp;
const unsigned char *sp;
int lenr;
int lens;
int overflow;
if (sig[0] != 0x30) {
return 0;
}
lenr = sig[3];
if (5+lenr >= size) {
return 0;
}
lens = sig[lenr+5];
if (sig[1] != lenr+lens+4) {
return 0;
}
if (lenr+lens+6 > size) {
return 0;
}
if (sig[2] != 0x02) {
return 0;
}
if (lenr == 0) {
return 0;
}
if (sig[lenr+4] != 0x02) {
return 0;
}
if (lens == 0) {
return 0;
}
sp = sig + 6 + lenr;
while (lens > 0 && sp[0] == 0) {
lens--;
sp++;
}
if (lens > 32) {
return 0;
}
rp = sig + 4;
while (lenr > 0 && rp[0] == 0) {
lenr--;
rp++;
}
if (lenr > 32) {
return 0;
}
memcpy(ra + 32 - lenr, rp, lenr);
memcpy(sa + 32 - lens, sp, lens);
overflow = 0;
secp256k1_scalar_set_b32(rr, ra, &overflow);
if (overflow) {
return 0;
}
secp256k1_scalar_set_b32(rs, sa, &overflow);
if (overflow) {
return 0;
}
return 1;
}
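/* For reference, the DER layout assumed by the parser above and the
 * serializer below (byte values illustrative, not taken from this diff):
 *   30 <len> 02 <lenR> <R bytes> 02 <lenS> <S bytes>
 * e.g. with lenR = lenS = 0x20 the encoding starts 30 44 02 20, since
 * 0x44 = 4 + lenR + lenS. */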
static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, int *size, const secp256k1_scalar_t* ar, const secp256k1_scalar_t* as) {
unsigned char r[33] = {0}, s[33] = {0};
unsigned char *rp = r, *sp = s;
int lenR = 33, lenS = 33;
secp256k1_scalar_get_b32(&r[1], ar);
secp256k1_scalar_get_b32(&s[1], as);
while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; }
while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; }
if (*size < 6+lenS+lenR) {
*size = 6 + lenS + lenR;
return 0;
}
*size = 6 + lenS + lenR;
sig[0] = 0x30;
sig[1] = 4 + lenS + lenR;
sig[2] = 0x02;
sig[3] = lenR;
memcpy(sig+4, rp, lenR);
sig[4+lenR] = 0x02;
sig[5+lenR] = lenS;
memcpy(sig+lenR+6, sp, lenS);
return 1;
}
static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context_t *ctx, const secp256k1_scalar_t *sigr, const secp256k1_scalar_t *sigs, const secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message) {
unsigned char c[32];
secp256k1_scalar_t sn, u1, u2;
secp256k1_fe_t xr;
secp256k1_gej_t pubkeyj;
secp256k1_gej_t pr;
if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
return 0;
}
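/* Textbook ECDSA verification: with u1 = message/s and u2 = r/s (mod n),
 * recompute R' = u1*G + u2*Q and accept iff X(R') mod n == r. */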
secp256k1_scalar_inverse_var(&sn, sigs);
secp256k1_scalar_mul(&u1, &sn, message);
secp256k1_scalar_mul(&u2, &sn, sigr);
secp256k1_gej_set_ge(&pubkeyj, pubkey);
secp256k1_ecmult(ctx, &pr, &pubkeyj, &u2, &u1);
if (secp256k1_gej_is_infinity(&pr)) {
return 0;
}
secp256k1_scalar_get_b32(c, sigr);
secp256k1_fe_set_b32(&xr, c);
/** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n)
* in xr. Naively, we would extract the x coordinate from pr (requiring an inversion modulo p),
* compute the remainder modulo n, and compare it to xr. However:
*
* xr == X(pr) mod n
* <=> exists h. (xr + h * n < p && xr + h * n == X(pr))
* [Since 2 * n > p, h can only be 0 or 1]
* <=> (xr == X(pr)) || (xr + n < p && xr + n == X(pr))
* [In Jacobian coordinates, X(pr) is pr.x / pr.z^2 mod p]
* <=> (xr == pr.x / pr.z^2 mod p) || (xr + n < p && xr + n == pr.x / pr.z^2 mod p)
* [Multiplying both sides of the equations by pr.z^2 mod p]
* <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x)
*
* Thus, we can avoid the inversion, but we have to check both cases separately.
* secp256k1_gej_eq_x_var implements the (xr * pr.z^2 mod p == pr.x) test.
*/
if (secp256k1_gej_eq_x_var(&xr, &pr)) {
/* xr * pr.z^2 mod p == pr.x, so the signature is valid. */
return 1;
}
if (secp256k1_fe_cmp_var(&xr, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
/* xr + n >= p, so we can skip testing the second case. */
return 0;
}
secp256k1_fe_add(&xr, &secp256k1_ecdsa_const_order_as_fe);
if (secp256k1_gej_eq_x_var(&xr, &pr)) {
/* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid. */
return 1;
}
return 0;
}
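/* For intuition only: a sketch of the x-comparison used above. The real
 * secp256k1_gej_eq_x_var lives in the group module and is not shown in
 * this diff, so treat this body as an assumption about its shape. */
static int secp256k1_gej_eq_x_sketch(const secp256k1_fe_t *x, const secp256k1_gej_t *a) {
secp256k1_fe_t xz2, ax;
secp256k1_fe_sqr(&xz2, &a->z); /* xz2 = z^2 */
secp256k1_fe_mul(&xz2, &xz2, x); /* xz2 = x * z^2 */
ax = a->x;
secp256k1_fe_normalize_weak(&ax);
return secp256k1_fe_equal_var(&xz2, &ax); /* x * z^2 == X(a) in Jacobian form */
}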
static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context_t *ctx, const secp256k1_scalar_t *sigr, const secp256k1_scalar_t* sigs, secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message, int recid) {
unsigned char brx[32];
secp256k1_fe_t fx;
secp256k1_ge_t x;
secp256k1_gej_t xj;
secp256k1_scalar_t rn, u1, u2;
secp256k1_gej_t qj;
if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
return 0;
}
secp256k1_scalar_get_b32(brx, sigr);
VERIFY_CHECK(secp256k1_fe_set_b32(&fx, brx)); /* brx comes from a scalar, so is less than the order; certainly less than p */
if (recid & 2) {
if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
return 0;
}
secp256k1_fe_add(&fx, &secp256k1_ecdsa_const_order_as_fe);
}
if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1)) {
return 0;
}
secp256k1_gej_set_ge(&xj, &x);
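/* From the signing equation s = k^{-1}(message + r*seckey) mod n, the
 * public key Q = seckey*G satisfies Q = r^{-1}*(s*R - message*G), so
 * below u1 = -message/r and u2 = s/r give Q = u2*R + u1*G. */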
secp256k1_scalar_inverse_var(&rn, sigr);
secp256k1_scalar_mul(&u1, &rn, message);
secp256k1_scalar_negate(&u1, &u1);
secp256k1_scalar_mul(&u2, &rn, sigs);
secp256k1_ecmult(ctx, &qj, &xj, &u2, &u1);
secp256k1_ge_set_gej_var(pubkey, &qj);
return !secp256k1_gej_is_infinity(&qj);
}
static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context_t *ctx, secp256k1_scalar_t *sigr, secp256k1_scalar_t *sigs, const secp256k1_scalar_t *seckey, const secp256k1_scalar_t *message, const secp256k1_scalar_t *nonce, int *recid) {
unsigned char b[32];
secp256k1_gej_t rp;
secp256k1_ge_t r;
secp256k1_scalar_t n;
int overflow = 0;
secp256k1_ecmult_gen(ctx, &rp, nonce);
secp256k1_ge_set_gej(&r, &rp);
secp256k1_fe_normalize(&r.x);
secp256k1_fe_normalize(&r.y);
secp256k1_fe_get_b32(b, &r.x);
secp256k1_scalar_set_b32(sigr, b, &overflow);
if (secp256k1_scalar_is_zero(sigr)) {
/* P.x = order is on the curve, so technically sig->r could end up zero, which would be an invalid signature. */
secp256k1_gej_clear(&rp);
secp256k1_ge_clear(&r);
return 0;
}
if (recid) {
*recid = (overflow ? 2 : 0) | (secp256k1_fe_is_odd(&r.y) ? 1 : 0);
}
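/* ECDSA signing equation: s = nonce^{-1} * (message + r * seckey) mod n. */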
secp256k1_scalar_mul(&n, sigr, seckey);
secp256k1_scalar_add(&n, &n, message);
secp256k1_scalar_inverse(sigs, nonce);
secp256k1_scalar_mul(sigs, sigs, &n);
secp256k1_scalar_clear(&n);
secp256k1_gej_clear(&rp);
secp256k1_ge_clear(&r);
if (secp256k1_scalar_is_zero(sigs)) {
return 0;
}
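/* Canonicalize to low-S: (r, s) and (r, n-s) are both valid, and negating
 * s flips the parity of the recovered R.y, hence the recid bit flip. */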
if (secp256k1_scalar_is_high(sigs)) {
secp256k1_scalar_negate(sigs, sigs);
if (recid) {
*recid ^= 1;
}
}
return 1;
}
#endif

Some files were not shown because too many files have changed in this diff.