author	Art Forz <artforz@dumbo.lan>	2011-09-20 12:20:26 +0200
committer	Art Forz <artforz@dumbo.lan>	2011-09-20 12:20:26 +0200
commit	ac92e27c8e016bddd5e4c49e43ecd6244defeaf5 (patch)
tree	3bf2ea7772d42cdc031b05be464aad11481230a2
parent	602e31b96c9b94ed8e4acaa010622cb1aad2bd5e (diff)

add scrypt function

-rw-r--r--	Makefile.am	2
-rw-r--r--	cpu-miner.c	14
-rw-r--r--	miner.h	3
-rw-r--r--	scrypt.c	711
4 files changed, 729 insertions(+), 1 deletion(-)
diff --git a/Makefile.am b/Makefile.am
index 7bc13e5..b435083 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -14,7 +14,7 @@ INCLUDES = $(PTHREAD_FLAGS) -fno-strict-aliasing $(JANSSON_INCLUDES)
bin_PROGRAMS = minerd
minerd_SOURCES = elist.h miner.h compat.h \
- cpu-miner.c util.c \
+ cpu-miner.c util.c scrypt.c \
sha256_generic.c sha256_4way.c sha256_via.c \
sha256_cryptopp.c sha256_sse2_amd64.c
minerd_LDFLAGS = $(PTHREAD_FLAGS)
diff --git a/cpu-miner.c b/cpu-miner.c
index 29af042..7eb7af1 100644
--- a/cpu-miner.c
+++ b/cpu-miner.c
@@ -87,6 +87,7 @@ enum sha256_algos {
ALGO_CRYPTOPP, /* Crypto++ (C) */
ALGO_CRYPTOPP_ASM32, /* Crypto++ 32-bit assembly */
ALGO_SSE2_64, /* SSE2 for x86_64 */
+ ALGO_SCRYPT, /* scrypt(1024,1,1) */
};
static const char *algo_names[] = {
@@ -104,6 +105,7 @@ static const char *algo_names[] = {
#ifdef WANT_X8664_SSE2
[ALGO_SSE2_64] = "sse2_64",
#endif
+ [ALGO_SCRYPT] = "scrypt",
};
bool opt_debug = false;
@@ -552,6 +554,7 @@ static void *miner_thread(void *userdata)
struct thr_info *mythr = userdata;
int thr_id = mythr->id;
uint32_t max_nonce = 0xffffff;
+ unsigned char *scratchbuf = NULL;
/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
* and if that fails, then SCHED_BATCH. No need for this to be an
@@ -563,6 +566,12 @@ static void *miner_thread(void *userdata)
* of the number of CPUs */
if (!(opt_n_threads % num_processors))
affine_to_cpu(mythr->id, mythr->id % num_processors);
+
+ if (opt_algo == ALGO_SCRYPT)
+ {
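+		/* 131583 = 63 (alignment slack) + 128*r*p (B) + 256*r + 64 (XY)
+		 * + 128*r*N (V) for N=1024, r=1, p=1; see the size note above
+		 * scrypt_1024_1_1_256_sp() in scrypt.c. */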
+ scratchbuf = malloc(131583);
+ max_nonce = 0xffff;
+ }
while (1) {
struct work work __attribute__((aligned(128)));
@@ -633,6 +642,11 @@ static void *miner_thread(void *userdata)
break;
#endif
+ case ALGO_SCRYPT:
+ rc = scanhash_scrypt(thr_id, work.data, scratchbuf,
+ work.target, max_nonce, &hashes_done);
+ break;
+
default:
/* should never happen */
goto out;
diff --git a/miner.h b/miner.h
index e72404f..a5bdfff 100644
--- a/miner.h
+++ b/miner.h
@@ -173,6 +173,9 @@ extern int scanhash_sse2_64(int, const unsigned char *pmidstate, unsigned char *
unsigned char *phash1, unsigned char *phash,
const unsigned char *ptarget,
uint32_t max_nonce, unsigned long *nHashesDone);
+extern int scanhash_scrypt(int, unsigned char *pdata, unsigned char *scratchbuf,
+ const unsigned char *ptarget,
+ uint32_t max_nonce, unsigned long *nHashesDone);
extern int
timeval_subtract (struct timeval *result, struct timeval *x, struct timeval *y);
diff --git a/scrypt.c b/scrypt.c
new file mode 100644
index 0000000..e1a1262
--- /dev/null
+++ b/scrypt.c
@@ -0,0 +1,711 @@
+/*-
+ * Copyright 2009 Colin Percival, 2011 ArtForz
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * This file was originally written by Colin Percival as part of the Tarsnap
+ * online backup system.
+ */
+
+#include "cpuminer-config.h"
+#include "miner.h"
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+
+
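+/* Portable big-/little-endian 32-bit decode/encode helpers. */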
+static inline uint32_t
+be32dec(const void *pp)
+{
+ const uint8_t *p = (uint8_t const *)pp;
+
+ return ((uint32_t)(p[3]) + ((uint32_t)(p[2]) << 8) +
+ ((uint32_t)(p[1]) << 16) + ((uint32_t)(p[0]) << 24));
+}
+
+static inline void
+be32enc(void *pp, uint32_t x)
+{
+ uint8_t * p = (uint8_t *)pp;
+
+ p[3] = x & 0xff;
+ p[2] = (x >> 8) & 0xff;
+ p[1] = (x >> 16) & 0xff;
+ p[0] = (x >> 24) & 0xff;
+}
+
+static inline uint32_t
+le32dec(const void *pp)
+{
+ const uint8_t *p = (uint8_t const *)pp;
+
+ return ((uint32_t)(p[0]) + ((uint32_t)(p[1]) << 8) +
+ ((uint32_t)(p[2]) << 16) + ((uint32_t)(p[3]) << 24));
+}
+
+static inline void
+le32enc(void *pp, uint32_t x)
+{
+ uint8_t * p = (uint8_t *)pp;
+
+ p[0] = x & 0xff;
+ p[1] = (x >> 8) & 0xff;
+ p[2] = (x >> 16) & 0xff;
+ p[3] = (x >> 24) & 0xff;
+}
+
+
+typedef struct SHA256Context {
+ uint32_t state[8];
+ uint32_t count[2];
+ unsigned char buf[64];
+} SHA256_CTX;
+
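+/* HMAC-SHA256 context: ictx holds the inner SHA-256 state, octx the outer. */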
+typedef struct HMAC_SHA256Context {
+ SHA256_CTX ictx;
+ SHA256_CTX octx;
+} HMAC_SHA256_CTX;
+
+/*
+ * Encode a length len/4 vector of (uint32_t) into a length len vector of
+ * (unsigned char) in big-endian form. Assumes len is a multiple of 4.
+ */
+static void
+be32enc_vect(unsigned char *dst, const uint32_t *src, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len / 4; i++)
+ be32enc(dst + i * 4, src[i]);
+}
+
+/*
+ * Decode a big-endian length len vector of (unsigned char) into a length
+ * len/4 vector of (uint32_t). Assumes len is a multiple of 4.
+ */
+static void
+be32dec_vect(uint32_t *dst, const unsigned char *src, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len / 4; i++)
+ dst[i] = be32dec(src + i * 4);
+}
+
+/* Elementary functions used by SHA256 */
+#define Ch(x, y, z) ((x & (y ^ z)) ^ z)
+#define Maj(x, y, z) ((x & (y | z)) | (y & z))
+#define SHR(x, n) (x >> n)
+#define ROTR(x, n) ((x >> n) | (x << (32 - n)))
+#define S0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
+#define S1(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
+#define s0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHR(x, 3))
+#define s1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHR(x, 10))
+
+/* SHA256 round function */
+#define RND(a, b, c, d, e, f, g, h, k) \
+ t0 = h + S1(e) + Ch(e, f, g) + k; \
+ t1 = S0(a) + Maj(a, b, c); \
+ d += t0; \
+ h = t0 + t1;
+
+/* Adjusted round function for rotating state: indexing S[] mod 8 rotates the
+ * eight working variables one position per round without copying them. */
+#define RNDr(S, W, i, k) \
+ RND(S[(64 - i) % 8], S[(65 - i) % 8], \
+ S[(66 - i) % 8], S[(67 - i) % 8], \
+ S[(68 - i) % 8], S[(69 - i) % 8], \
+ S[(70 - i) % 8], S[(71 - i) % 8], \
+ W[i] + k)
+
+/*
+ * SHA256 block compression function. The 256-bit state is transformed via
+ * the 512-bit input block to produce a new state.
+ */
+static void
+SHA256_Transform(uint32_t * state, const unsigned char block[64])
+{
+ uint32_t W[64];
+ uint32_t S[8];
+ uint32_t t0, t1;
+ int i;
+
+ /* 1. Prepare message schedule W. */
+ be32dec_vect(W, block, 64);
+ for (i = 16; i < 64; i++)
+ W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
+
+ /* 2. Initialize working variables. */
+ memcpy(S, state, 32);
+
+ /* 3. Mix. */
+ RNDr(S, W, 0, 0x428a2f98);
+ RNDr(S, W, 1, 0x71374491);
+ RNDr(S, W, 2, 0xb5c0fbcf);
+ RNDr(S, W, 3, 0xe9b5dba5);
+ RNDr(S, W, 4, 0x3956c25b);
+ RNDr(S, W, 5, 0x59f111f1);
+ RNDr(S, W, 6, 0x923f82a4);
+ RNDr(S, W, 7, 0xab1c5ed5);
+ RNDr(S, W, 8, 0xd807aa98);
+ RNDr(S, W, 9, 0x12835b01);
+ RNDr(S, W, 10, 0x243185be);
+ RNDr(S, W, 11, 0x550c7dc3);
+ RNDr(S, W, 12, 0x72be5d74);
+ RNDr(S, W, 13, 0x80deb1fe);
+ RNDr(S, W, 14, 0x9bdc06a7);
+ RNDr(S, W, 15, 0xc19bf174);
+ RNDr(S, W, 16, 0xe49b69c1);
+ RNDr(S, W, 17, 0xefbe4786);
+ RNDr(S, W, 18, 0x0fc19dc6);
+ RNDr(S, W, 19, 0x240ca1cc);
+ RNDr(S, W, 20, 0x2de92c6f);
+ RNDr(S, W, 21, 0x4a7484aa);
+ RNDr(S, W, 22, 0x5cb0a9dc);
+ RNDr(S, W, 23, 0x76f988da);
+ RNDr(S, W, 24, 0x983e5152);
+ RNDr(S, W, 25, 0xa831c66d);
+ RNDr(S, W, 26, 0xb00327c8);
+ RNDr(S, W, 27, 0xbf597fc7);
+ RNDr(S, W, 28, 0xc6e00bf3);
+ RNDr(S, W, 29, 0xd5a79147);
+ RNDr(S, W, 30, 0x06ca6351);
+ RNDr(S, W, 31, 0x14292967);
+ RNDr(S, W, 32, 0x27b70a85);
+ RNDr(S, W, 33, 0x2e1b2138);
+ RNDr(S, W, 34, 0x4d2c6dfc);
+ RNDr(S, W, 35, 0x53380d13);
+ RNDr(S, W, 36, 0x650a7354);
+ RNDr(S, W, 37, 0x766a0abb);
+ RNDr(S, W, 38, 0x81c2c92e);
+ RNDr(S, W, 39, 0x92722c85);
+ RNDr(S, W, 40, 0xa2bfe8a1);
+ RNDr(S, W, 41, 0xa81a664b);
+ RNDr(S, W, 42, 0xc24b8b70);
+ RNDr(S, W, 43, 0xc76c51a3);
+ RNDr(S, W, 44, 0xd192e819);
+ RNDr(S, W, 45, 0xd6990624);
+ RNDr(S, W, 46, 0xf40e3585);
+ RNDr(S, W, 47, 0x106aa070);
+ RNDr(S, W, 48, 0x19a4c116);
+ RNDr(S, W, 49, 0x1e376c08);
+ RNDr(S, W, 50, 0x2748774c);
+ RNDr(S, W, 51, 0x34b0bcb5);
+ RNDr(S, W, 52, 0x391c0cb3);
+ RNDr(S, W, 53, 0x4ed8aa4a);
+ RNDr(S, W, 54, 0x5b9cca4f);
+ RNDr(S, W, 55, 0x682e6ff3);
+ RNDr(S, W, 56, 0x748f82ee);
+ RNDr(S, W, 57, 0x78a5636f);
+ RNDr(S, W, 58, 0x84c87814);
+ RNDr(S, W, 59, 0x8cc70208);
+ RNDr(S, W, 60, 0x90befffa);
+ RNDr(S, W, 61, 0xa4506ceb);
+ RNDr(S, W, 62, 0xbef9a3f7);
+ RNDr(S, W, 63, 0xc67178f2);
+
+ /* 4. Mix local working variables into global state */
+ for (i = 0; i < 8; i++)
+ state[i] += S[i];
+
+ /* Clean the stack. */
+ memset(W, 0, 256);
+ memset(S, 0, 32);
+ t0 = t1 = 0;
+}
+
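+/* SHA-256 padding block: a single 1 bit (0x80) followed by zeros; the 64-bit
+ * message length is appended separately by SHA256_Pad. */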
+static unsigned char PAD[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+/* SHA-256 initialization. Begins a SHA-256 operation. */
+static void
+SHA256_Init(SHA256_CTX * ctx)
+{
+
+ /* Zero bits processed so far */
+ ctx->count[0] = ctx->count[1] = 0;
+
+ /* Magic initialization constants */
+ ctx->state[0] = 0x6A09E667;
+ ctx->state[1] = 0xBB67AE85;
+ ctx->state[2] = 0x3C6EF372;
+ ctx->state[3] = 0xA54FF53A;
+ ctx->state[4] = 0x510E527F;
+ ctx->state[5] = 0x9B05688C;
+ ctx->state[6] = 0x1F83D9AB;
+ ctx->state[7] = 0x5BE0CD19;
+}
+
+/* Add bytes into the hash */
+static void
+SHA256_Update(SHA256_CTX * ctx, const void *in, size_t len)
+{
+ uint32_t bitlen[2];
+ uint32_t r;
+ const unsigned char *src = in;
+
+ /* Number of bytes left in the buffer from previous updates */
+ r = (ctx->count[1] >> 3) & 0x3f;
+
+ /* Convert the length into a number of bits */
+ bitlen[1] = ((uint32_t)len) << 3;
+ bitlen[0] = (uint32_t)(len >> 29);
+
+ /* Update number of bits */
+ if ((ctx->count[1] += bitlen[1]) < bitlen[1])
+ ctx->count[0]++;
+ ctx->count[0] += bitlen[0];
+
+ /* Handle the case where we don't need to perform any transforms */
+ if (len < 64 - r) {
+ memcpy(&ctx->buf[r], src, len);
+ return;
+ }
+
+ /* Finish the current block */
+ memcpy(&ctx->buf[r], src, 64 - r);
+ SHA256_Transform(ctx->state, ctx->buf);
+ src += 64 - r;
+ len -= 64 - r;
+
+ /* Perform complete blocks */
+ while (len >= 64) {
+ SHA256_Transform(ctx->state, src);
+ src += 64;
+ len -= 64;
+ }
+
+ /* Copy left over data into buffer */
+ memcpy(ctx->buf, src, len);
+}
+
+/* Add padding and terminating bit-count. */
+static void
+SHA256_Pad(SHA256_CTX * ctx)
+{
+ unsigned char len[8];
+ uint32_t r, plen;
+
+ /*
+ * Convert length to a vector of bytes -- we do this now rather
+ * than later because the length will change after we pad.
+ */
+ be32enc_vect(len, ctx->count, 8);
+
+ /* Add 1--64 bytes so that the resulting length is 56 mod 64 */
+ r = (ctx->count[1] >> 3) & 0x3f;
+ plen = (r < 56) ? (56 - r) : (120 - r);
+ SHA256_Update(ctx, PAD, (size_t)plen);
+
+ /* Add the terminating bit-count */
+ SHA256_Update(ctx, len, 8);
+}
+
+/*
+ * SHA-256 finalization. Pads the input data, exports the hash value,
+ * and clears the context state.
+ */
+static void
+SHA256_Final(unsigned char digest[32], SHA256_CTX * ctx)
+{
+
+ /* Add padding */
+ SHA256_Pad(ctx);
+
+ /* Write the hash */
+ be32enc_vect(digest, ctx->state, 32);
+
+ /* Clear the context state */
+ memset((void *)ctx, 0, sizeof(*ctx));
+}
+
+/* Initialize an HMAC-SHA256 operation with the given key. */
+static void
+HMAC_SHA256_Init(HMAC_SHA256_CTX * ctx, const void * _K, size_t Klen)
+{
+ unsigned char pad[64];
+ unsigned char khash[32];
+ const unsigned char * K = _K;
+ size_t i;
+
+ /* If Klen > 64, the key is really SHA256(K). */
+ if (Klen > 64) {
+ SHA256_Init(&ctx->ictx);
+ SHA256_Update(&ctx->ictx, K, Klen);
+ SHA256_Final(khash, &ctx->ictx);
+ K = khash;
+ Klen = 32;
+ }
+
+ /* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
+ SHA256_Init(&ctx->ictx);
+ memset(pad, 0x36, 64);
+ for (i = 0; i < Klen; i++)
+ pad[i] ^= K[i];
+ SHA256_Update(&ctx->ictx, pad, 64);
+
+ /* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
+ SHA256_Init(&ctx->octx);
+ memset(pad, 0x5c, 64);
+ for (i = 0; i < Klen; i++)
+ pad[i] ^= K[i];
+ SHA256_Update(&ctx->octx, pad, 64);
+
+ /* Clean the stack. */
+ memset(khash, 0, 32);
+}
+
+/* Add bytes to the HMAC-SHA256 operation. */
+static void
+HMAC_SHA256_Update(HMAC_SHA256_CTX * ctx, const void *in, size_t len)
+{
+
+ /* Feed data to the inner SHA256 operation. */
+ SHA256_Update(&ctx->ictx, in, len);
+}
+
+/* Finish an HMAC-SHA256 operation. */
+static void
+HMAC_SHA256_Final(unsigned char digest[32], HMAC_SHA256_CTX * ctx)
+{
+ unsigned char ihash[32];
+
+ /* Finish the inner SHA256 operation. */
+ SHA256_Final(ihash, &ctx->ictx);
+
+ /* Feed the inner hash to the outer SHA256 operation. */
+ SHA256_Update(&ctx->octx, ihash, 32);
+
+ /* Finish the outer SHA256 operation. */
+ SHA256_Final(digest, &ctx->octx);
+
+ /* Clean the stack. */
+ memset(ihash, 0, 32);
+}
+
+/**
+ * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
+ * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
+ * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
+ */
+static void
+PBKDF2_SHA256(const uint8_t * passwd, size_t passwdlen, const uint8_t * salt,
+ size_t saltlen, uint64_t c, uint8_t * buf, size_t dkLen)
+{
+ HMAC_SHA256_CTX PShctx, hctx;
+ size_t i;
+ uint8_t ivec[4];
+ uint8_t U[32];
+ uint8_t T[32];
+ uint64_t j;
+ int k;
+ size_t clen;
+
+ /* Compute HMAC state after processing P and S. */
+ HMAC_SHA256_Init(&PShctx, passwd, passwdlen);
+ HMAC_SHA256_Update(&PShctx, salt, saltlen);
+
+ /* Iterate through the blocks. */
+ for (i = 0; i * 32 < dkLen; i++) {
+ /* Generate INT(i + 1). */
+ be32enc(ivec, (uint32_t)(i + 1));
+
+ /* Compute U_1 = PRF(P, S || INT(i)). */
+ memcpy(&hctx, &PShctx, sizeof(HMAC_SHA256_CTX));
+ HMAC_SHA256_Update(&hctx, ivec, 4);
+ HMAC_SHA256_Final(U, &hctx);
+
+ /* T_i = U_1 ... */
+ memcpy(T, U, 32);
+
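+		/* Note: scrypt_1024_1_1_256_sp() below always calls this with
+		 * c = 1, so this loop body is skipped there. */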
+ for (j = 2; j <= c; j++) {
+ /* Compute U_j. */
+ HMAC_SHA256_Init(&hctx, passwd, passwdlen);
+ HMAC_SHA256_Update(&hctx, U, 32);
+ HMAC_SHA256_Final(U, &hctx);
+
+ /* ... xor U_j ... */
+ for (k = 0; k < 32; k++)
+ T[k] ^= U[k];
+ }
+
+ /* Copy as many bytes as necessary into buf. */
+ clen = dkLen - i * 32;
+ if (clen > 32)
+ clen = 32;
+ memcpy(&buf[i * 32], T, clen);
+ }
+
+ /* Clean PShctx, since we never called _Final on it. */
+ memset(&PShctx, 0, sizeof(HMAC_SHA256_CTX));
+}
+
+
+static void blkcpy(void *, void *, size_t);
+static void blkxor(void *, void *, size_t);
+static void salsa20_8(uint32_t[16]);
+static void blockmix_salsa8(uint32_t *, uint32_t *, uint32_t *, size_t);
+static uint64_t integerify(void *, size_t);
+static void smix(uint8_t *, size_t, uint64_t, uint32_t *, uint32_t *);
+
+static void
+blkcpy(void * dest, void * src, size_t len)
+{
+ size_t * D = dest;
+ size_t * S = src;
+ size_t L = len / sizeof(size_t);
+ size_t i;
+
+ for (i = 0; i < L; i++)
+ D[i] = S[i];
+}
+
+static void
+blkxor(void * dest, void * src, size_t len)
+{
+ size_t * D = dest;
+ size_t * S = src;
+ size_t L = len / sizeof(size_t);
+ size_t i;
+
+ for (i = 0; i < L; i++)
+ D[i] ^= S[i];
+}
+
+/**
+ * salsa20_8(B):
+ * Apply the salsa20/8 core to the provided block.
+ */
+static void
+salsa20_8(uint32_t B[16])
+{
+ uint32_t x[16];
+ size_t i;
+
+ blkcpy(x, B, 64);
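+	/* Four double-rounds (columns then rows) make up salsa20/8's eight rounds. */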
+ for (i = 0; i < 8; i += 2) {
+#define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
+ /* Operate on columns. */
+ x[ 4] ^= R(x[ 0]+x[12], 7); x[ 8] ^= R(x[ 4]+x[ 0], 9);
+ x[12] ^= R(x[ 8]+x[ 4],13); x[ 0] ^= R(x[12]+x[ 8],18);
+
+ x[ 9] ^= R(x[ 5]+x[ 1], 7); x[13] ^= R(x[ 9]+x[ 5], 9);
+ x[ 1] ^= R(x[13]+x[ 9],13); x[ 5] ^= R(x[ 1]+x[13],18);
+
+ x[14] ^= R(x[10]+x[ 6], 7); x[ 2] ^= R(x[14]+x[10], 9);
+ x[ 6] ^= R(x[ 2]+x[14],13); x[10] ^= R(x[ 6]+x[ 2],18);
+
+ x[ 3] ^= R(x[15]+x[11], 7); x[ 7] ^= R(x[ 3]+x[15], 9);
+ x[11] ^= R(x[ 7]+x[ 3],13); x[15] ^= R(x[11]+x[ 7],18);
+
+ /* Operate on rows. */
+ x[ 1] ^= R(x[ 0]+x[ 3], 7); x[ 2] ^= R(x[ 1]+x[ 0], 9);
+ x[ 3] ^= R(x[ 2]+x[ 1],13); x[ 0] ^= R(x[ 3]+x[ 2],18);
+
+ x[ 6] ^= R(x[ 5]+x[ 4], 7); x[ 7] ^= R(x[ 6]+x[ 5], 9);
+ x[ 4] ^= R(x[ 7]+x[ 6],13); x[ 5] ^= R(x[ 4]+x[ 7],18);
+
+ x[11] ^= R(x[10]+x[ 9], 7); x[ 8] ^= R(x[11]+x[10], 9);
+ x[ 9] ^= R(x[ 8]+x[11],13); x[10] ^= R(x[ 9]+x[ 8],18);
+
+ x[12] ^= R(x[15]+x[14], 7); x[13] ^= R(x[12]+x[15], 9);
+ x[14] ^= R(x[13]+x[12],13); x[15] ^= R(x[14]+x[13],18);
+#undef R
+ }
+ for (i = 0; i < 16; i++)
+ B[i] += x[i];
+}
+
+/**
+ * blockmix_salsa8(Bin, Bout, X, r):
+ * Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
+ * bytes in length; the output Bout must also be the same size. The
+ * temporary space X must be 64 bytes.
+ */
+static void
+blockmix_salsa8(uint32_t * Bin, uint32_t * Bout, uint32_t * X, size_t r)
+{
+ size_t i;
+
+ /* 1: X <-- B_{2r - 1} */
+ blkcpy(X, &Bin[(2 * r - 1) * 16], 64);
+
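+	/* Even-indexed outputs land in the first half of Bout and odd-indexed
+	 * ones in the second half, performing the step-6 shuffle on the fly. */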
+ /* 2: for i = 0 to 2r - 1 do */
+ for (i = 0; i < 2 * r; i += 2) {
+ /* 3: X <-- H(X \xor B_i) */
+ blkxor(X, &Bin[i * 16], 64);
+ salsa20_8(X);
+
+ /* 4: Y_i <-- X */
+ /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
+ blkcpy(&Bout[i * 8], X, 64);
+
+ /* 3: X <-- H(X \xor B_i) */
+ blkxor(X, &Bin[i * 16 + 16], 64);
+ salsa20_8(X);
+
+ /* 4: Y_i <-- X */
+ /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
+ blkcpy(&Bout[i * 8 + r * 16], X, 64);
+ }
+}
+
+/**
+ * integerify(B, r):
+ * Return the result of parsing B_{2r-1} as a little-endian integer.
+ */
+static uint64_t
+integerify(void * B, size_t r)
+{
+ uint32_t * X = (void *)((uintptr_t)(B) + (2 * r - 1) * 64);
+
+ return (((uint64_t)(X[1]) << 32) + X[0]);
+}
+
+/**
+ * smix(B, r, N, V, XY):
+ * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
+ * the temporary storage V must be 128rN bytes in length; the temporary
+ * storage XY must be 256r + 64 bytes in length. The value N must be a
+ * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
+ * multiple of 64 bytes.
+ */
+static void
+smix(uint8_t * B, size_t r, uint64_t N, uint32_t * V, uint32_t * XY)
+{
+ uint32_t * X = XY;
+ uint32_t * Y = &XY[32 * r];
+ uint32_t * Z = &XY[64 * r];
+ uint64_t i;
+ uint64_t j;
+ size_t k;
+
+ /* 1: X <-- B */
+ for (k = 0; k < 32 * r; k++)
+ X[k] = le32dec(&B[4 * k]);
+
+ /* 2: for i = 0 to N - 1 do */
+ for (i = 0; i < N; i += 2) {
+ /* 3: V_i <-- X */
+ blkcpy(&V[i * (32 * r)], X, 128 * r);
+
+ /* 4: X <-- H(X) */
+ blockmix_salsa8(X, Y, Z, r);
+
+ /* 3: V_i <-- X */
+ blkcpy(&V[(i + 1) * (32 * r)], Y, 128 * r);
+
+ /* 4: X <-- H(X) */
+ blockmix_salsa8(Y, X, Z, r);
+ }
+
+ /* 6: for i = 0 to N - 1 do */
+ for (i = 0; i < N; i += 2) {
+ /* 7: j <-- Integerify(X) mod N */
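+		/* (N is a power of 2, so masking with N - 1 is an exact mod N.) */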
+ j = integerify(X, r) & (N - 1);
+
+ /* 8: X <-- H(X \xor V_j) */
+ blkxor(X, &V[j * (32 * r)], 128 * r);
+ blockmix_salsa8(X, Y, Z, r);
+
+ /* 7: j <-- Integerify(X) mod N */
+ j = integerify(Y, r) & (N - 1);
+
+ /* 8: X <-- H(X \xor V_j) */
+ blkxor(Y, &V[j * (32 * r)], 128 * r);
+ blockmix_salsa8(Y, X, Z, r);
+ }
+
+ /* 10: B' <-- X */
+ for (k = 0; k < 32 * r; k++)
+ le32enc(&B[4 * k], X[k]);
+}
+
+/* CPU- and memory-intensive function transforming an 80-byte buffer into a
+ * 32-byte output. The scratchpad must be at least
+ * 63 + (128 * r * p) + (256 * r + 64) + (128 * r * N) bytes.
+ */
+static void scrypt_1024_1_1_256_sp(const char* input, char* output, char* scratchpad)
+{
+ uint8_t * B;
+ uint32_t * V;
+ uint32_t * XY;
+ uint32_t i;
+
+ const uint32_t N = 1024;
+ const uint32_t r = 1;
+ const uint32_t p = 1;
+
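+	/* Scratchpad layout: [up to 63 alignment bytes][B: 128*r*p][XY: 256*r + 64][V: 128*r*N] */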
+ B = (uint8_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));
+ XY = (uint32_t *)(B + (128 * r * p));
+ V = (uint32_t *)(B + (128 * r * p) + (256 * r + 64));
+
+ /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
+ PBKDF2_SHA256((const uint8_t*)input, 80, (const uint8_t*)input, 80, 1, B, p * 128 * r);
+
+ /* 2: for i = 0 to p - 1 do */
+ for (i = 0; i < p; i++) {
+ /* 3: B_i <-- MF(B_i, N) */
+ smix(&B[i * 128 * r], r, N, V, XY);
+ }
+
+ /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
+ PBKDF2_SHA256((const uint8_t*)input, 80, B, p * 128 * r, 1, (uint8_t*)output, 32);
+}
+
+int scanhash_scrypt(int thr_id, unsigned char *pdata, unsigned char *scratchbuf,
+ const unsigned char *ptarget,
+ uint32_t max_nonce, unsigned long *hashes_done)
+{
+ unsigned char data[80];
+ unsigned char tmp_hash[32];
+ uint32_t *nonce = (uint32_t *)(data + 64 + 12);
+ uint32_t n = 0;
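+	/* Screen hashes against only the most significant 32 bits of the
+	 * 256-bit little-endian target (bytes 28..31). */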
+ uint32_t Htarg = *(uint32_t *)(ptarget + 28);
+ int i;
+
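+	/* Swap each 32-bit word of the 80-byte block header; the nonce field
+	 * occupies bytes 76..79 (data + 64 + 12). */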
+ for (i = 0; i < 80/4; i++)
+ ((uint32_t *)data)[i] = swab32(((uint32_t *)pdata)[i]);
+
+ while(1) {
+ n++;
+ *nonce = n;
+		scrypt_1024_1_1_256_sp((const char *)data, (char *)tmp_hash, (char *)scratchbuf);
+
+ if (*(uint32_t *)(tmp_hash+28) <= Htarg) {
+ *(uint32_t *)(pdata + 64 + 12) = swab32(n);
+ *hashes_done = n;
+ return true;
+ }
+
+ if ((n >= max_nonce) || work_restart[thr_id].restart) {
+ *hashes_done = n;
+ break;
+ }
+ }
+ return false;
+}
+