Diffstat (limited to 'main/openssl')
-rw-r--r--  main/openssl/openssl-0.9.8k-padlock-sha.patch | 821 -
1 file changed, 0 insertions, 821 deletions
diff --git a/main/openssl/openssl-0.9.8k-padlock-sha.patch b/main/openssl/openssl-0.9.8k-padlock-sha.patch
deleted file mode 100644
index b2e7e954d..000000000
--- a/main/openssl/openssl-0.9.8k-padlock-sha.patch
+++ /dev/null
@@ -1,821 +0,0 @@
-#
-# OpenSSL patch to support VIA C7 hash engine
-# Written by: Timo Teras <timo.teras@iki.fi>
-# based on patch by: Michal Ludvig <michal@logix.cz>
-# http://www.logix.cz/michal/devel/padlock
-#
-Index: openssl-0.9.8k/crypto/engine/eng_padlock.c
-===================================================================
---- openssl-0.9.8k.orig/crypto/engine/eng_padlock.c 2009-07-27 16:18:20.000000000 +0300
-+++ openssl-0.9.8k/crypto/engine/eng_padlock.c 2009-07-30 22:02:54.000000000 +0300
-@@ -1,10 +1,13 @@
--/*
-+/*
- * Support for VIA PadLock Advanced Cryptography Engine (ACE)
- * Written by Michal Ludvig <michal@logix.cz>
- * http://www.logix.cz/michal
- *
-- * Big thanks to Andy Polyakov for a help with optimization,
-- * assembler fixes, port to MS Windows and a lot of other
-+ * SHA support by Timo Teras <timo.teras@iki.fi> based on code
-+ * originally by Michal Ludvig.
-+ *
-+ * Big thanks to Andy Polyakov for a help with optimization,
-+ * assembler fixes, port to MS Windows and a lot of other
- * valuable work on this engine!
- */
-
-@@ -66,6 +69,13 @@
- #include <stdio.h>
- #include <string.h>
-
-+#include <signal.h>
-+#include <stdint.h>
-+#include <unistd.h>
-+#include <sys/mman.h>
-+#include <sys/ucontext.h>
-+#include <arpa/inet.h>
-+
- #include <openssl/opensslconf.h>
- #include <openssl/crypto.h>
- #include <openssl/dso.h>
-@@ -74,12 +84,23 @@
- #ifndef OPENSSL_NO_AES
- #include <openssl/aes.h>
- #endif
-+#ifndef OPENSSL_NO_SHA
-+#include <openssl/sha.h>
-+#endif
- #include <openssl/rand.h>
- #include <openssl/err.h>
-
- #ifndef OPENSSL_NO_HW
- #ifndef OPENSSL_NO_HW_PADLOCK
-
-+/* PadLock RNG is disabled by default */
-+#define PADLOCK_NO_RNG 1
-+
-+/* No ASM routines for SHA in MSC yet */
-+#ifdef _MSC_VER
-+#define OPENSSL_NO_SHA
-+#endif
-+
- /* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */
- #if (OPENSSL_VERSION_NUMBER >= 0x00908000L)
- # ifndef OPENSSL_NO_DYNAMIC_ENGINE
-@@ -96,7 +117,7 @@
- /* VIA PadLock AES is available *ONLY* on some x86 CPUs.
- Not only that it doesn't exist elsewhere, but it
- even can't be compiled on other platforms!
--
-+
- In addition, because of the heavy use of inline assembler,
- compiler choice is limited to GCC and Microsoft C. */
- #undef COMPILE_HW_PADLOCK
-@@ -138,20 +159,42 @@
- static int padlock_init(ENGINE *e);
-
- /* RNG Stuff */
-+#ifndef PADLOCK_NO_RNG
- static RAND_METHOD padlock_rand;
-+#endif
-
- /* Cipher Stuff */
- #ifndef OPENSSL_NO_AES
- static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid);
- #endif
-
-+/* Digest Stuff */
-+#ifndef OPENSSL_NO_SHA
-+static int padlock_digests(ENGINE *e, const EVP_MD **digest, const int **nids, int nid);
-+static volatile void *padlock_cached_sha_buffer = NULL;
-+#endif
-+
- /* Engine names */
- static const char *padlock_id = "padlock";
- static char padlock_name[100];
-
- /* Available features */
--static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
--static int padlock_use_rng = 0; /* Random Number Generator */
-+enum padlock_flags {
-+ PADLOCK_RNG = 0x01,
-+ PADLOCK_ACE = 0x02,
-+ PADLOCK_ACE2 = 0x04,
-+ PADLOCK_PHE = 0x08,
-+ PADLOCK_PMM = 0x10
-+};
-+enum padlock_flags padlock_flags;
-+
-+#define PADLOCK_HAVE_RNG (padlock_flags & PADLOCK_RNG)
-+#define PADLOCK_HAVE_ACE (padlock_flags & (PADLOCK_ACE|PADLOCK_ACE2))
-+#define PADLOCK_HAVE_ACE1 (padlock_flags & PADLOCK_ACE)
-+#define PADLOCK_HAVE_ACE2 (padlock_flags & PADLOCK_ACE2)
-+#define PADLOCK_HAVE_PHE (padlock_flags & PADLOCK_PHE)
-+#define PADLOCK_HAVE_PMM (padlock_flags & PADLOCK_PMM)
-+
- #ifndef OPENSSL_NO_AES
- static int padlock_aes_align_required = 1;
- #endif
-@@ -165,25 +208,30 @@
- /* Check available features */
- padlock_available();
-
--#if 1 /* disable RNG for now, see commentary in vicinity of RNG code */
-- padlock_use_rng=0;
--#endif
--
- /* Generate a nice engine name with available features */
- BIO_snprintf(padlock_name, sizeof(padlock_name),
-- "VIA PadLock (%s, %s)",
-- padlock_use_rng ? "RNG" : "no-RNG",
-- padlock_use_ace ? "ACE" : "no-ACE");
-+ "VIA PadLock: %s%s%s%s%s",
-+ padlock_flags ? "" : "not supported",
-+ PADLOCK_HAVE_RNG ? "RNG " : "",
-+ PADLOCK_HAVE_ACE ? (PADLOCK_HAVE_ACE2 ? "ACE2 " : "ACE ") : "",
-+ PADLOCK_HAVE_PHE ? "PHE " : "",
-+ PADLOCK_HAVE_PMM ? "PMM " : "");
-
-- /* Register everything or return with an error */
-+ /* Register everything or return with an error */
- if (!ENGINE_set_id(e, padlock_id) ||
- !ENGINE_set_name(e, padlock_name) ||
-
-- !ENGINE_set_init_function(e, padlock_init) ||
-+ !ENGINE_set_init_function(e, padlock_init)
- #ifndef OPENSSL_NO_AES
-- (padlock_use_ace && !ENGINE_set_ciphers (e, padlock_ciphers)) ||
-+ || (PADLOCK_HAVE_ACE && !ENGINE_set_ciphers (e, padlock_ciphers))
- #endif
-- (padlock_use_rng && !ENGINE_set_RAND (e, &padlock_rand))) {
-+#ifndef OPENSSL_NO_SHA
-+ || (PADLOCK_HAVE_PHE && !ENGINE_set_digests (e, padlock_digests))
-+#endif
-+#ifndef PADLOCK_NO_RNG
-+ || (PADLOCK_HAVE_RNG && !ENGINE_set_RAND (e, &padlock_rand))
-+#endif
-+ ) {
- return 0;
- }
-
-@@ -213,7 +261,7 @@
- static int
- padlock_init(ENGINE *e)
- {
-- return (padlock_use_rng || padlock_use_ace);
-+ return (padlock_flags);
- }
-
- /* This stuff is needed if this ENGINE is being compiled into a self-contained
-@@ -247,7 +295,7 @@
- #define AES_KEY_SIZE_192 24
- #define AES_KEY_SIZE_256 32
-
--/* Here we store the status information relevant to the
-+/* Here we store the status information relevant to the
- current context. */
- /* BIG FAT WARNING:
- * Inline assembler in PADLOCK_XCRYPT_ASM()
-@@ -306,7 +354,7 @@
- {
- int result = -1;
-
-- /* We're checking if the bit #21 of EFLAGS
-+ /* We're checking if the bit #21 of EFLAGS
- can be toggled. If yes = CPUID is available. */
- asm volatile (
- "pushf\n"
-@@ -322,7 +370,7 @@
- "xorl %%eax, %%ecx\n"
- "movl %%ecx, %0\n"
- : "=r" (result) : : "eax", "ecx");
--
-+
- return (result == 0);
- }
-
-@@ -365,10 +413,22 @@
- : "+a"(eax), "=d"(edx) : : "ecx");
-
- /* Fill up some flags */
-- padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
-- padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));
-+ padlock_flags |= ((edx & (0x3<<3)) ? PADLOCK_RNG : 0);
-+ padlock_flags |= ((edx & (0x3<<7)) ? PADLOCK_ACE : 0);
-+ padlock_flags |= ((edx & (0x3<<9)) ? PADLOCK_ACE2 : 0);
-+ padlock_flags |= ((edx & (0x3<<11)) ? PADLOCK_PHE : 0);
-+ padlock_flags |= ((edx & (0x3<<13)) ? PADLOCK_PMM : 0);
-
-- return padlock_use_ace + padlock_use_rng;
-+ return padlock_flags;
-+}
-+
-+static inline void
-+padlock_htonl_block(uint32_t *data, size_t count)
-+{
-+ while (count--) {
-+ asm volatile ("bswapl %0" : "+r"(*data));
-+ data++;
-+ }
- }
-
- #ifndef OPENSSL_NO_AES
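The padlock_htonl_block() helper added above byte-swaps a run of 32-bit words in place with the x86 bswapl instruction; the engine relies on it both for the AES key schedule (padlock_bswapl) and for converting the SHA state words the hardware leaves in its context block. For comparison, here is a minimal portable sketch of the same operation using htonl(); on the little-endian x86 CPUs that actually have PadLock it performs the identical swap (the function name is ours, not part of the patch).

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

/* Portable stand-in for padlock_htonl_block(): swap the byte order of
 * each 32-bit word in place. On little-endian x86, htonl() performs
 * the same swap as the bswapl instruction used in the patch. */
static void htonl_block_sketch(uint32_t *data, size_t count)
{
        while (count--) {
                *data = htonl(*data);
                data++;
        }
}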
-@@ -377,17 +437,14 @@
- padlock_bswapl(AES_KEY *ks)
- {
- size_t i = sizeof(ks->rd_key)/sizeof(ks->rd_key[0]);
-- unsigned int *key = ks->rd_key;
-+ uint32_t *key = (uint32_t*) ks->rd_key;
-
-- while (i--) {
-- asm volatile ("bswapl %0" : "+r"(*key));
-- key++;
-- }
-+ padlock_htonl_block(key, i);
- }
- #endif
-
- /* Force key reload from memory to the CPU microcode.
-- Loading EFLAGS from the stack clears EFLAGS[30]
-+ Loading EFLAGS from the stack clears EFLAGS[30]
- which does the trick. */
- static inline void
- padlock_reload_key(void)
-@@ -423,7 +480,7 @@
- }
-
- /* Template for padlock_xcrypt_* modes */
--/* BIG FAT WARNING:
-+/* BIG FAT WARNING:
- * The offsets used with 'leal' instructions
- * describe items of the 'padlock_cipher_data'
- * structure.
-@@ -475,7 +532,7 @@
- * In case you wonder 'rep xcrypt*' instructions above are *not*
- * affected by the Direction Flag and pointers advance toward
- * larger addresses unconditionally.
-- */
-+ */
- static inline unsigned char *
- padlock_memcpy(void *dst,const void *src,size_t n)
- {
-@@ -501,7 +558,7 @@
- _asm _emit 0x0f _asm _emit 0xa7 \
- _asm _emit code
-
--/* BIG FAT WARNING:
-+/* BIG FAT WARNING:
- * The offsets used with 'lea' instructions
- * describe items of the 'padlock_cipher_data'
- * structure.
-@@ -840,7 +897,7 @@
- return 1;
- }
-
--/*
-+/*
- * Simplified version of padlock_aes_cipher() used when
- * 1) both input and output buffers are at aligned addresses.
- * or when
-@@ -895,7 +952,7 @@
- # error "insane PADLOCK_CHUNK..."
- #endif
-
--/* Re-align the arguments to 16-Bytes boundaries and run the
-+/* Re-align the arguments to 16-Bytes boundaries and run the
- encryption function itself. This function is not AES-specific. */
- static int
- padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
-@@ -1157,6 +1214,514 @@
-
- #endif /* OPENSSL_NO_AES */
-
-+#ifndef OPENSSL_NO_SHA
-+
-+#define DIGEST_DATA(ctx) ((struct padlock_digest_data *)(ctx->md_data))
-+#define PADLOCK_SHA_ALIGN(dd) (uint32_t*)(((uintptr_t)(dd) + 15) & ~15)
-+#define PADLOCK_SHA_PAGES 14
-+#define PADLOCK_SHA_BUFFER (512 - sizeof(size_t) - 4*sizeof(void*))
-+#define PADLOCK_SHA_INITVECTOR_SIZE (8 * sizeof(uint32_t))
-+
-+struct padlock_digest_data {
-+ union {
-+ unsigned char smallbuffer[PADLOCK_SHA_BUFFER];
-+ struct {
-+ unsigned char padlockctx[128+16];
-+ unsigned char *buffer;
-+ size_t mmap_size;
-+ uint64_t total;
-+ };
-+ };
-+ void *initvector;
-+ size_t used;
-+ void (*hash)(void *padlockctx, const void *buf, size_t len);
-+ int (*update)(EVP_MD_CTX *ctx, const void *buffer, size_t len);
-+ int (*final)(EVP_MD_CTX *ctx, unsigned char *buffer);
-+};
-+
-+static inline void *
-+padlock_atomic_xchg(volatile void **mem, void *fixed)
-+{
-+ /* No lock prefix due the xchg asserts it anyway, and the
-+ * funny unsigned long* cast is required to workaround some gcc
-+ * problems if compiling in PIC mode */
-+ asm volatile (
-+ "xchg %0, %1"
-+ : "=r"(fixed)
-+ : "m"(*(unsigned long*)mem), "0"(fixed)
-+ : "memory");
-+ return fixed;
-+}
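padlock_atomic_xchg() above is a lock-free handoff for the single cached SHA buffer: a context takes the cached mmap area by exchanging NULL into padlock_cached_sha_buffer, and padlock_free_buffer() later puts its buffer back, unmapping whatever was already cached. An x86 xchg with a memory operand is implicitly locked, which is why no lock prefix is needed. A sketch of the same scheme in portable C11 atomics, with illustrative names that are not in the patch:

#include <stdatomic.h>
#include <stddef.h>

/* Lock-free single-slot buffer cache, C11 version of the xchg trick. */
static _Atomic(void *) cached_buffer;

static void *cache_take(void)            /* grab the cached buffer, or NULL */
{
        return atomic_exchange(&cached_buffer, (void *)0);
}

static void *cache_put(void *buf)        /* cache buf, return the old entry */
{
        return atomic_exchange(&cached_buffer, buf);
}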
-+
-+static void
-+padlock_do_sha1(void *padlockctx, const void *buf, size_t len)
-+{
-+ asm volatile (
-+ "xsha1"
-+ : "+S"(buf), "+D"(padlockctx)
-+ : "c"(len), "a"(0));
-+}
-+
-+static void
-+padlock_do_sha256(void *padlockctx, const void *buf, size_t len)
-+{
-+ asm volatile (
-+ "xsha256"
-+ : "+S"(buf), "+D"(padlockctx)
-+ : "c"(len), "a"(0));
-+}
-+
-+static void
-+handle_sigsegv(int sig, siginfo_t *info, void *uctxp)
-+{
-+ ucontext_t *uctx = uctxp;
-+ uctx->uc_mcontext.gregs[14] += 4;
-+}
-+
-+static void
-+padlock_sha_nonfinalizing(struct padlock_digest_data *data)
-+{
-+ struct sigaction act, oldact;
-+ size_t bofs = 0;
-+
-+ if (data->used != data->mmap_size) {
-+ bofs = data->mmap_size - data->used;
-+ memmove(&data->buffer[bofs], data->buffer, data->used);
-+ }
-+
-+ memset(&act, 0, sizeof(act));
-+ act.sa_sigaction = handle_sigsegv;
-+ act.sa_flags = SA_SIGINFO;
-+ sigaction(SIGSEGV, &act, &oldact);
-+ data->hash(PADLOCK_SHA_ALIGN(data->padlockctx),
-+ &data->buffer[bofs], data->used + 64);
-+ sigaction(SIGSEGV, &oldact, NULL);
-+}
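padlock_sha_nonfinalizing() works around the fact that xsha1/xsha256 always pad and finalize the digest: it moves the data so it ends flush against the PROT_NONE guard page mapped after the buffer, asks the hardware for 64 bytes more than it really has, and lets the resulting SIGSEGV stop the instruction once the real data has been consumed. handle_sigsegv() then advances the saved instruction pointer (gregs[14] is EIP on 32-bit Linux) past the 4-byte xsha opcode, so execution resumes with the unfinalized intermediate state still in the context block. Stripped of the PadLock specifics, the temporary-handler pattern looks like the sketch below (run_guarded() and its parameters are hypothetical, for illustration only):

#include <signal.h>
#include <string.h>

/* Run hw_call(arg) with a temporary SIGSEGV handler installed, then
 * restore the previous disposition. Mirrors the sigaction dance in
 * padlock_sha_nonfinalizing(); the call is expected to fault into a
 * guard page and be resumed by the handler. */
static void run_guarded(void (*segv_handler)(int, siginfo_t *, void *),
                        void (*hw_call)(void *), void *arg)
{
        struct sigaction act, oldact;

        memset(&act, 0, sizeof(act));
        act.sa_sigaction = segv_handler;
        act.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &act, &oldact);
        hw_call(arg);
        sigaction(SIGSEGV, &oldact, NULL);
}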
-+
-+static void
-+padlock_free_buffer(void *buf)
-+{
-+ buf = padlock_atomic_xchg(&padlock_cached_sha_buffer, buf);
-+ if (buf != NULL) {
-+ munmap(buf, (PADLOCK_SHA_PAGES + 1) * getpagesize());
-+ }
-+}
-+
-+static void *
-+padlock_allocate_buffer(size_t *maxsize)
-+{
-+ void *buf;
-+ size_t size, page;
-+
-+ page = getpagesize();
-+ buf = padlock_atomic_xchg(&padlock_cached_sha_buffer, NULL);
-+ if (buf != NULL)
-+ goto ret;
-+
-+ size = (PADLOCK_SHA_PAGES + 1) * page;
-+ buf = mmap(0, size, PROT_READ | PROT_WRITE,
-+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
-+ if (buf == NULL)
-+ return NULL;
-+
-+ /* Try locking the pages to avoid swapping, but don't fail if
-+ * we are over quota. */
-+ mlock(buf, size);
-+
-+ if (mprotect(buf + PADLOCK_SHA_PAGES * page, page, PROT_NONE) < 0) {
-+ munmap(buf, size);
-+ return NULL;
-+ }
-+
-+ret:
-+ *maxsize = PADLOCK_SHA_PAGES * page - 64;
-+
-+ return buf;
-+}
-+
-+static int
-+padlock_multi_update(EVP_MD_CTX *ctx, const void *data, size_t len)
-+{
-+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
-+ size_t chunk_size;
-+
-+ if (ddata->buffer == NULL)
-+ ddata->buffer = padlock_allocate_buffer(&ddata->mmap_size);
-+
-+ while (len) {
-+ if (ddata->used + len < ddata->mmap_size) {
-+ memcpy(&ddata->buffer[ddata->used], data, len);
-+ ddata->used += len;
-+ ddata->total += len;
-+ return 1;
-+ }
-+
-+ chunk_size = ddata->mmap_size - ddata->used;
-+ memcpy(&ddata->buffer[ddata->used], data, chunk_size);
-+
-+ data += chunk_size;
-+ len -= chunk_size;
-+ ddata->used = ddata->mmap_size;
-+ ddata->total += chunk_size;
-+ padlock_sha_nonfinalizing(ddata);
-+ ddata->used = 0;
-+ }
-+
-+ return 1;
-+}
-+
-+static int
-+padlock_oneshot_final(EVP_MD_CTX *ctx, unsigned char *md)
-+{
-+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
-+ size_t size = EVP_MD_CTX_size(ctx);
-+
-+ memcpy(md, PADLOCK_SHA_ALIGN(ddata->padlockctx), size);
-+ return 1;
-+}
-+
-+static int
-+padlock_copy_final(EVP_MD_CTX *ctx, unsigned char *md)
-+{
-+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
-+ char padlockctx[128+16];
-+ void *aligned = PADLOCK_SHA_ALIGN(padlockctx);
-+ size_t size = EVP_MD_CTX_size(ctx);
-+
-+ memcpy(aligned, ddata->initvector, PADLOCK_SHA_INITVECTOR_SIZE);
-+ ddata->hash(aligned, ddata->smallbuffer, ddata->used);
-+ padlock_htonl_block(aligned, size / sizeof(uint32_t));
-+ memcpy(md, aligned, size);
-+
-+ return 1;
-+}
-+
-+static int
-+padlock_multi_final(EVP_MD_CTX *ctx, unsigned char *md)
-+{
-+ static const char padding[64] = { 0x80, };
-+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
-+ size_t mdsize = EVP_MD_CTX_size(ctx);
-+ void *aligned = PADLOCK_SHA_ALIGN(ddata->padlockctx);
-+
-+ if (ddata->used == ddata->total) {
-+ /* Sweet, everything fits in one buffer. */
-+ ddata->hash(aligned, ddata->buffer, ddata->used);
-+ } else {
-+ /* Hardware already hashed some buffers.
-+ * Do finalizing manually */
-+ union {
-+ uint64_t u64;
-+ uint32_t u32[2];
-+ } bits_le, bits;
-+ size_t lastblocklen, padlen;
-+
-+ /* BigEndianise the length. */
-+ bits_le.u64 = ddata->total * 8;
-+ bits.u32[1] = htonl(bits_le.u32[0]);
-+ bits.u32[0] = htonl(bits_le.u32[1]);
-+
-+ /* Append padding, leave space for length. */
-+ lastblocklen = ddata->total & 63;
-+ padlen = (lastblocklen < 56) ? (56 - lastblocklen) : ((64+56) - lastblocklen);
-+ padlock_multi_update(ctx, padding, padlen);
-+
-+ /* Length in BigEndian64 */
-+ padlock_multi_update(ctx, (const char *) &bits, sizeof(bits));
-+
-+ /* And finally calculate it */
-+ padlock_sha_nonfinalizing(ddata);
-+ }
-+ padlock_htonl_block(aligned, mdsize / sizeof(uint32_t));
-+ memcpy(md, aligned, mdsize);
-+
-+ return 1;
-+}
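When intermediate blocks have already been pushed through the hardware, padlock_multi_final() above has to rebuild the SHA finalization tail by hand: append a 0x80 byte, zero-pad so the message length becomes 56 mod 64, append the total length in bits as a big-endian 64-bit value, then run one more non-finalizing pass over the remainder. The tail computation in isolation, as a sketch (the helper name is ours):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

/* Build the SHA-1/SHA-256 finalization tail for a message of 'total'
 * bytes: 0x80, zero padding up to 56 mod 64, then the bit count as a
 * big-endian 64-bit value. Returns the tail length (9..72 bytes). */
static size_t sha_tail_sketch(uint64_t total, unsigned char tail[72])
{
        uint64_t bits = total * 8;
        size_t last = (size_t)(total & 63);
        size_t padlen = (last < 56) ? (56 - last) : (64 + 56 - last);
        uint32_t hi = htonl((uint32_t)(bits >> 32));
        uint32_t lo = htonl((uint32_t)bits);

        memset(tail, 0, padlen);
        tail[0] = 0x80;
        memcpy(tail + padlen, &hi, 4);
        memcpy(tail + padlen + 4, &lo, 4);
        return padlen + 8;
}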
-+
-+static int
-+padlock_copy_update(EVP_MD_CTX *ctx, const void *data, size_t len)
-+{
-+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
-+
-+ if (ddata->used + len > sizeof(ddata->smallbuffer)) {
-+ ddata->update = padlock_multi_update;
-+ ddata->final = padlock_multi_final;
-+
-+ if (ddata->used != 0) {
-+ void *buffer;
-+ size_t mmap_size;
-+
-+ buffer = padlock_allocate_buffer(&mmap_size);
-+ memcpy(buffer, ddata->smallbuffer, ddata->used);
-+ ddata->buffer = buffer;
-+ ddata->total = ddata->used;
-+ ddata->mmap_size = mmap_size;
-+ } else {
-+ ddata->buffer = NULL;
-+ ddata->total = 0;
-+ }
-+
-+ memcpy(PADLOCK_SHA_ALIGN(ddata->padlockctx), ddata->initvector,
-+ PADLOCK_SHA_INITVECTOR_SIZE);
-+
-+ return padlock_multi_update(ctx, data, len);
-+ }
-+
-+ memcpy(&ddata->smallbuffer[ddata->used], data, len);
-+ ddata->used += len;
-+
-+ return 1;
-+}
-+
-+static int
-+padlock_oneshot_update(EVP_MD_CTX *ctx, const void *data, size_t len)
-+{
-+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
-+ void *aligned = PADLOCK_SHA_ALIGN(ddata->padlockctx);
-+ size_t mdsize = EVP_MD_CTX_size(ctx);
-+
-+ /* Oneshot update is only possible if context flags indicate so */
-+ if (!(ctx->flags & EVP_MD_CTX_FLAG_ONESHOT)) {
-+ ddata->update = padlock_copy_update;
-+ ddata->final = padlock_copy_final;
-+ return padlock_copy_update(ctx, data, len);
-+ }
-+
-+ memcpy(aligned, ddata->initvector, PADLOCK_SHA_INITVECTOR_SIZE);
-+ ddata->hash(aligned, data, len);
-+ padlock_htonl_block(aligned, mdsize / sizeof(uint32_t));
-+ ddata->used += len;
-+
-+ return 1;
-+}
-+
-+static int
-+padlock_sha_init(struct padlock_digest_data *ddata)
-+{
-+ ddata->used = 0;
-+ ddata->update = padlock_oneshot_update;
-+ ddata->final = padlock_oneshot_final;
-+
-+ return 1;
-+}
-+
-+static int
-+padlock_sha1_init(EVP_MD_CTX *ctx)
-+{
-+ static uint32_t sha1_initvector[8] = {
-+ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476,
-+ 0xC3D2E1F0
-+ };
-+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
-+
-+ ddata->hash = padlock_do_sha1;
-+ ddata->initvector = sha1_initvector;
-+ return padlock_sha_init(ddata);
-+}
-+
-+static int
-+padlock_sha224_init(EVP_MD_CTX *ctx)
-+{
-+ static uint32_t sha224_initvector[] = {
-+ 0xC1059ED8, 0x367CD507, 0x3070DD17, 0xF70E5939,
-+ 0xFFC00B31, 0x68581511, 0x64F98FA7, 0xBEFA4FA4,
-+ };
-+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
-+
-+ ddata->hash = padlock_do_sha256;
-+ ddata->initvector = sha224_initvector;
-+ return padlock_sha_init(ddata);
-+}
-+
-+static int
-+padlock_sha256_init(EVP_MD_CTX *ctx)
-+{
-+ static uint32_t sha256_initvector[] = {
-+ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
-+ 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
-+ };
-+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
-+
-+ ddata->hash = padlock_do_sha256;
-+ ddata->initvector = sha256_initvector;
-+ return padlock_sha_init(ddata);
-+}
-+
-+static int
-+padlock_sha_update(EVP_MD_CTX *ctx, const void *data, size_t length)
-+{
-+ return DIGEST_DATA(ctx)->update(ctx, data, length);
-+}
-+
-+static int
-+padlock_sha_final(EVP_MD_CTX *ctx, unsigned char *md)
-+{
-+ return DIGEST_DATA(ctx)->final(ctx, md);
-+}
-+
-+static int
-+padlock_sha_copy(EVP_MD_CTX *to, const EVP_MD_CTX *from)
-+{
-+ struct padlock_digest_data *dfrom = DIGEST_DATA(from);
-+ struct padlock_digest_data *dto = DIGEST_DATA(to);
-+
-+ /* When we get here, dto is already a memcpied from dfrom,
-+ * it's ok for all other cases except when data is on a separate
-+ * mmapped area. It would be nice if we had a flag, if this is
-+ * a "finalization copy", so we could do finalizing SHA here and
-+ * store the result to *to precalculated. But there's no such
-+ * flag as to is reset on copy. */
-+
-+ if (dfrom->update != padlock_copy_update) {
-+ /* Recopy the context, as they might have different alignment */
-+ memcpy(PADLOCK_SHA_ALIGN(dto->padlockctx),
-+ PADLOCK_SHA_ALIGN(dfrom->padlockctx),
-+ PADLOCK_SHA_INITVECTOR_SIZE);
-+ }
-+
-+ if (dfrom->update == padlock_multi_update) {
-+ /* Update total, and copy the buffer */
-+ dto->total = dfrom->total - dfrom->used;
-+ dto->buffer = NULL;
-+ dto->used = 0;
-+ dto->mmap_size = 0;
-+ if (dfrom->used != 0)
-+ padlock_sha_update(to, dfrom->buffer, dfrom->used);
-+ }
-+
-+ return 1;
-+}
-+
-+static int
-+padlock_sha_cleanup(EVP_MD_CTX *ctx)
-+{
-+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
-+
-+ if (ddata->update == padlock_multi_update && ddata->buffer != NULL)
-+ padlock_free_buffer(ddata->buffer);
-+
-+ return 1;
-+}
-+
-+static const EVP_MD padlock_sha1_md = {
-+ NID_sha1,
-+ NID_sha1WithRSAEncryption,
-+ SHA_DIGEST_LENGTH,
-+ 0,
-+ padlock_sha1_init,
-+ padlock_sha_update,
-+ padlock_sha_final,
-+ padlock_sha_copy,
-+ padlock_sha_cleanup,
-+ EVP_PKEY_RSA_method,
-+ SHA_CBLOCK,
-+ sizeof(struct padlock_digest_data),
-+};
-+
-+static const EVP_MD padlock_dss1_md = {
-+ NID_dsa,
-+ NID_dsaWithSHA1,
-+ SHA_DIGEST_LENGTH,
-+ 0,
-+ padlock_sha1_init,
-+ padlock_sha_update,
-+ padlock_sha_final,
-+ padlock_sha_copy,
-+ padlock_sha_cleanup,
-+ EVP_PKEY_DSA_method,
-+ SHA_CBLOCK,
-+ sizeof(struct padlock_digest_data),
-+};
-+
-+static const EVP_MD padlock_sha224_md = {
-+ NID_sha224,
-+ NID_sha224WithRSAEncryption,
-+ SHA224_DIGEST_LENGTH,
-+ 0,
-+ padlock_sha224_init,
-+ padlock_sha_update,
-+ padlock_sha_final,
-+ padlock_sha_copy,
-+ padlock_sha_cleanup,
-+ EVP_PKEY_RSA_method,
-+ SHA_CBLOCK,
-+ sizeof(struct padlock_digest_data),
-+};
-+
-+static const EVP_MD padlock_sha256_md = {
-+ NID_sha256,
-+ NID_sha256WithRSAEncryption,
-+ SHA256_DIGEST_LENGTH,
-+ 0,
-+ padlock_sha256_init,
-+ padlock_sha_update,
-+ padlock_sha_final,
-+ padlock_sha_copy,
-+ padlock_sha_cleanup,
-+ EVP_PKEY_RSA_method,
-+ SHA_CBLOCK,
-+ sizeof(struct padlock_digest_data),
-+};
-+
-+static int padlock_digest_nids[] = {
-+#if !defined(OPENSSL_NO_SHA)
-+ NID_sha1,
-+ NID_dsa,
-+#endif
-+#if !defined(OPENSSL_NO_SHA256)
-+#if !defined(OPENSSL_NO_SHA224)
-+ NID_sha224,
-+#endif
-+ NID_sha256,
-+#endif
-+};
-+
-+static int padlock_digest_nids_num = sizeof(padlock_digest_nids)/sizeof(padlock_digest_nids[0]);
-+
-+static int
-+padlock_digests (ENGINE *e, const EVP_MD **digest, const int **nids, int nid)
-+{
-+ /* No specific digest => return a list of supported nids ... */
-+ if (!digest) {
-+ *nids = padlock_digest_nids;
-+ return padlock_digest_nids_num;
-+ }
-+
-+ /* ... or the requested "digest" otherwise */
-+ switch (nid) {
-+#if !defined(OPENSSL_NO_SHA)
-+ case NID_sha1:
-+ *digest = &padlock_sha1_md;
-+ break;
-+ case NID_dsa:
-+ *digest = &padlock_dss1_md;
-+ break;
-+#endif
-+
-+#if !defined(OPENSSL_NO_SHA256)
-+#if !defined(OPENSSL_NO_SHA224)
-+ case NID_sha224:
-+ *digest = &padlock_sha224_md;
-+ break;
-+#endif /* OPENSSL_NO_SHA224 */
-+
-+ case NID_sha256:
-+ *digest = &padlock_sha256_md;
-+ break;
-+#endif /* OPENSSL_NO_SHA256 */
-+
-+ default:
-+ /* Sorry, we don't support this NID */
-+ *digest = NULL;
-+ return 0;
-+ }
-+
-+ return 1;
-+}
-+
-+#endif /* OPENSSL_NO_SHA */
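Once padlock_digests() is registered via ENGINE_set_digests(), applications pick the accelerated digests up through the ordinary EVP interface. A minimal usage sketch against the OpenSSL 0.9.8 API follows (error handling mostly omitted); if the engine is unavailable, EVP simply falls back to the software SHA-1.

#include <stdio.h>
#include <string.h>
#include <openssl/engine.h>
#include <openssl/evp.h>

int main(void)
{
        unsigned char md[EVP_MAX_MD_SIZE];
        unsigned int mdlen, i;
        const char *msg = "hello";
        EVP_MD_CTX *ctx;
        ENGINE *e;

        ENGINE_load_builtin_engines();
        e = ENGINE_by_id("padlock");            /* NULL if not compiled in */
        if (e != NULL && !ENGINE_init(e)) {
                ENGINE_free(e);
                e = NULL;                       /* present but not functional */
        }

        ctx = EVP_MD_CTX_create();
        EVP_DigestInit_ex(ctx, EVP_sha1(), e);  /* e == NULL -> software SHA-1 */
        EVP_DigestUpdate(ctx, msg, strlen(msg));
        EVP_DigestFinal_ex(ctx, md, &mdlen);
        EVP_MD_CTX_destroy(ctx);

        for (i = 0; i < mdlen; i++)
                printf("%02x", md[i]);
        printf("\n");

        if (e != NULL) {
                ENGINE_finish(e);
                ENGINE_free(e);
        }
        return 0;
}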
-+
-+#ifndef PADLOCK_NO_RNG
- /* ===== Random Number Generator ===== */
- /*
- * This code is not engaged. The reason is that it does not comply
-@@ -1164,7 +1729,7 @@
- * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
- * provide meaningful error control...
- */
--/* Wrapper that provides an interface between the API and
-+/* Wrapper that provides an interface between the API and
- the raw PadLock RNG */
- static int
- padlock_rand_bytes(unsigned char *output, int count)
-@@ -1212,6 +1777,7 @@
- padlock_rand_bytes, /* pseudorand */
- padlock_rand_status, /* rand status */
- };
-+#endif /* PADLOCK_NO_RNG */
-
- #endif /* COMPILE_HW_PADLOCK */
-