author    Timo Teräs <timo.teras@iki.fi>  2012-08-02 17:20:47 +0300
committer Timo Teräs <timo.teras@iki.fi>  2012-08-02 17:23:40 +0300
commit    cdbddc83dc90972ef93e2cbf8bd706d0e0ab129f (patch)
tree      23d27eff2ee7aebf1a6297a41217d1b39fa1c28c /main/openssl/0002-engines-e_padlock-backport-cvs-head-changes.patch
parent    ffb1c7b4dc64a49a5c873e9a119e3da57f1a5e86 (diff)
main/openssl: refresh hmac/oneshot and padlock patches
* fixed hmac oneshot flag to work as expected
* renamed the patch series, and rebased against 1.0.1c
Diffstat (limited to 'main/openssl/0002-engines-e_padlock-backport-cvs-head-changes.patch')
-rw-r--r--  main/openssl/0002-engines-e_padlock-backport-cvs-head-changes.patch  203
1 file changed, 203 insertions(+), 0 deletions(-)
diff --git a/main/openssl/0002-engines-e_padlock-backport-cvs-head-changes.patch b/main/openssl/0002-engines-e_padlock-backport-cvs-head-changes.patch
new file mode 100644
index 0000000000..c508c9c5a2
--- /dev/null
+++ b/main/openssl/0002-engines-e_padlock-backport-cvs-head-changes.patch
@@ -0,0 +1,203 @@
+From 6e182155643a6aeb07cbba1e7f79ac1adfcddad2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
+Date: Wed, 28 Jul 2010 08:29:09 +0300
+Subject: [PATCH 2/4] engines/e_padlock: backport cvs head changes
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Includes support for VIA Nano 64-bit mode.
+
+Signed-off-by: Timo Teräs <timo.teras@iki.fi>
+---
+ engines/e_padlock.c | 140 +++++++++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 122 insertions(+), 18 deletions(-)
+
+diff --git a/engines/e_padlock.c b/engines/e_padlock.c
+index 9f7a85a..6ab42d2 100644
+--- a/engines/e_padlock.c
++++ b/engines/e_padlock.c
+@@ -101,7 +101,10 @@
+ compiler choice is limited to GCC and Microsoft C. */
+ #undef COMPILE_HW_PADLOCK
+ #if !defined(I386_ONLY) && !defined(OPENSSL_NO_INLINE_ASM)
+-# if (defined(__GNUC__) && (defined(__i386__) || defined(__i386))) || \
++# if (defined(__GNUC__) && __GNUC__>=2 && \
++ (defined(__i386__) || defined(__i386) || \
++ defined(__x86_64__) || defined(__x86_64)) \
++ ) || \
+ (defined(_MSC_VER) && defined(_M_IX86))
+ # define COMPILE_HW_PADLOCK
+ # endif
+@@ -304,6 +307,7 @@ static volatile struct padlock_cipher_data *padlock_saved_context;
+ * =======================================================
+ */
+ #if defined(__GNUC__) && __GNUC__>=2
++#if defined(__i386__) || defined(__i386)
+ /*
+ * As for excessive "push %ebx"/"pop %ebx" found all over.
+ * When generating position-independent code GCC won't let
+@@ -383,21 +387,6 @@ padlock_available(void)
+ return padlock_use_ace + padlock_use_rng;
+ }
+
+-#ifndef OPENSSL_NO_AES
+-/* Our own htonl()/ntohl() */
+-static inline void
+-padlock_bswapl(AES_KEY *ks)
+-{
+- size_t i = sizeof(ks->rd_key)/sizeof(ks->rd_key[0]);
+- unsigned int *key = ks->rd_key;
+-
+- while (i--) {
+- asm volatile ("bswapl %0" : "+r"(*key));
+- key++;
+- }
+-}
+-#endif
+-
+ /* Force key reload from memory to the CPU microcode.
+ Loading EFLAGS from the stack clears EFLAGS[30]
+ which does the trick. */
+@@ -455,12 +444,127 @@ static inline void *name(size_t cnt, \
+ : "edx", "cc", "memory"); \
+ return iv; \
+ }
++#endif
++
++#elif defined(__x86_64__) || defined(__x86_64)
++
++/* Load supported features of the CPU to see if
++ the PadLock is available. */
++static int
++padlock_available(void)
++{
++ char vendor_string[16];
++ unsigned int eax, edx;
+
++ /* Are we running on the Centaur (VIA) CPU? */
++ eax = 0x00000000;
++ vendor_string[12] = 0;
++ asm volatile (
++ "cpuid\n"
++ "movl %%ebx,(%1)\n"
++ "movl %%edx,4(%1)\n"
++ "movl %%ecx,8(%1)\n"
++ : "+a"(eax) : "r"(vendor_string) : "rbx", "rcx", "rdx");
++ if (strcmp(vendor_string, "CentaurHauls") != 0)
++ return 0;
++
++ /* Check for Centaur Extended Feature Flags presence */
++ eax = 0xC0000000;
++ asm volatile ("cpuid"
++ : "+a"(eax) : : "rbx", "rcx", "rdx");
++ if (eax < 0xC0000001)
++ return 0;
++
++ /* Read the Centaur Extended Feature Flags */
++ eax = 0xC0000001;
++ asm volatile ("cpuid"
++ : "+a"(eax), "=d"(edx) : : "rbx", "rcx");
++
++ /* Fill up some flags */
++ padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
++ padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));
++
++ return padlock_use_ace + padlock_use_rng;
++}
++
++/* Force key reload from memory to the CPU microcode.
++ Loading EFLAGS from the stack clears EFLAGS[30]
++ which does the trick. */
++static inline void
++padlock_reload_key(void)
++{
++ asm volatile ("pushfq; popfq");
++}
++
++#ifndef OPENSSL_NO_AES
++/*
++ * This is heuristic key context tracing. At first one
++ * believes that one should use atomic swap instructions,
++ * but it's not actually necessary. Point is that if
++ * padlock_saved_context was changed by another thread
++ * after we've read it and before we compare it with cdata,
++ * our key *shall* be reloaded upon thread context switch
++ * and we are therefore set in either case...
++ */
++static inline void
++padlock_verify_context(struct padlock_cipher_data *cdata)
++{
++ asm volatile (
++ "pushfq\n"
++" btl $30,(%%rsp)\n"
++" jnc 1f\n"
++" cmpq %2,%1\n"
++" je 1f\n"
++" popfq\n"
++" subq $8,%%rsp\n"
++"1: addq $8,%%rsp\n"
++" movq %2,%0"
++ :"+m"(padlock_saved_context)
++ : "r"(padlock_saved_context), "r"(cdata) : "cc");
++}
++
++/* Template for padlock_xcrypt_* modes */
++/* BIG FAT WARNING:
++ * The offsets used with 'leal' instructions
++ * describe items of the 'padlock_cipher_data'
++ * structure.
++ */
++#define PADLOCK_XCRYPT_ASM(name,rep_xcrypt) \
++static inline void *name(size_t cnt, \
++ struct padlock_cipher_data *cdata, \
++ void *out, const void *inp) \
++{ void *iv; \
++ asm volatile ( "leaq 16(%0),%%rdx\n" \
++ " leaq 32(%0),%%rbx\n" \
++ rep_xcrypt "\n" \
++ : "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \
++ : "0"(cdata), "1"(cnt), "2"(out), "3"(inp) \
++ : "rbx", "rdx", "cc", "memory"); \
++ return iv; \
++}
++#endif
++
++#endif /* cpu */
++
++#ifndef OPENSSL_NO_AES
+ /* Generate all functions with appropriate opcodes */
+ PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb, ".byte 0xf3,0x0f,0xa7,0xc8") /* rep xcryptecb */
+ PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc, ".byte 0xf3,0x0f,0xa7,0xd0") /* rep xcryptcbc */
+ PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb, ".byte 0xf3,0x0f,0xa7,0xe0") /* rep xcryptcfb */
+ PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb, ".byte 0xf3,0x0f,0xa7,0xe8") /* rep xcryptofb */
++
++/* Our own htonl()/ntohl() */
++static inline void
++padlock_bswapl(AES_KEY *ks)
++{
++ size_t i = sizeof(ks->rd_key)/sizeof(ks->rd_key[0]);
++ unsigned int *key = ks->rd_key;
++
++ while (i--) {
++ asm volatile ("bswapl %0" : "+r"(*key));
++ key++;
++ }
++}
+ #endif
+
+ /* The RNG call itself */
+@@ -491,8 +595,8 @@ padlock_xstore(void *addr, unsigned int edx_in)
+ static inline unsigned char *
+ padlock_memcpy(void *dst,const void *src,size_t n)
+ {
+- long *d=dst;
+- const long *s=src;
++ size_t *d=dst;
++ const size_t *s=src;
+
+ n /= sizeof(*d);
+ do { *d++ = *s++; } while (--n);
+--
+1.7.11.3
+
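
Note (not part of the patch above): the 64-bit padlock_available() added by this patch probes CPUID leaf 0 for the "CentaurHauls" vendor string and then the Centaur extended leaves 0xC0000000/0xC0000001 for the paired ACE and RNG flags in EDX. The following is a minimal standalone C sketch of that detection logic, assuming GCC or clang on x86/x86-64 with <cpuid.h>; the __cpuid macro used here comes from the compiler header, not from OpenSSL or the patch.

/* Illustrative sketch only: detect a VIA/Centaur CPU and its PadLock
 * ACE/RNG units with CPUID, roughly as the new padlock_available() does. */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	/* Leaf 0: the vendor string is returned in EBX:EDX:ECX. */
	__cpuid(0, eax, ebx, ecx, edx);
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
	if (strcmp(vendor, "CentaurHauls") != 0) {
		puts("not a VIA/Centaur CPU");
		return 0;
	}

	/* Centaur extended leaves: 0xC0000000 reports the highest
	 * supported leaf, 0xC0000001 returns feature flags in EDX. */
	__cpuid(0xC0000000, eax, ebx, ecx, edx);
	if (eax < 0xC0000001) {
		puts("no Centaur extended feature flags");
		return 0;
	}
	__cpuid(0xC0000001, eax, ebx, ecx, edx);

	/* Bits 7:6 = ACE present and enabled; bits 3:2 = RNG present and enabled. */
	printf("PadLock ACE: %s\n", (edx & (0x3 << 6)) == (0x3 << 6) ? "yes" : "no");
	printf("PadLock RNG: %s\n", (edx & (0x3 << 2)) == (0x3 << 2) ? "yes" : "no");
	return 0;
}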