author     Jakub Jirutka <jakub@jirutka.cz>    2018-07-08 13:54:27 +0200
committer  Jakub Jirutka <jakub@jirutka.cz>    2018-07-08 13:56:32 +0200
commit     2fdc8bcc6549290b131675ef42d52243c37f3879 (patch)
tree       b74487ad5a87f857fc24262fa82dce596d7913dc /main/openssl1.0/1002-backport-changes-from-upstream-padlock-module.patch
parent     56c7a6d7971e769c3dbec1bf8a8ef3021e34d654 (diff)
main/openssl: rename to openssl1.0 and update dependent aports
Diffstat (limited to 'main/openssl1.0/1002-backport-changes-from-upstream-padlock-module.patch')
-rw-r--r--  main/openssl1.0/1002-backport-changes-from-upstream-padlock-module.patch  200
1 file changed, 200 insertions, 0 deletions
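
The patch below extends OpenSSL 1.0's PadLock engine with an x86-64 code path so the VIA Nano's 64-bit mode is detected and used. The heart of the new padlock_available() is a CPUID probe: confirm the vendor string is "CentaurHauls", confirm the Centaur extended leaves exist, then read the Extended Feature Flags to see whether the AES unit (ACE) and the hardware RNG are present and enabled. The standalone sketch below mirrors that probe using GCC's <cpuid.h> helpers instead of hand-coded asm; it is an illustration only, not part of the patch or of OpenSSL, and assumes a GCC/clang x86 toolchain.

    /* Minimal sketch (not part of the patch): the CPUID probe that the new
     * x86-64 padlock_available() performs, written with <cpuid.h> helpers. */
    #include <stdio.h>
    #include <string.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];

        /* Leaf 0: the vendor string is returned in EBX, EDX, ECX (in that order). */
        __cpuid(0, eax, ebx, ecx, edx);
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
        if (strcmp(vendor, "CentaurHauls") != 0) {
            puts("not a VIA/Centaur CPU");
            return 0;
        }

        /* Leaf 0xC0000000 reports the highest supported Centaur leaf in EAX. */
        __cpuid(0xC0000000, eax, ebx, ecx, edx);
        if (eax < 0xC0000001) {
            puts("no Centaur extended feature flags");
            return 0;
        }

        /* Leaf 0xC0000001, EDX: bits 7:6 = ACE (xcrypt) present and enabled,
         * bits 3:2 = RNG (xstore) present and enabled -- the same masks the
         * patch tests. */
        __cpuid(0xC0000001, eax, ebx, ecx, edx);
        printf("ACE: %s\n", (edx & (0x3 << 6)) == (0x3 << 6) ? "yes" : "no");
        printf("RNG: %s\n", (edx & (0x3 << 2)) == (0x3 << 2) ? "yes" : "no");
        return 0;
    }
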
diff --git a/main/openssl1.0/1002-backport-changes-from-upstream-padlock-module.patch b/main/openssl1.0/1002-backport-changes-from-upstream-padlock-module.patch
new file mode 100644
index 0000000000..f63bbcd1ce
--- /dev/null
+++ b/main/openssl1.0/1002-backport-changes-from-upstream-padlock-module.patch
@@ -0,0 +1,200 @@
+From ba17588a940ee712c3ef6d458adb1087f0c84521 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
+Date: Thu, 5 Feb 2015 09:28:10 +0200
+Subject: [PATCH] backport changes from upstream padlock module.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Includes support for VIA Nano 64-bit mode.
+
+Signed-off-by: Timo Teräs <timo.teras@iki.fi>
+---
+ engines/e_padlock.c | 142 +++++++++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 125 insertions(+), 17 deletions(-)
+
+diff --git a/engines/e_padlock.c b/engines/e_padlock.c
+index 2898e4c..94406cb 100644
+--- a/engines/e_padlock.c
++++ b/engines/e_padlock.c
+@@ -101,7 +101,10 @@
+ */
+ # undef COMPILE_HW_PADLOCK
+ # if !defined(I386_ONLY) && !defined(OPENSSL_NO_INLINE_ASM)
+-# if (defined(__GNUC__) && (defined(__i386__) || defined(__i386))) || \
++# if (defined(__GNUC__) && __GNUC__>=2 && \
++ (defined(__i386__) || defined(__i386) || \
++ defined(__x86_64__) || defined(__x86_64)) \
++ ) || \
+ (defined(_MSC_VER) && defined(_M_IX86))
+ # define COMPILE_HW_PADLOCK
+ # endif
+@@ -303,6 +306,7 @@ static volatile struct padlock_cipher_data *padlock_saved_context;
+ * =======================================================
+ */
+ # if defined(__GNUC__) && __GNUC__>=2
++# if defined(__i386__) || defined(__i386)
+ /*
+ * As for excessive "push %ebx"/"pop %ebx" found all over.
+ * When generating position-independent code GCC won't let
+@@ -379,22 +383,6 @@ static int padlock_available(void)
+ return padlock_use_ace + padlock_use_rng;
+ }
+
+-# ifndef OPENSSL_NO_AES
+-# ifndef AES_ASM
+-/* Our own htonl()/ntohl() */
+-static inline void padlock_bswapl(AES_KEY *ks)
+-{
+- size_t i = sizeof(ks->rd_key) / sizeof(ks->rd_key[0]);
+- unsigned int *key = ks->rd_key;
+-
+- while (i--) {
+- asm volatile ("bswapl %0":"+r" (*key));
+- key++;
+- }
+-}
+-# endif
+-# endif
+-
+ /*
+ * Force key reload from memory to the CPU microcode. Loading EFLAGS from the
+ * stack clears EFLAGS[30] which does the trick.
+@@ -448,6 +436,110 @@ static inline void *name(size_t cnt, \
+ : "edx", "cc", "memory"); \
+ return iv; \
+ }
++#endif
++
++#elif defined(__x86_64__) || defined(__x86_64)
++
++/* Load supported features of the CPU to see if
++ the PadLock is available. */
++static int
++padlock_available(void)
++{
++ char vendor_string[16];
++ unsigned int eax, edx;
++
++ /* Are we running on the Centaur (VIA) CPU? */
++ eax = 0x00000000;
++ vendor_string[12] = 0;
++ asm volatile (
++ "cpuid\n"
++ "movl %%ebx,(%1)\n"
++ "movl %%edx,4(%1)\n"
++ "movl %%ecx,8(%1)\n"
++ : "+a"(eax) : "r"(vendor_string) : "rbx", "rcx", "rdx");
++ if (strcmp(vendor_string, "CentaurHauls") != 0)
++ return 0;
++
++ /* Check for Centaur Extended Feature Flags presence */
++ eax = 0xC0000000;
++ asm volatile ("cpuid"
++ : "+a"(eax) : : "rbx", "rcx", "rdx");
++ if (eax < 0xC0000001)
++ return 0;
++
++ /* Read the Centaur Extended Feature Flags */
++ eax = 0xC0000001;
++ asm volatile ("cpuid"
++ : "+a"(eax), "=d"(edx) : : "rbx", "rcx");
++
++ /* Fill up some flags */
++ padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
++ padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));
++
++ return padlock_use_ace + padlock_use_rng;
++}
++
++/* Force key reload from memory to the CPU microcode.
++ Loading EFLAGS from the stack clears EFLAGS[30]
++ which does the trick. */
++static inline void
++padlock_reload_key(void)
++{
++ asm volatile ("pushfq; popfq");
++}
++
++#ifndef OPENSSL_NO_AES
++/*
++ * This is heuristic key context tracing. At first one
++ * believes that one should use atomic swap instructions,
++ * but it's not actually necessary. Point is that if
++ * padlock_saved_context was changed by another thread
++ * after we've read it and before we compare it with cdata,
++ * our key *shall* be reloaded upon thread context switch
++ * and we are therefore set in either case...
++ */
++static inline void
++padlock_verify_context(struct padlock_cipher_data *cdata)
++{
++ asm volatile (
++ "pushfq\n"
++" btl $30,(%%rsp)\n"
++" jnc 1f\n"
++" cmpq %2,%1\n"
++" je 1f\n"
++" popfq\n"
++" subq $8,%%rsp\n"
++"1: addq $8,%%rsp\n"
++" movq %2,%0"
++ :"+m"(padlock_saved_context)
++ : "r"(padlock_saved_context), "r"(cdata) : "cc");
++}
++
++/* Template for padlock_xcrypt_* modes */
++/* BIG FAT WARNING:
++ * The offsets used with 'leal' instructions
++ * describe items of the 'padlock_cipher_data'
++ * structure.
++ */
++#define PADLOCK_XCRYPT_ASM(name,rep_xcrypt) \
++static inline void *name(size_t cnt, \
++ struct padlock_cipher_data *cdata, \
++ void *out, const void *inp) \
++{ void *iv; \
++ asm volatile ( "leaq 16(%0),%%rdx\n" \
++ " leaq 32(%0),%%rbx\n" \
++ rep_xcrypt "\n" \
++ : "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \
++ : "0"(cdata), "1"(cnt), "2"(out), "3"(inp) \
++ : "rbx", "rdx", "cc", "memory"); \
++ return iv; \
++}
++#endif
++
++#endif /* cpu */
++
++
++# ifndef OPENSSL_NO_AES
+
+ /* Generate all functions with appropriate opcodes */
+ /* rep xcryptecb */
+@@ -458,7 +550,23 @@ PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb, ".byte 0xf3,0x0f,0xa7,0xc8")
+ PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb, ".byte 0xf3,0x0f,0xa7,0xe0")
+ /* rep xcryptofb */
+ PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb, ".byte 0xf3,0x0f,0xa7,0xe8")
++
++# ifndef AES_ASM
++/* Our own htonl()/ntohl() */
++static inline void padlock_bswapl(AES_KEY *ks)
++{
++ size_t i = sizeof(ks->rd_key) / sizeof(ks->rd_key[0]);
++ unsigned int *key = ks->rd_key;
++
++ while (i--) {
++ asm volatile ("bswapl %0":"+r" (*key));
++ key++;
++ }
++}
++# endif
++
+ # endif
++
+ /* The RNG call itself */
+ static inline unsigned int padlock_xstore(void *addr, unsigned int edx_in)
+ {
+--
+2.2.2
+
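
Once openssl1.0 is rebuilt with this patch, the engine is registered under the id "padlock" and can be probed from the command line with: openssl engine -t -c padlock. Selecting it programmatically goes through OpenSSL 1.0's ENGINE API; the sketch below is an assumed usage example, not part of the aport, and presumes libcrypto was built with the engine compiled in (COMPILE_HW_PADLOCK) and is running on VIA hardware with ACE or the RNG enabled.

    /* Minimal sketch: route AES and the RNG through the PadLock engine
     * via OpenSSL 1.0's ENGINE API. */
    #include <stdio.h>
    #include <openssl/engine.h>

    int main(void)
    {
        ENGINE *e;

        ENGINE_load_builtin_engines();      /* registers the static padlock engine */

        e = ENGINE_by_id("padlock");
        if (e == NULL) {
            fprintf(stderr, "padlock engine not built into this libcrypto\n");
            return 1;
        }
        if (!ENGINE_init(e)) {              /* fails if no PadLock ACE/RNG was detected */
            fprintf(stderr, "no PadLock hardware detected\n");
            ENGINE_free(e);
            return 1;
        }

        /* Make the hardware implementations the process-wide defaults. */
        ENGINE_set_default_ciphers(e);
        ENGINE_set_default_RAND(e);
        printf("using engine: %s\n", ENGINE_get_name(e));

        ENGINE_finish(e);
        ENGINE_free(e);
        return 0;
    }
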