diff -up chromium-61.0.3163.79/base/numerics/safe_math_clang_gcc_impl.h.fixgccagain chromium-61.0.3163.79/base/numerics/safe_math_clang_gcc_impl.h
--- chromium-61.0.3163.79/base/numerics/safe_math_clang_gcc_impl.h.fixgccagain	2017-09-06 15:48:27.560803028 -0400
+++ base/numerics/safe_math_clang_gcc_impl.h	2017-09-06 15:50:08.715853695 -0400
@@ -126,6 +126,7 @@ struct ClampedAddFastOp {
   }
 };
 
+#if defined(__clang__)  // Not supported on GCC.
 // This is the fastest negation on Intel, and a decent fallback on arm.
 __attribute__((always_inline)) inline int8_t ClampedNegate(uint8_t value) {
   uint8_t carry;
@@ -169,6 +170,7 @@ __attribute__((always_inline)) inline in
 __attribute__((always_inline)) inline int64_t ClampedNegate(int64_t value) {
   return ClampedNegate(static_cast<uint64_t>(value));
 }
+#endif
 
 template <typename T, typename U>
 struct ClampedSubFastOp {
@@ -180,6 +182,7 @@ struct ClampedSubFastOp {
       return ClampedSubFastAsmOp<T, U, V>::template Do(x, y);
     }
 
+#if defined(__clang__)  // Not supported on GCC.
     // Fast path for generic clamped negation.
     if (std::is_same<T, V>::value && std::is_same<U, V>::value &&
         IsCompileTimeConstant(x) && x == 0 && !IsCompileTimeConstant(y)) {
@@ -190,6 +193,7 @@ struct ClampedSubFastOp {
               IntegerBitsPlusSign<V>::value, std::is_signed<V>::value>::type>(
           y));
     }
+#endif
 
     V result;
     return !__builtin_sub_overflow(x, y, &result)
diff -up chromium-61.0.3163.79/base/numerics/safe_math_shared_impl.h.fixgcc3 chromium-61.0.3163.79/base/numerics/safe_math_shared_impl.h
--- base/numerics/safe_math_shared_impl.h.fixgcc3	2017-09-06 16:30:15.898454585 -0400
+++ base/numerics/safe_math_shared_impl.h	2017-09-06 16:30:27.570229539 -0400
@@ -21,8 +21,7 @@
 #if !defined(__native_client__) && \
     ((defined(__clang__) && \
       ((__clang_major__ > 3) || \
-       (__clang_major__ == 3 && __clang_minor__ >= 4))) || \
-     (defined(__GNUC__) && __GNUC__ >= 5))
+       (__clang_major__ == 3 && __clang_minor__ >= 4))))
 #include "base/numerics/safe_math_clang_gcc_impl.h"
 #define BASE_HAS_OPTIMIZED_SAFE_MATH (1)
 #else