Use checked arithmetic intrinsics instead of asm, when possible
hikari-no-yume committed Aug 9, 2016
commit 0987737 (1 parent: f41b69e)
Showing 2 changed files with 54 additions and 3 deletions.
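Note: each __builtin_s*_overflow intrinsic returns true if the signed operation overflowed, and otherwise stores the exact result through its final pointer argument. A minimal standalone sketch of the try-integer-then-fall-back-to-double pattern the new code uses (not code from this commit; assumes GCC 5+ or a recent Clang):

#include <limits.h>
#include <stdio.h>

int main(void) {
	long a = LONG_MAX, b = 2, product;
	/* Returns true on overflow; on success, product holds the exact value. */
	if (__builtin_smull_overflow(a, b, &product)) {
		/* Overflowed a long: fall back to double, as the macros below do. */
		printf("overflow, double result: %f\n", (double) a * (double) b);
	} else {
		printf("exact result: %ld\n", product);
	}
	return 0;
}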
Zend/zend_multiply.h (23 additions, 1 deletion)
@@ -19,10 +19,32 @@

 /* $Id$ */
 
+#include "zend_portability.h"
+
 #ifndef ZEND_MULTIPLY_H
 #define ZEND_MULTIPLY_H
 
-#if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+#if __has_builtin(__builtin_smull_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
+	long __tmpvar; \
+	if (((usedval) = __builtin_smull_overflow((a), (b), &__tmpvar))) { \
+		(dval) = (double) (a) * (double) (b); \
+	} \
+	else (lval) = __tmpvar; \
+} while (0)
+
+#elif __has_builtin(__builtin_smulll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+
+#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
+	long long __tmpvar; \
+	if (((usedval) = __builtin_smulll_overflow((a), (b), &__tmpvar))) { \
+		(dval) = (double) (a) * (double) (b); \
+	} \
+	else (lval) = __tmpvar; \
+} while (0)
+
+#elif (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
 
 #define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
 	zend_long __tmpvar; \
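Note: both files now include zend_portability.h before testing __has_builtin, presumably so the #if lines stay valid on compilers (such as GCC at the time) that lack the __has_builtin operator. The conventional fallback, assumed here to be what zend_portability.h supplies, looks like:

/* Assumed portability shim: on compilers without __has_builtin, every
 * __has_builtin(...) test evaluates to 0, so the #elif chains above fall
 * through to the inline-asm and generic paths. */
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif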
Zend/zend_operators.h (31 additions, 2 deletions)
@@ -35,6 +35,7 @@
 #include <ieeefp.h>
 #endif
 
+#include "zend_portability.h"
 #include "zend_strtod.h"
 #include "zend_multiply.h"
 
@@ -520,7 +521,21 @@ static zend_always_inline void fast_long_decrement_function(zval *op1)

 static zend_always_inline void fast_long_add_function(zval *result, zval *op1, zval *op2)
 {
-#if defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
+#if __has_builtin(__builtin_saddl_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+	long lresult;
+	if (UNEXPECTED(__builtin_saddl_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &lresult))) {
+		ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) + (double) Z_LVAL_P(op2));
+	} else {
+		ZVAL_LONG(result, lresult);
+	}
+#elif __has_builtin(__builtin_saddll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+	long long llresult;
+	if (UNEXPECTED(__builtin_saddll_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &llresult))) {
+		ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) + (double) Z_LVAL_P(op2));
+	} else {
+		ZVAL_LONG(result, llresult);
+	}
+#elif defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
 	__asm__(
 		"movl (%1), %%eax\n\t"
 		"addl (%2), %%eax\n\t"
@@ -606,7 +621,21 @@ static zend_always_inline int fast_add_function(zval *result, zval *op1, zval *op2)

 static zend_always_inline void fast_long_sub_function(zval *result, zval *op1, zval *op2)
 {
-#if defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
+#if __has_builtin(__builtin_ssubl_overflow) && SIZEOF_LONG == SIZEOF_ZEND_LONG
+	long lresult;
+	if (UNEXPECTED(__builtin_ssubl_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &lresult))) {
+		ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) - (double) Z_LVAL_P(op2));
+	} else {
+		ZVAL_LONG(result, lresult);
+	}
+#elif __has_builtin(__builtin_ssubll_overflow) && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
+	long long llresult;
+	if (UNEXPECTED(__builtin_ssubll_overflow(Z_LVAL_P(op1), Z_LVAL_P(op2), &llresult))) {
+		ZVAL_DOUBLE(result, (double) Z_LVAL_P(op1) - (double) Z_LVAL_P(op2));
+	} else {
+		ZVAL_LONG(result, llresult);
+	}
+#elif defined(__GNUC__) && defined(__i386__) && !(4 == __GNUC__ && 8 == __GNUC_MINOR__)
 	__asm__(
 		"movl (%1), %%eax\n\t"
 		"subl (%2), %%eax\n\t"
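Note: the fast paths wrap the overflow test in UNEXPECTED() so the compiler treats the fallback branch as cold. A self-contained sketch of the same add-with-fallback logic outside the engine (hypothetical demo, not from this commit; UNEXPECTED is re-defined locally in the conventional __builtin_expect form):

#include <limits.h>
#include <stdio.h>

/* Conventional cold-branch hint, standing in for Zend's UNEXPECTED(). */
#define UNEXPECTED(cond) __builtin_expect(!!(cond), 0)

static void checked_add(long a, long b) {
	long lresult;
	if (UNEXPECTED(__builtin_saddl_overflow(a, b, &lresult))) {
		/* Overflow: deliver the result as a double instead, as PHP does. */
		printf("overflow, double fallback: %f\n", (double) a + (double) b);
	} else {
		printf("exact sum: %ld\n", lresult);
	}
}

int main(void) {
	checked_add(LONG_MAX, 1); /* overflows, takes the double path */
	checked_add(40, 2);       /* exact, prints 42 */
	return 0;
}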
