Fixed some more compiler warnings
git-svn-id: svn://scm.gforge.inria.fr/svnroot/ecm/trunk@1238 404564d9-a503-0410-82bf-e18ce2cf3989
akruppa committed Apr 29, 2008
1 parent c89dc25 commit 957e84b
Showing 3 changed files with 23 additions and 16 deletions.
2 changes: 2 additions & 0 deletions auxlib.c
@@ -49,8 +49,10 @@
#include <stdint.h>
#else
/* size_t is an unsigned integer so this ought to work */
+#ifndef SIZE_MAX
#define SIZE_MAX (~((size_t) 0))
+#endif
#endif

#define VERBOSE __ECM(verbose)
static int VERBOSE = OUTPUT_NORMAL;
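Note (not part of the commit): the added #ifndef guard means the fallback definition of SIZE_MAX is used only when no system header has already supplied one, which silences redefinition warnings on platforms that do provide it. A minimal standalone sketch of the same pattern, assuming a hypothetical HAVE_STDINT_H configure macro:

/* size_max_sketch.c -- illustrative only, mirrors the guarded fallback above. */
#include <stddef.h>            /* size_t */
#include <stdio.h>

#if defined(HAVE_STDINT_H) && HAVE_STDINT_H
#include <stdint.h>            /* C99 systems define SIZE_MAX here */
#endif

#ifndef SIZE_MAX               /* define the fallback only if still missing */
#define SIZE_MAX (~((size_t) 0))   /* all bits set in an unsigned type */
#endif

int main (void)
{
  printf ("SIZE_MAX = %lu\n", (unsigned long) SIZE_MAX);
  return 0;
}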
29 changes: 17 additions & 12 deletions longlong.h
@@ -164,14 +164,16 @@ MA 02110-1301, USA. */
/* clz_tab is required by mpn/alpha/cntlz.asm, and that file is built for
all alphas, even though ev67 and ev68 don't need it. */
#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
-#if defined (__GNUC__) && (HAVE_HOST_CPU_alphaev67 || HAVE_HOST_CPU_alphaev68)
+#if defined (__GNUC__) && \
+    (defined(HAVE_HOST_CPU_alphaev67) && HAVE_HOST_CPU_alphaev67 || \
+     defined(HAVE_HOST_CPU_alphaev68) && HAVE_HOST_CPU_alphaev68)
#define count_leading_zeros(COUNT,X) \
__asm__("ctlz %1,%0" : "=r"(COUNT) : "r"(X))
#define count_trailing_zeros(COUNT,X) \
__asm__("cttz %1,%0" : "=r"(COUNT) : "r"(X))
#else /* ! (ev67 || ev68) */
#ifndef LONGLONG_STANDALONE
-#if HAVE_ATTRIBUTE_CONST
+#if defined(HAVE_ATTRIBUTE_CONST) && HAVE_ATTRIBUTE_CONST
long __MPN(count_leading_zeros) _PROTO ((UDItype)) __attribute__ ((const));
#else
long __MPN(count_leading_zeros) _PROTO ((UDItype));
@@ -610,7 +612,8 @@ extern UWtype __MPN(udiv_qrnnd) _PROTO ((UWtype *, UWtype, UWtype, UWtype));
is. The faster count_leading_zeros are pressed into service via the
generic count_trailing_zeros at the end of the file. */

-#if HAVE_HOST_CPU_i586 || HAVE_HOST_CPU_pentium
+#if defined(HAVE_HOST_CPU_i586) && HAVE_HOST_CPU_i586 || \
+    defined(HAVE_HOST_CPU_pentium) && HAVE_HOST_CPU_pentium

/* The following should be a fixed 14 cycles or so. Some scheduling
opportunities should be available between the float load/store too. This
@@ -632,7 +635,7 @@ extern UWtype __MPN(udiv_qrnnd) _PROTO ((UWtype *, UWtype, UWtype, UWtype));
#define COUNT_LEADING_ZEROS_0 (0x3FF + 31)

#else /* ! pentium */
-#if HAVE_HOST_CPU_pentiummmx
+#if defined(HAVE_HOST_CPU_pentiummmx) && HAVE_HOST_CPU_pentiummmx

/* The following should be a fixed 14 or 15 cycles, but possibly plus an L1
cache miss reading from __clz_tab. It's favoured over the float above so
@@ -677,11 +680,11 @@ extern UWtype __MPN(udiv_qrnnd) _PROTO ((UWtype *, UWtype, UWtype, UWtype));
cost of one extra instruction. Do this for "i386" too, since that means
generic x86. */
#if __GNUC__ < 3 \
-    && (HAVE_HOST_CPU_i386 \
-        || HAVE_HOST_CPU_i686 \
-        || HAVE_HOST_CPU_pentiumpro \
-        || HAVE_HOST_CPU_pentium2 \
-        || HAVE_HOST_CPU_pentium3)
+    && (defined(HAVE_HOST_CPU_i386) && HAVE_HOST_CPU_i386 \
+        || defined(HAVE_HOST_CPU_i686) && HAVE_HOST_CPU_i686 \
+        || defined(HAVE_HOST_CPU_pentiumpro) && HAVE_HOST_CPU_pentiumpro \
+        || defined(HAVE_HOST_CPU_pentium2) && HAVE_HOST_CPU_pentium2 \
+        || defined(HAVE_HOST_CPU_pentium3) && HAVE_HOST_CPU_pentium3)
#define count_leading_zeros(count, x) \
do { \
USItype __cbtmp; \
@@ -1246,7 +1249,7 @@ extern UWtype __MPN(udiv_qrnnd) _PROTO ((UWtype *, UWtype, UWtype, UWtype));
__asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
#define UMUL_TIME 5

-#if HAVE_HOST_CPU_supersparc
+#if defined(HAVE_HOST_CPU_supersparc) && HAVE_HOST_CPU_supersparc
#define UDIV_TIME 60 /* SuperSPARC timing */
#else
/* Don't use this on SuperSPARC because its udiv only handles 53 bit
@@ -1496,7 +1499,8 @@ extern UWtype __MPN(udiv_qrnnd) _PROTO ((UWtype *, UWtype, UWtype, UWtype));
/* Note the prototypes are under !define(umul_ppmm) etc too, since the HPPA
versions above are different and we don't want to conflict. */

-#if ! defined (umul_ppmm) && HAVE_NATIVE_mpn_umul_ppmm
+#if ! defined (umul_ppmm) && \
+    defined(HAVE_NATIVE_mpn_umul_ppmm) && HAVE_NATIVE_mpn_umul_ppmm
#define mpn_umul_ppmm __MPN(umul_ppmm)
extern mp_limb_t mpn_umul_ppmm _PROTO ((mp_limb_t *, mp_limb_t, mp_limb_t));
#define umul_ppmm(wh, wl, u, v) \
@@ -1508,7 +1512,8 @@ extern mp_limb_t mpn_umul_ppmm _PROTO ((mp_limb_t *, mp_limb_t, mp_limb_t));
} while (0)
#endif

-#if ! defined (udiv_qrnnd) && HAVE_NATIVE_mpn_udiv_qrnnd
+#if ! defined (udiv_qrnnd) && \
+    defined(HAVE_NATIVE_mpn_udiv_qrnnd) && HAVE_NATIVE_mpn_udiv_qrnnd
#define mpn_udiv_qrnnd __MPN(udiv_qrnnd)
extern mp_limb_t mpn_udiv_qrnnd _PROTO ((mp_limb_t *,
mp_limb_t, mp_limb_t, mp_limb_t));
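Note (not part of the commit): the HAVE_HOST_CPU_* and HAVE_NATIVE_* macros typically come from GMP's configuration and may not be defined at all in the including project, and a plain #if MACRO then evaluates the undefined name as 0, which gcc flags with -Wundef. Rewriting the tests as defined(MACRO) && MACRO keeps the same truth value, and because && short-circuits the undefined name is never evaluated, so the warning disappears. A minimal sketch with a made-up macro FEATURE_FOO:

/* wundef_sketch.c -- illustrative only; FEATURE_FOO is a made-up macro.
   Compile with: gcc -Wundef -c wundef_sketch.c */

#if FEATURE_FOO                           /* warns: "FEATURE_FOO" is not defined */
int foo_old_style = 1;
#endif

#if defined(FEATURE_FOO) && FEATURE_FOO   /* no warning: && short-circuits */
int foo_new_style = 1;
#endif

int wundef_sketch_dummy;                  /* keep the translation unit non-empty */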
8 changes: 4 additions & 4 deletions mul_fft.c
@@ -377,7 +377,7 @@ MPN_FFT_STORE (mp_ptr dst, mp_size_t n, mp_limb_t d)
}
}
#else
-void static inline
+static inline void
MPN_FFT_STORE (mp_ptr dst, mp_size_t n, mp_limb_t d)
{
ASSERT(n >= 0);
@@ -387,7 +387,7 @@ MPN_FFT_STORE (mp_ptr dst, mp_size_t n, mp_limb_t d)
#endif

#if defined(__x86_64__) && defined(__GNUC__) && defined(OWN_MPN_FFT_COPY)
-void static inline
+static inline void
MPN_FFT_COPY (mp_ptr dst, const mp_srcptr src, mp_size_t n)
{
__asm__ __volatile__ ("rep movsq": "+c" (n), "+S" (src), "+D" (dst) :
@@ -398,14 +398,14 @@ MPN_FFT_COPY (mp_ptr dst, const mp_srcptr src, mp_size_t n)
FIXME: should "memory" go in the clobbered list? */
}
#elif defined(__i386__) && defined(__GNUC__) && defined(OWN_MPN_FFT_COPY)
-void static inline
+static inline void
MPN_FFT_COPY (mp_ptr dst, const mp_srcptr src, mp_size_t n)
{
__asm__ __volatile__ ("rep movsl" : "+c" (n), "+S" (src), "+D" (dst) :
"memory");
}
#elif defined(_MSC_VER) && !defined(_WIN64)
-void static inline
+static inline void
MPN_FFT_COPY (mp_ptr dst, const mp_srcptr src, mp_size_t n)
{
__asm
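Note (not part of the commit): C99 (6.11.5) declares placing a storage-class specifier anywhere but first in a declaration obsolescent, and gcc can warn about it ("'static' is not at beginning of declaration", via -Wold-style-declaration / -Wextra), so void static inline is reordered to static inline void with no change in meaning. A minimal sketch:

/* decl_order_sketch.c -- illustrative only.
   Compile with: gcc -Wextra -c decl_order_sketch.c */

void static inline               /* may warn: 'static' is not at beginning of declaration */
old_order_noop (void) { }

static inline void               /* fixed order: storage class first, as in the commit */
new_order_noop (void) { }

void decl_order_use_both (void)  /* reference both so the sketch is self-contained */
{
  old_order_noop ();
  new_order_noop ();
}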
