mirror of
https://github.com/xemu-project/xemu.git
synced 2024-11-23 11:39:53 +00:00
include/qemu/host-utils: Remove unused code in the *_overflow wrappers
According to commit cec07c0b61
the code in the #else paths was required
for GCC < 5.0 and Clang < 3.8. We don't support such old compilers
at all anymore, so we can remove these lines now. We keep the wrapper
functions, though, since they are easier to read and help to make sure that
the parameters have the right types.
Message-Id: <20220701025132.303469-1-thuth@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Thomas Huth <thuth@redhat.com>
This commit is contained in:
parent
1ec8c2c01e
commit
7a890b7566
@ -376,12 +376,7 @@ static inline uint64_t uabs64(int64_t v)
|
||||
*/
|
||||
/**
 * sadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *ret = x + y, and returns true if and only if that
 * value has been truncated (i.e. signed overflow occurred).
 */
static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    /*
     * All supported compilers provide __builtin_add_overflow
     * (GCC >= 5, Clang >= 3.8); the old manual fallback was dead code.
     * The wrapper is kept so the argument types stay checked.
     */
    return __builtin_add_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -394,12 +389,7 @@ static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
|
||||
*/
|
||||
/**
 * sadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *ret = x + y, and returns true if and only if that
 * value has been truncated (i.e. signed overflow occurred).
 */
static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    /*
     * All supported compilers provide __builtin_add_overflow
     * (GCC >= 5, Clang >= 3.8); the old manual fallback was dead code.
     */
    return __builtin_add_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -412,12 +402,7 @@ static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
|
||||
*/
|
||||
/**
 * uadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *ret = x + y, and returns true if and only if the
 * sum wrapped around (i.e. unsigned overflow occurred).
 */
static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    /*
     * All supported compilers provide __builtin_add_overflow
     * (GCC >= 5, Clang >= 3.8); the old manual fallback was dead code.
     */
    return __builtin_add_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -430,12 +415,7 @@ static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
|
||||
*/
|
||||
/**
 * uadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *ret = x + y, and returns true if and only if the
 * sum wrapped around (i.e. unsigned overflow occurred).
 */
static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    /*
     * All supported compilers provide __builtin_add_overflow
     * (GCC >= 5, Clang >= 3.8); the old manual fallback was dead code.
     */
    return __builtin_add_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -449,12 +429,7 @@ static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
|
||||
*/
|
||||
/**
 * ssub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *ret = x - y, and returns true if and only if that
 * value has been truncated (i.e. signed overflow occurred).
 */
static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    /*
     * All supported compilers provide __builtin_sub_overflow
     * (GCC >= 5, Clang >= 3.8); the old manual fallback was dead code.
     */
    return __builtin_sub_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -468,12 +443,7 @@ static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
|
||||
*/
|
||||
/**
 * ssub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *ret = x - y, and returns true if and only if that
 * value has been truncated (i.e. signed overflow occurred).
 */
static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    /*
     * All supported compilers provide __builtin_sub_overflow
     * (GCC >= 5, Clang >= 3.8); the old manual fallback was dead code.
     */
    return __builtin_sub_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -487,12 +457,7 @@ static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
|
||||
*/
|
||||
/**
 * usub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *ret = x - y, and returns true if and only if the
 * difference wrapped around (i.e. unsigned underflow occurred).
 */
static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    /*
     * All supported compilers provide __builtin_sub_overflow
     * (GCC >= 5, Clang >= 3.8); the old manual fallback was dead code.
     */
    return __builtin_sub_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -506,12 +471,7 @@ static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
|
||||
*/
|
||||
/**
 * usub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *ret = x - y, and returns true if and only if the
 * difference wrapped around (i.e. unsigned underflow occurred).
 */
static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    /*
     * All supported compilers provide __builtin_sub_overflow
     * (GCC >= 5, Clang >= 3.8); the old manual fallback was dead code.
     */
    return __builtin_sub_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -524,13 +484,7 @@ static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
|
||||
*/
|
||||
/**
 * smul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *ret = x * y, and returns true if and only if that
 * value has been truncated (i.e. signed overflow occurred).
 */
static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    /*
     * All supported compilers provide __builtin_mul_overflow
     * (GCC >= 5, Clang >= 3.8); the old widening fallback was dead code.
     */
    return __builtin_mul_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -543,14 +497,7 @@ static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
|
||||
*/
|
||||
/**
 * smul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *ret = x * y, and returns true if and only if that
 * value has been truncated (i.e. signed overflow occurred).
 */
static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    /*
     * All supported compilers provide __builtin_mul_overflow
     * (GCC >= 5, Clang >= 3.8); the old muls64()-based fallback was
     * dead code and pulled in an extra helper for nothing.
     */
    return __builtin_mul_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -563,13 +510,7 @@ static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
|
||||
*/
|
||||
/**
 * umul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *ret = x * y, and returns true if and only if the
 * product did not fit in 32 bits (i.e. unsigned overflow occurred).
 */
static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    /*
     * All supported compilers provide __builtin_mul_overflow
     * (GCC >= 5, Clang >= 3.8); the old widening fallback was dead code.
     */
    return __builtin_mul_overflow(x, y, ret);
}
|
||||
|
||||
/**
|
||||
@ -582,13 +523,7 @@ static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
|
||||
*/
|
||||
/**
 * umul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *ret = x * y, and returns true if and only if the
 * product did not fit in 64 bits (i.e. unsigned overflow occurred).
 */
static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    /*
     * All supported compilers provide __builtin_mul_overflow
     * (GCC >= 5, Clang >= 3.8); the old mulu64()-based fallback was
     * dead code and pulled in an extra helper for nothing.
     */
    return __builtin_mul_overflow(x, y, ret);
}
|
||||
|
||||
/*
|
||||
|
Loading…
Reference in New Issue
Block a user