author    Nikolas Nyby <nikolas@gnu.org>                2019-08-06 07:19:19 -0400
committer Charles Pigott <charlespigott@googlemail.com> 2019-08-31 20:27:56 +0100
commit    28e11623bd22fcf933350aae980b28008f113286 (patch)
tree      af363ff04c1e9784ed855561062625b646dc8ac0 /src/core
parent    1be42c6cb88fca9de926a8b74c88fbfc98ef4090 (diff)
Codechange: math functions - use cpp-style casts
Diffstat (limited to 'src/core')
 -rw-r--r--  src/core/bitmath_func.hpp | 10 +++++-----
 -rw-r--r--  src/core/math_func.hpp    | 10 +++++-----
 2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/src/core/bitmath_func.hpp b/src/core/bitmath_func.hpp
index fd05aa3f5..8fdc7100e 100644
--- a/src/core/bitmath_func.hpp
+++ b/src/core/bitmath_func.hpp
@@ -367,12 +367,12 @@ static inline T ROR(const T x, const uint8 n)
* (since it will use hardware swapping if available).
* Even though they should return uint16 and uint32, we get
* warnings if we don't cast those (why?) */
- #define BSWAP32(x) ((uint32)CFSwapInt32(x))
- #define BSWAP16(x) ((uint16)CFSwapInt16(x))
+# define BSWAP32(x) (static_cast<uint32>(CFSwapInt32(x)))
+# define BSWAP16(x) (static_cast<uint16>(CFSwapInt16(x)))
#elif defined(_MSC_VER)
/* MSVC has intrinsics for swapping, resulting in faster code */
- #define BSWAP32(x) (_byteswap_ulong(x))
- #define BSWAP16(x) (_byteswap_ushort(x))
+# define BSWAP32(x) (_byteswap_ulong(x))
+# define BSWAP16(x) (_byteswap_ushort(x))
#else
/**
* Perform a 32 bits endianness bitswap on x.
@@ -383,7 +383,7 @@ static inline T ROR(const T x, const uint8 n)
{
#if !defined(__ICC) && defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ >= 3))
/* GCC >= 4.3 provides a builtin, resulting in faster code */
- return (uint32)__builtin_bswap32((int32)x);
+ return static_cast<uint32>(__builtin_bswap32(static_cast<int32>(x)));
#else
return ((x >> 24) & 0xFF) | ((x >> 8) & 0xFF00) | ((x << 8) & 0xFF0000) | ((x << 24) & 0xFF000000);
#endif /* defined(__GNUC__) */
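For readers outside the tree, here is a minimal standalone sketch of the generic byte-swap fallback above (bswap32_generic and the main driver are illustrative names of mine, not part of the commit; only <cstdint>, <cinttypes> and <cstdio> are assumed):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

/* Portable 32-bit byte swap, mirroring the generic fallback in the hunk above. */
static inline uint32_t bswap32_generic(uint32_t x)
{
	return ((x >> 24) & 0xFF) | ((x >> 8) & 0xFF00) |
	       ((x << 8) & 0xFF0000) | ((x << 24) & 0xFF000000);
}

int main()
{
	/* 0x11223344 byte-swapped is 0x44332211. */
	printf("%08" PRIx32 "\n", bswap32_generic(0x11223344));
	return 0;
}

On the GCC path the builtin returns a signed value, which is why the hunk wraps it in static_cast<uint32>: the conversion is now explicit and searchable instead of silent.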
diff --git a/src/core/math_func.hpp b/src/core/math_func.hpp
index 0b51d6bbf..570f54c23 100644
--- a/src/core/math_func.hpp
+++ b/src/core/math_func.hpp
@@ -115,7 +115,7 @@ template <typename T>
static inline T *AlignPtr(T *x, uint n)
{
assert_compile(sizeof(size_t) == sizeof(void *));
- return (T *)Align((size_t)x, n);
+ return reinterpret_cast<T *>(Align((size_t)x, n));
}
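The pointer/integer round-trip in AlignPtr is exactly the case reinterpret_cast exists for. A hedged standalone sketch follows; the integer Align is re-stubbed here from its usual power-of-two form and is my assumption, not quoted from the commit:

#include <cstdint>
#include <cstdio>

/* Round x up to the next multiple of n, where n is a power of two
 * (stand-in for OpenTTD's integer Align). */
static inline size_t Align(size_t x, size_t n)
{
	return (x + n - 1) & ~(n - 1);
}

template <typename T>
static inline T *AlignPtr(T *x, size_t n)
{
	/* reinterpret_cast documents the pointer <-> integer conversions;
	 * static_cast would (rightly) refuse to compile them. */
	return reinterpret_cast<T *>(Align(reinterpret_cast<size_t>(x), n));
}

int main()
{
	char buf[64];
	char *p = AlignPtr(buf + 1, 16); /* align an interior pointer to 16 bytes */
	printf("%p -> %p\n", static_cast<void *>(buf + 1), static_cast<void *>(p));
	return 0;
}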
@@ -202,7 +202,7 @@ static inline uint ClampU(const uint a, const uint min, const uint max)
*/
static inline int32 ClampToI32(const int64 a)
{
- return (int32)Clamp<int64>(a, INT32_MIN, INT32_MAX);
+ return static_cast<int32>(Clamp<int64>(a, INT32_MIN, INT32_MAX));
}
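As a quick standalone illustration of the clamp-then-narrow pattern (a sketch only: std::clamp from C++17 stands in for OpenTTD's own Clamp template, and fixed-width <cstdint> types stand in for the project typedefs):

#include <algorithm>
#include <cinttypes>
#include <cstdint>
#include <cstdio>

static inline int32_t ClampToI32(int64_t a)
{
	/* Clamp first, then narrow; after clamping the static_cast
	 * cannot change the value. */
	return static_cast<int32_t>(std::clamp<int64_t>(a, INT32_MIN, INT32_MAX));
}

int main()
{
	printf("%" PRId32 "\n", ClampToI32(INT64_MAX)); /* 2147483647 */
	printf("%" PRId32 "\n", ClampToI32(INT64_MIN)); /* -2147483648 */
	return 0;
}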
@@ -218,7 +218,7 @@ static inline uint16 ClampToU16(const uint64 a)
* match for min(uint64, uint) than uint64 min(uint64, uint64). As such we
* need to cast the UINT16_MAX to prevent MSVC from displaying its
* infinite loads of warnings. */
- return (uint16)min<uint64>(a, (uint64)UINT16_MAX);
+ return static_cast<uint16>(min<uint64>(a, static_cast<uint64>(UINT16_MAX)));
}
@@ -339,10 +339,10 @@ static inline int RoundDivSU(int a, uint b)
{
if (a > 0) {
/* 0.5 is rounded to 1 */
- return (a + (int)b / 2) / (int)b;
+ return (a + static_cast<int>(b) / 2) / static_cast<int>(b);
} else {
/* -0.5 is rounded to 0 */
- return (a - ((int)b - 1) / 2) / (int)b;
+ return (a - (static_cast<int>(b) - 1) / 2) / static_cast<int>(b);
}
}
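To make the two rounding comments concrete, a small standalone driver (the function body is copied from the hunk above; the main function and the uint spelling as unsigned are mine):

#include <cstdio>

/* Signed/unsigned rounding division, as in the hunk above:
 * halves round toward positive infinity. */
static inline int RoundDivSU(int a, unsigned b)
{
	if (a > 0) {
		/* 0.5 is rounded to 1 */
		return (a + static_cast<int>(b) / 2) / static_cast<int>(b);
	} else {
		/* -0.5 is rounded to 0 */
		return (a - (static_cast<int>(b) - 1) / 2) / static_cast<int>(b);
	}
}

int main()
{
	printf("%d\n", RoundDivSU(3, 2));  /*  1.5 ->  2 */
	printf("%d\n", RoundDivSU(1, 2));  /*  0.5 ->  1 */
	printf("%d\n", RoundDivSU(-1, 2)); /* -0.5 ->  0 */
	printf("%d\n", RoundDivSU(-3, 2)); /* -1.5 -> -1 */
	return 0;
}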