diff --git a/Code/Framework/AzCore/AzCore/Math/MathUtils.h b/Code/Framework/AzCore/AzCore/Math/MathUtils.h
index bf7f87997c..5388764680 100644
--- a/Code/Framework/AzCore/AzCore/Math/MathUtils.h
+++ b/Code/Framework/AzCore/AzCore/Math/MathUtils.h
@@ -9,6 +9,7 @@
 #pragma once
 
 #include
+#include <AzCore/std/limits.h>
 #include
 #include
 #include
@@ -20,6 +21,7 @@
 #include
 #include
 #include
+#include <cinttypes>
 
 // We have a separate inline define for math functions.
 // The performance of these functions is very sensitive to inlining, and some compilers don't deal well with this.
@@ -256,13 +258,13 @@ namespace AZ
     struct ClampedIntegralLimits
     {
         //! If SourceType and ClampType are different, returns the greater value of
-        //! std::numeric_limits<SourceType>::lowest() and std::numeric_limits<ClampType>::lowest(),
-        //! otherwise returns std::numeric_limits<SourceType>::lowest().
+        //! AZStd::numeric_limits<SourceType>::lowest() and AZStd::numeric_limits<ClampType>::lowest(),
+        //! otherwise returns AZStd::numeric_limits<SourceType>::lowest().
         static constexpr SourceType Min();
 
         //! If SourceType and ClampType are different, returns the lesser value of
-        //! std::numeric_limits<SourceType>::max() and std::numeric_limits<ClampType>::max(),
-        //! otherwise returns std::numeric_limits<SourceType>::max().
+        //! AZStd::numeric_limits<SourceType>::max() and AZStd::numeric_limits<ClampType>::max(),
+        //! otherwise returns AZStd::numeric_limits<SourceType>::max().
         static constexpr SourceType Max();
 
         //! Safely clamps a value of type ValueType to the [Min(), Max()] range as determined by the
@@ -375,12 +377,12 @@ namespace AZ
     //! Returns a value t where Lerp(a, b, t) == value (or 0 if a == b).
     inline float LerpInverse(float a, float b, float value)
     {
-        return IsClose(a, b, std::numeric_limits<float>::epsilon()) ? 0.0f : (value - a) / (b - a);
+        return IsClose(a, b, AZStd::numeric_limits<float>::epsilon()) ? 0.0f : (value - a) / (b - a);
     }
 
     inline double LerpInverse(double a, double b, double value)
     {
-        return IsClose(a, b, std::numeric_limits<double>::epsilon()) ? 0.0 : (value - a) / (b - a);
+        return IsClose(a, b, AZStd::numeric_limits<double>::epsilon()) ? 0.0 : (value - a) / (b - a);
     }
 
     //! Returns true if the number provided is even.
@@ -431,19 +433,19 @@ namespace AZ
 
     AZ_MATH_INLINE float GetFloatQNaN()
     {
-        return std::numeric_limits<float>::quiet_NaN();
+        return AZStd::numeric_limits<float>::quiet_NaN();
     }
 
     //! IsCloseMag(x, y, epsilon) returns true if y and x are sufficiently close, taking magnitude of x and y into account in the epsilon
     template<typename T>
-    AZ_MATH_INLINE bool IsCloseMag(T x, T y, T epsilonValue = std::numeric_limits<T>::epsilon())
+    AZ_MATH_INLINE bool IsCloseMag(T x, T y, T epsilonValue = AZStd::numeric_limits<T>::epsilon())
     {
         return (AZStd::abs(x - y) <= epsilonValue * GetMax(GetMax(T(1.0), AZStd::abs(x)), AZStd::abs(y)));
     }
 
     //! ClampIfCloseMag(x, y, epsilon) returns y when x and y are within epsilon of each other (taking magnitude into account). Otherwise returns x.
     template<typename T>
-    AZ_MATH_INLINE T ClampIfCloseMag(T x, T y, T epsilonValue = std::numeric_limits<T>::epsilon())
+    AZ_MATH_INLINE T ClampIfCloseMag(T x, T y, T epsilonValue = AZStd::numeric_limits<T>::epsilon())
     {
         return IsCloseMag(x, y, epsilonValue) ? y : x;
     }
@@ -461,6 +463,44 @@ namespace AZ
         return (azisfinite(x) != 0);
     }
 
+    //! Returns the value divided by alignment, where the result is rounded up if the remainder is non-zero.
+    //! Example: alignment: 4
+    //! Value:  0 1 2 3 4 5 6 7 8
+    //! Result: 0 1 1 1 1 2 2 2 2
+    constexpr uint32_t DivideAndRoundUp(uint32_t value, uint32_t alignment)
+    {
+        AZ_Assert(alignment != 0, "0 is an invalid multiple to round to.");
+        AZ_Assert(
+            AZStd::numeric_limits<uint32_t>::max() - value >= alignment,
+            "value '%" PRIu32 "' and alignment '%" PRIu32 "' will overflow when added together during DivideAndRoundUp.", value, alignment);
+        return (value + alignment - 1) / alignment;
+    }
+
+    constexpr uint64_t DivideAndRoundUp(uint64_t value, uint64_t alignment)
+    {
+        AZ_Assert(alignment != 0, "0 is an invalid multiple to round to.");
+        AZ_Assert(
+            AZStd::numeric_limits<uint64_t>::max() - value >= alignment,
+            "value '%" PRIu64 "' and alignment '%" PRIu64 "' will overflow when added together during DivideAndRoundUp.", value, alignment);
+        return (value + alignment - 1) / alignment;
+    }
+
+    //! Returns the value rounded up to a multiple of alignment.
+    //! This function will work for non-power-of-two alignments.
+    //! If your alignment is guaranteed to be a power of two, SizeAlignUp in base.h is a more efficient implementation.
+    //! Example: alignment: 4
+    //! Value:  0 1 2 3 4 5 6 7 8
+    //! Result: 0 4 4 4 4 8 8 8 8
+    constexpr uint32_t RoundUpToMultiple(uint32_t value, uint32_t alignment)
+    {
+        return DivideAndRoundUp(value, alignment) * alignment;
+    }
+
+    constexpr uint64_t RoundUpToMultiple(uint64_t value, uint64_t alignment)
+    {
+        return DivideAndRoundUp(value, alignment) * alignment;
+    }
+
     //! Returns the minimum value for SourceType as constrained by the numerical range of ClampType.
     template<typename SourceType, typename ClampType>
     constexpr SourceType ClampedIntegralLimits<SourceType, ClampType>::Min()
@@ -474,8 +514,8 @@
         {
             // Both SourceType and ClampType are signed, take the greater of the lower limits of each type
             return sizeof(SourceType) < sizeof(ClampType) ?
-                (std::numeric_limits<SourceType>::lowest)() :
-                static_cast<SourceType>((std::numeric_limits<ClampType>::lowest)());
+                (AZStd::numeric_limits<SourceType>::lowest)() :
+                static_cast<SourceType>((AZStd::numeric_limits<ClampType>::lowest)());
         }
     }
 
@@ -486,12 +526,12 @@
         if constexpr (sizeof(SourceType) < sizeof(ClampType))
         {
             // If SourceType is narrower than ClampType, the upper limit will be SourceType's
-            return (std::numeric_limits<SourceType>::max)();
+            return (AZStd::numeric_limits<SourceType>::max)();
         }
         else if constexpr (sizeof(SourceType) > sizeof(ClampType))
         {
             // If SourceType is wider than ClampType, the upper limit will be ClampType's
-            return static_cast<SourceType>((std::numeric_limits<ClampType>::max)());
+            return static_cast<SourceType>((AZStd::numeric_limits<ClampType>::max)());
         }
         else
         {
@@ -499,13 +539,13 @@
             {
                 // SourceType and ClampType are the same width, ClampType is signed
                 // so our upper limit will be ClampType
-                return static_cast<SourceType>((std::numeric_limits<ClampType>::max)());
+                return static_cast<SourceType>((AZStd::numeric_limits<ClampType>::max)());
             }
             else
             {
                 // SourceType and ClampType are the same width, ClampType is unsigned
                 // then our upper limit will be SourceType
-                return (std::numeric_limits<SourceType>::max)();
+                return (AZStd::numeric_limits<SourceType>::max)();
             }
         }
     }
@@ -588,7 +628,7 @@
             // LeftTypeSize <= RightTypeSize
             // LeftType is signed
             // RightType is unsigned
-            RightType max = static_cast<RightType>((std::numeric_limits<LeftType>::max)());
+            RightType max = static_cast<RightType>((AZStd::numeric_limits<LeftType>::max)());
 
             if (rhs > max)
             {
@@ -604,7 +644,7 @@
             // LeftType < RightType
             // LeftType is unsigned
             // RightType is signed
-            RightType max = static_cast<RightType>((std::numeric_limits<LeftType>::max)());
+            RightType max = static_cast<RightType>((AZStd::numeric_limits<LeftType>::max)());
 
             if (rhs < 0)
             {
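Taken together, the two additions give AzCore a checked ceiling division and a round-up-to-multiple built on top of it. A minimal usage sketch (the WordCount/PaddedSize helpers and the 4-byte word framing are illustrative, not part of the change):

    #include <AzCore/Math/MathUtils.h>

    // Number of 4-byte words needed to hold byteCount bytes: DivideAndRoundUp(10u, 4u) == 3.
    uint32_t WordCount(uint32_t byteCount)  { return AZ::DivideAndRoundUp(byteCount, 4u); }

    // The same count expressed as a padded byte size: RoundUpToMultiple(10u, 4u) == 12.
    uint32_t PaddedSize(uint32_t byteCount) { return AZ::RoundUpToMultiple(byteCount, 4u); }

Both overloads assert on a zero alignment and on value + alignment overflow, so callers no longer need to guard the classic (value + alignment - 1) wrap themselves.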
diff --git a/Code/Framework/AzCore/Tests/Math/MathUtilsTests.cpp b/Code/Framework/AzCore/Tests/Math/MathUtilsTests.cpp
index 878e705e26..52f7220962 100644
--- a/Code/Framework/AzCore/Tests/Math/MathUtilsTests.cpp
+++ b/Code/Framework/AzCore/Tests/Math/MathUtilsTests.cpp
@@ -37,8 +37,8 @@ namespace UnitTest
         // min/max need to be substantially different to return a useful t value
 
         // Float
-        const float epsilonF = std::numeric_limits<float>::epsilon();
-        const float doesntMatterF = std::numeric_limits<float>::signaling_NaN();
+        const float epsilonF = AZStd::numeric_limits<float>::epsilon();
+        const float doesntMatterF = AZStd::numeric_limits<float>::signaling_NaN();
         float lowerF = 2.3f, upperF = 2.3f;
         EXPECT_EQ(0.0f, AZ::LerpInverse(lowerF, upperF, doesntMatterF));
         EXPECT_EQ(0.0f, AZ::LerpInverse(0.0f, 0.5f * epsilonF, doesntMatterF));
@@ -48,8 +48,8 @@ namespace UnitTest
         EXPECT_NEAR(1.0f, AZ::LerpInverse(1.0f, 1.0f + 5.0f * epsilonF, 1.0f + 5.0f * epsilonF), epsilonF);
 
         // Double
-        const double epsilonD = std::numeric_limits<double>::epsilon();
-        const double doesntMatterD = std::numeric_limits<double>::signaling_NaN();
+        const double epsilonD = AZStd::numeric_limits<double>::epsilon();
+        const double doesntMatterD = AZStd::numeric_limits<double>::signaling_NaN();
         double lowerD = 2.3, upperD = 2.3;
         EXPECT_EQ(0.0, AZ::LerpInverse(lowerD, upperD, doesntMatterD));
         EXPECT_EQ(0.0, AZ::LerpInverse(0.0, 0.5 * epsilonD, doesntMatterD));
@@ -58,4 +58,128 @@ namespace UnitTest
         EXPECT_NEAR(0.6, AZ::LerpInverse(1.0, 1.0 + 5.0 * epsilonD, 1.0 + 3.0 * epsilonD), epsilonD);
         EXPECT_NEAR(1.0, AZ::LerpInverse(1.0, 1.0 + 5.0 * epsilonD, 1.0 + 5.0 * epsilonD), epsilonD);
     }
+
+    template<typename T>
+    void TestRoundUpToMultipleIsCorrect()
+    {
+        // Example: alignment: 4
+        //     inputValue: 0 1 2 3 4 5 6 7 8 ...
+        // expectedOutput: 0 4 4 4 4 8 8 8 8 ...
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(0) , static_cast<T>(1)) , 0);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(1) , static_cast<T>(1)) , 1);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(2) , static_cast<T>(1)) , 2);
+
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(0) , static_cast<T>(2)) , 0);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(1) , static_cast<T>(2)) , 2);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(2) , static_cast<T>(2)) , 2);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(3) , static_cast<T>(2)) , 4);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(4) , static_cast<T>(2)) , 4);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(5) , static_cast<T>(2)) , 6);
+
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(0) , static_cast<T>(8)) , 0);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(1) , static_cast<T>(8)) , 8);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(7) , static_cast<T>(8)) , 8);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(8) , static_cast<T>(8)) , 8);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(9) , static_cast<T>(8)) , 16);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(15), static_cast<T>(8)) , 16);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(16), static_cast<T>(8)) , 16);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(17), static_cast<T>(8)) , 24);
+
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(0) , static_cast<T>(13)), 0);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(1) , static_cast<T>(13)), 13);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(9) , static_cast<T>(13)), 13);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(12), static_cast<T>(13)), 13);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(13), static_cast<T>(13)), 13);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(14), static_cast<T>(13)), 26);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(25), static_cast<T>(13)), 26);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(26), static_cast<T>(13)), 26);
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(27), static_cast<T>(13)), 39);
+
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(0), AZStd::numeric_limits<T>::max()), 0);
+
+        T aVeryLargeNumberThatStillWontOverflow = AZStd::numeric_limits<T>::max() - 4;
+        EXPECT_EQ(RoundUpToMultiple(static_cast<T>(1), aVeryLargeNumberThatStillWontOverflow), aVeryLargeNumberThatStillWontOverflow);
+        EXPECT_EQ(RoundUpToMultiple(aVeryLargeNumberThatStillWontOverflow, static_cast<T>(1)), aVeryLargeNumberThatStillWontOverflow);
+    }
+
+    TEST(RoundUpToMultipleTest, RoundUpToMultipleUInt32_ValidInput_IsCorrect)
+    {
+        TestRoundUpToMultipleIsCorrect<uint32_t>();
+    }
+
+    TEST(RoundUpToMultipleTest, RoundUpToMultipleUInt64_ValidInput_IsCorrect)
+    {
+        TestRoundUpToMultipleIsCorrect<uint64_t>();
+    }
+
+    template<typename T>
+    void TestDivideAndRoundUpIsCorrect()
+    {
+        // Example: alignment: 3
+        // Value:  0 1 2 3 4 5 6 7 8
+        // Result: 0 1 1 1 2 2 2 3 3
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(0), static_cast<T>(3)), 0);
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(1), static_cast<T>(3)), 1);
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(2), static_cast<T>(3)), 1);
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(3), static_cast<T>(3)), 1);
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(4), static_cast<T>(3)), 2);
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(5), static_cast<T>(3)), 2);
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(6), static_cast<T>(3)), 2);
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(7), static_cast<T>(3)), 3);
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(8), static_cast<T>(3)), 3);
+
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(0), AZStd::numeric_limits<T>::max()), 0);
+
+        T aVeryLargeNumberThatStillWontOverflow = AZStd::numeric_limits<T>::max() - 4;
+        EXPECT_EQ(DivideAndRoundUp(static_cast<T>(1), aVeryLargeNumberThatStillWontOverflow), static_cast<T>(1));
+        EXPECT_EQ(DivideAndRoundUp(aVeryLargeNumberThatStillWontOverflow, static_cast<T>(1)), aVeryLargeNumberThatStillWontOverflow);
+    }
+
+    TEST(DivideAndRoundUpTest, DivideAndRoundUpUInt32_ValidInput_IsCorrect)
+    {
+        TestDivideAndRoundUpIsCorrect<uint32_t>();
+    }
+
+    TEST(DivideAndRoundUpTest, DivideAndRoundUpUInt64_ValidInput_IsCorrect)
+    {
+        TestDivideAndRoundUpIsCorrect<uint64_t>();
+    }
+
+    class RoundUpInvalidInputTestsFixture : public ScopedAllocatorSetupFixture
+    {
+    };
+
+    TEST_F(RoundUpInvalidInputTestsFixture, DivideAndRoundUp_AlignmentZeroUint32_Assert)
+    {
+        AZ_TEST_START_TRACE_SUPPRESSION;
+        DivideAndRoundUp(static_cast<uint32_t>(0), static_cast<uint32_t>(0));
+        AZ_TEST_STOP_TRACE_SUPPRESSION(1);
+    }
+
+    TEST_F(RoundUpInvalidInputTestsFixture, DivideAndRoundUp_AlignmentZeroUint64_Assert)
+    {
+        AZ_TEST_START_TRACE_SUPPRESSION;
+        DivideAndRoundUp(static_cast<uint64_t>(0), static_cast<uint64_t>(0));
+        AZ_TEST_STOP_TRACE_SUPPRESSION(1);
+    }
+
+    TEST_F(RoundUpInvalidInputTestsFixture, DivideAndRoundUp_OverflowUint32_Assert)
+    {
+        AZ_TEST_START_TRACE_SUPPRESSION;
+        DivideAndRoundUp(
+            static_cast<uint32_t>((AZStd::numeric_limits<uint32_t>::max() / 2) + 1),
+            static_cast<uint32_t>((AZStd::numeric_limits<uint32_t>::max() / 2) + 1));
+        AZ_TEST_STOP_TRACE_SUPPRESSION(1);
+    }
+
+    TEST_F(RoundUpInvalidInputTestsFixture, DivideAndRoundUp_OverflowUint64_Assert)
+    {
+        AZ_TEST_START_TRACE_SUPPRESSION;
+        DivideAndRoundUp(
+            static_cast<uint64_t>((AZStd::numeric_limits<uint64_t>::max() / 2) + 1),
+            static_cast<uint64_t>((AZStd::numeric_limits<uint64_t>::max() / 2) + 1));
+        AZ_TEST_STOP_TRACE_SUPPRESSION(1);
+    }
 }
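A note on the two overflow tests: (max() / 2) + 1 is the smallest value that trips the new guard when passed as both arguments. Worked through for uint32_t (the uint64_t case is structurally identical):

    // max                == 2^32 - 1
    // max / 2            == 2^31 - 1   (integer division truncates)
    // value = alignment  == 2^31
    // value + alignment  == 2^32, one past max, so (value + alignment - 1) would wrap;
    // the guard "max - value >= alignment" becomes 2^31 - 1 >= 2^31, which is false,
    // and the AZ_Assert fires before the wrapping addition is performed.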
diff --git a/Code/Framework/AzNetworking/AzNetworking/UdpTransport/UdpNetworkInterface.cpp b/Code/Framework/AzNetworking/AzNetworking/UdpTransport/UdpNetworkInterface.cpp
index af279e93f1..9454e29d41 100644
--- a/Code/Framework/AzNetworking/AzNetworking/UdpTransport/UdpNetworkInterface.cpp
+++ b/Code/Framework/AzNetworking/AzNetworking/UdpTransport/UdpNetworkInterface.cpp
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <AzCore/Math/MathUtils.h>
 
 namespace AzNetworking
 {
@@ -539,7 +540,7 @@ namespace AzNetworking
         // Each fragmented packet we send adds an extra fragmented packet header, need to deduct that from our chunk size, otherwise we infinitely loop
         // SSL encryption can also inflate our payload so we pre-emptively deduct an estimated tax
         const uint32_t chunkSize = connection.GetConnectionMtu() - net_FragmentedHeaderOverhead - net_SslInflationOverhead;
-        const uint32_t numChunks = (packetSize + chunkSize - 1) / chunkSize; // We want to round up on the remainder
+        const uint32_t numChunks = AZ::DivideAndRoundUp(packetSize, chunkSize); // We want to round up on the remainder
         const uint8_t* chunkStart = packetData;
         const SequenceId fragmentedSequence = connection.m_fragmentQueue.GetNextFragmentedSequenceId();
         uint32_t bytesRemaining = packetSize;
diff --git a/Code/Framework/GridMate/GridMate/Carrier/SocketDriver.cpp b/Code/Framework/GridMate/GridMate/Carrier/SocketDriver.cpp
index 38ca534fb8..b07e6fa91d 100644
--- a/Code/Framework/GridMate/GridMate/Carrier/SocketDriver.cpp
+++ b/Code/Framework/GridMate/GridMate/Carrier/SocketDriver.cpp
@@ -20,6 +20,7 @@
 #include
 #include
+#include <AzCore/Math/MathUtils.h>
 #include
 #include
 #include
@@ -1951,7 +1952,7 @@ namespace GridMate
     char *SocketDriverCommon::RIOPlatformSocketDriver::AllocRIOBuffer(AZ::u64 bufferSize, AZ::u64 numBuffers, AZ::u64* amountAllocated /*=nullptr*/)
     {
         // calculate how much memory we are really asking for, and this must be page aligned.
-        AZ::u64 totalBufferSize = RoundUp(bufferSize * numBuffers, m_pageSize);
+        AZ::u64 totalBufferSize = AZ::RoundUpToMultiple(bufferSize * numBuffers, m_pageSize);
 
         if (amountAllocated != nullptr)
         {
diff --git a/Code/Framework/GridMate/GridMate/Carrier/SocketDriver.h b/Code/Framework/GridMate/GridMate/Carrier/SocketDriver.h
index eb4130a477..de47eaf290 100644
--- a/Code/Framework/GridMate/GridMate/Carrier/SocketDriver.h
+++ b/Code/Framework/GridMate/GridMate/Carrier/SocketDriver.h
@@ -221,18 +221,6 @@ namespace GridMate
         void StopWaitForData() override;
 
     private:
-        AZ::u64 RoundUpAndDivide(AZ::u64 Value, AZ::u64 RoundTo) const
-        {
-            return ((Value + RoundTo - 1) / RoundTo);
-        }
-        AZ::u64 RoundUp(AZ::u64 Value, AZ::u64 RoundTo) const
-        {
-            // rounds value up to multiple of RoundTo
-            // Example: RoundTo: 4
-            // Value:  0 1 2 3 4 5 6 7 8
-            // Result: 0 4 4 4 4 8 8 8 8
-            return RoundUpAndDivide(Value, RoundTo) * RoundTo;
-        }
         char *AllocRIOBuffer(AZ::u64 bufferSize, AZ::u64 numBuffers, AZ::u64* amountAllocated=nullptr);
         bool FreeRIOBuffer(char *buffer);
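For the UdpNetworkInterface change above, the helper computes the fragment count exactly as the removed expression did. A sketch with made-up numbers (1500, 10, and 30 are stand-ins; the real values come from the connection MTU and the net_FragmentedHeaderOverhead / net_SslInflationOverhead settings):

    const uint32_t mtu = 1500;                                         // hypothetical connection MTU
    const uint32_t chunkSize = mtu - 10 - 30;                          // 1460 after header and SSL deductions
    const uint32_t numChunks = AZ::DivideAndRoundUp(4000u, chunkSize); // 3: two full 1460-byte chunks plus one partial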
diff --git a/Gems/Atom/Asset/ImageProcessingAtom/Code/Source/Compressors/ASTCCompressor.cpp b/Gems/Atom/Asset/ImageProcessingAtom/Code/Source/Compressors/ASTCCompressor.cpp
index 91829bb5be..cf01fa87ed 100644
--- a/Gems/Atom/Asset/ImageProcessingAtom/Code/Source/Compressors/ASTCCompressor.cpp
+++ b/Gems/Atom/Asset/ImageProcessingAtom/Code/Source/Compressors/ASTCCompressor.cpp
@@ -179,7 +179,7 @@ namespace ImageProcessingAtom
 
         // Create a context based on the configuration
         astcenc_context* context;
-        AZ::u32 blockCount = ((srcImage->GetWidth(0) + dstFormatInfo->blockWidth - 1) / dstFormatInfo->blockWidth) * ((srcImage->GetHeight(0) + dstFormatInfo->blockHeight - 1) / dstFormatInfo->blockHeight);
+        AZ::u32 blockCount = AZ::DivideAndRoundUp(srcImage->GetWidth(0), dstFormatInfo->blockWidth) * AZ::DivideAndRoundUp(srcImage->GetHeight(0), dstFormatInfo->blockHeight);
         AZ::u32 threadCount = AZStd::min(AZStd::thread::hardware_concurrency() / 2, blockCount);
         status = astcenc_context_alloc(&config, threadCount, &context);
         AZ_Assert(status == ASTCENC_SUCCESS, "ERROR: Codec context alloc failed: %s\n", astcenc_get_error_string(status));
diff --git a/Gems/Atom/Asset/ImageProcessingAtom/Code/Source/Processing/PixelFormatInfo.cpp b/Gems/Atom/Asset/ImageProcessingAtom/Code/Source/Processing/PixelFormatInfo.cpp
index a413a29862..692a0ab28a 100644
--- a/Gems/Atom/Asset/ImageProcessingAtom/Code/Source/Processing/PixelFormatInfo.cpp
+++ b/Gems/Atom/Asset/ImageProcessingAtom/Code/Source/Processing/PixelFormatInfo.cpp
@@ -10,6 +10,8 @@
 #include
 #include
 
+#include <AzCore/Math/MathUtils.h>
+
 namespace ImageProcessingAtom
 {
     CPixelFormats* CPixelFormats::s_instance = nullptr;
@@ -370,11 +372,11 @@ namespace ImageProcessingAtom
         {
             if (outWidth % pFormatInfo->blockWidth != 0)
             {
-                outWidth = ((outWidth + pFormatInfo->blockWidth - 1) / pFormatInfo->blockWidth) * pFormatInfo->blockWidth;
+                outWidth = AZ::RoundUpToMultiple(outWidth, pFormatInfo->blockWidth);
             }
             if (outHeight % pFormatInfo->blockHeight != 0)
             {
-                outHeight = ((outHeight + pFormatInfo->blockHeight - 1) / pFormatInfo->blockHeight) * pFormatInfo->blockHeight;
+                outHeight = AZ::RoundUpToMultiple(outHeight, pFormatInfo->blockHeight);
             }
         }
     }
@@ -390,10 +392,11 @@ namespace ImageProcessingAtom
             return 0;
         }
 
-        // get number of blocks (ceiling round up for block count) and multiply with bits per block. Divided by 8 to get
+        // get number of blocks and multiply with bits per block. Divided by 8 to get
         // final byte size
-        return (((imageWidth + pFormatInfo->blockWidth - 1) / pFormatInfo->blockWidth) *
-            ((imageHeight + pFormatInfo->blockHeight - 1) / pFormatInfo->blockHeight) * pFormatInfo->bitsPerBlock) / 8;
+        return (AZ::DivideAndRoundUp(imageWidth, pFormatInfo->blockWidth) *
+            AZ::DivideAndRoundUp(imageHeight, pFormatInfo->blockHeight) *
+            pFormatInfo->bitsPerBlock) / 8;
     }
 
     bool CPixelFormats::IsFormatSingleChannel(EPixelFormat fmt)
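Worked through with a hypothetical 4x4-block format at 128 bits (16 bytes) per block, the PixelFormatInfo math goes:

    // A 10x6 image does not divide evenly into 4x4 blocks, so it pads to a 3x2 block grid.
    const AZ::u32 blocksWide = AZ::DivideAndRoundUp(10u, 4u);       // 3
    const AZ::u32 blocksHigh = AZ::DivideAndRoundUp(6u, 4u);        // 2
    const AZ::u32 byteSize = (blocksWide * blocksHigh * 128u) / 8u; // 6 blocks * 16 bytes = 96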
     */
    template<typename T>
    inline T DivideByMultiple(T value, size_t alignment)
diff --git a/Gems/Atom/RHI/Code/Include/Atom/RHI/ConstantsData.h b/Gems/Atom/RHI/Code/Include/Atom/RHI/ConstantsData.h
index 04aa97a189..d9eafb09e6 100644
--- a/Gems/Atom/RHI/Code/Include/Atom/RHI/ConstantsData.h
+++ b/Gems/Atom/RHI/Code/Include/Atom/RHI/ConstantsData.h
@@ -228,7 +228,7 @@ namespace AZ
         {
             AZStd::span<const uint8_t> constantBytes = GetConstantRaw(inputIndex);
             const size_t elementSize = sizeof(T);
-            const size_t elementCount = DivideByMultiple(constantBytes.size(), elementSize);
+            const size_t elementCount = AZ::DivideAndRoundUp(constantBytes.size(), elementSize);
             const size_t sizeInBytes = elementCount * elementSize;
             if (ValidateConstantAccess(inputIndex, ValidateConstantAccessExpect::Complete, 0, sizeInBytes))
             {
diff --git a/Gems/Atom/RHI/Code/Include/Atom/RHI/DispatchItem.h b/Gems/Atom/RHI/Code/Include/Atom/RHI/DispatchItem.h
index 72f190dcc8..c8e285acf5 100644
--- a/Gems/Atom/RHI/Code/Include/Atom/RHI/DispatchItem.h
+++ b/Gems/Atom/RHI/Code/Include/Atom/RHI/DispatchItem.h
@@ -41,17 +41,17 @@ namespace AZ
 
         uint16_t GetNumberOfGroupsX() const
         {
-            return aznumeric_cast<uint16_t>((m_totalNumberOfThreadsX + m_threadsPerGroupX - 1) / m_threadsPerGroupX);
+            return aznumeric_cast<uint16_t>(DivideAndRoundUp(m_totalNumberOfThreadsX, aznumeric_caster(m_threadsPerGroupX)));
         }
 
         uint16_t GetNumberOfGroupsY() const
         {
-            return aznumeric_cast<uint16_t>((m_totalNumberOfThreadsY + m_threadsPerGroupY - 1) / m_threadsPerGroupY);
+            return aznumeric_cast<uint16_t>(DivideAndRoundUp(m_totalNumberOfThreadsY, aznumeric_caster(m_threadsPerGroupY)));
         }
 
         uint16_t GetNumberOfGroupsZ() const
         {
-            return aznumeric_cast<uint16_t>((m_totalNumberOfThreadsZ + m_threadsPerGroupZ - 1) / m_threadsPerGroupZ);
+            return aznumeric_cast<uint16_t>(DivideAndRoundUp(m_totalNumberOfThreadsZ, aznumeric_caster(m_threadsPerGroupZ)));
         }
 
         // Different platforms require number of groups or number of threads or both in their Dispatch() call
diff --git a/Gems/Atom/RHI/Code/Source/RHI.Reflect/ImageSubresource.cpp b/Gems/Atom/RHI/Code/Source/RHI.Reflect/ImageSubresource.cpp
index 7b66af2640..f6339e43bf 100644
--- a/Gems/Atom/RHI/Code/Source/RHI.Reflect/ImageSubresource.cpp
+++ b/Gems/Atom/RHI/Code/Source/RHI.Reflect/ImageSubresource.cpp
@@ -358,8 +358,7 @@ namespace AZ
         }
         else
         {
-            const uint32_t bitsPerPixel = RHI::GetFormatSize(imageFormat) * 8;
-            subresourceLayout.m_bytesPerRow = (imageSize.m_width * bitsPerPixel + 7) / 8; // round up to nearest byte
+            subresourceLayout.m_bytesPerRow = imageSize.m_width * RHI::GetFormatSize(imageFormat);
             subresourceLayout.m_rowCount = imageSize.m_height;
             subresourceLayout.m_size.m_width = imageSize.m_width;
             subresourceLayout.m_size.m_height = imageSize.m_height;
diff --git a/Gems/Atom/RHI/Code/Source/RHI/DrawList.cpp b/Gems/Atom/RHI/Code/Source/RHI/DrawList.cpp
index 3fa6ea5010..ef8c8a794e 100644
--- a/Gems/Atom/RHI/Code/Source/RHI/DrawList.cpp
+++ b/Gems/Atom/RHI/Code/Source/RHI/DrawList.cpp
@@ -20,7 +20,7 @@ namespace AZ
             return DrawListView{};
         }
 
-        const size_t itemsPerPartition = DivideByMultiple(drawList.size(), partitionCount);
+        const size_t itemsPerPartition = AZ::DivideAndRoundUp(drawList.size(), partitionCount);
         const size_t itemOffset = partitionIndex * itemsPerPartition;
         const size_t itemCount = AZStd::min(drawList.size() - itemOffset, itemsPerPartition);
         return DrawListView(&drawList[itemOffset], itemCount);
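The DispatchItem getters are the standard compute thread-group calculation: any partial tail of threads still needs a whole group. With illustrative sizes:

    const uint32_t groupsX = AZ::DivideAndRoundUp(1920u, 8u); // 240 groups cover 1920 threads exactly
    const uint32_t groupsY = AZ::DivideAndRoundUp(1080u, 8u); // 135
    const uint32_t tail    = AZ::DivideAndRoundUp(1001u, 8u); // 126, not 125: the last group is partial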
a/Gems/Atom/RHI/Code/Source/RHI/FrameScheduler.cpp
+++ b/Gems/Atom/RHI/Code/Source/RHI/FrameScheduler.cpp
@@ -281,7 +281,7 @@ namespace AZ
         {
             srgPool->CompileGroupsBegin();
             const uint32_t compilesInPool = srgPool->GetGroupsToCompileCount();
-            const uint32_t jobCount = DivideByMultiple(compilesInPool, compilesPerJob);
+            const uint32_t jobCount = AZ::DivideAndRoundUp(compilesInPool, compilesPerJob);
 
             AZ::TaskDescriptor srgCompileDesc{"SrgCompile", "Graphics"};
             AZ::TaskDescriptor srgCompileEndDesc{"SrgCompileEnd", "Graphics"};
@@ -332,7 +332,7 @@ namespace AZ
         const auto compileIntervalsFunction = [compilesPerJob, &jobCompletion](ShaderResourceGroupPool* srgPool)
         {
             const uint32_t compilesInPool = srgPool->GetGroupsToCompileCount();
-            const uint32_t jobCount = DivideByMultiple(compilesInPool, compilesPerJob);
+            const uint32_t jobCount = AZ::DivideAndRoundUp(compilesInPool, compilesPerJob);
 
             for (uint32_t i = 0; i < jobCount; ++i)
             {
diff --git a/Gems/Atom/RHI/DX12/Code/Source/RHI/FrameGraphExecuter.cpp b/Gems/Atom/RHI/DX12/Code/Source/RHI/FrameGraphExecuter.cpp
index 2484890fea..1af19b6431 100644
--- a/Gems/Atom/RHI/DX12/Code/Source/RHI/FrameGraphExecuter.cpp
+++ b/Gems/Atom/RHI/DX12/Code/Source/RHI/FrameGraphExecuter.cpp
@@ -92,7 +92,7 @@ namespace AZ
 
             const uint32_t CommandListCostThreshold = AZStd::max(
                 m_frameGraphExecuterData.m_commandListCostThresholdMin,
-                RHI::DivideByMultiple(estimatedItemCount, m_frameGraphExecuterData.m_commandListsPerScopeMax));
+                AZ::DivideAndRoundUp(estimatedItemCount, m_frameGraphExecuterData.m_commandListsPerScopeMax));
 
             /**
              * Computes a cost heuristic based on the number of items and number of attachments in
@@ -145,7 +145,7 @@ namespace AZ
                 else
                 {
                     // And then create a new group for the current scope with dedicated [1, N] command lists.
-                    const uint32_t commandListCount = AZStd::max(RHI::DivideByMultiple(totalScopeCost, CommandListCostThreshold), 1u);
+                    const uint32_t commandListCount = AZStd::max(AZ::DivideAndRoundUp(totalScopeCost, CommandListCostThreshold), 1u);
 
                     FrameGraphExecuteGroup* scopeContextGroup = AddGroup<FrameGraphExecuteGroup>();
                     scopeContextGroup->Init(device, scope, commandListCount, GetJobPolicy());
diff --git a/Gems/Atom/RHI/Vulkan/Code/Source/RHI/FrameGraphExecuter.cpp b/Gems/Atom/RHI/Vulkan/Code/Source/RHI/FrameGraphExecuter.cpp
index 3b195d1dc6..1cb4b2474d 100644
--- a/Gems/Atom/RHI/Vulkan/Code/Source/RHI/FrameGraphExecuter.cpp
+++ b/Gems/Atom/RHI/Vulkan/Code/Source/RHI/FrameGraphExecuter.cpp
@@ -81,7 +81,7 @@ namespace AZ
 
             const uint32_t CommandListCostThreshold = AZStd::max(
                 m_frameGraphExecuterData.m_commandListCostThresholdMin,
-                RHI::DivideByMultiple(estimatedItemCount, m_frameGraphExecuterData.m_commandListsPerScopeMax));
+                AZ::DivideAndRoundUp(estimatedItemCount, m_frameGraphExecuterData.m_commandListsPerScopeMax));
 
             /**
              * Computes a cost heuristic based on the number of items and number of attachments in
@@ -136,7 +136,7 @@ namespace AZ
                 else
                 {
                     // And then create a new group for the current scope with dedicated [1, N] command lists.
-                    const uint32_t commandListCount = AZStd::max(AZ::DivideAndRoundUp(totalScopeCost, CommandListCostThreshold), 1u);
-                    const uint32_t commandListCount = AZStd::max(RHI::DivideByMultiple(totalScopeCost, CommandListCostThreshold), 1u);
+                    const uint32_t commandListCount = AZStd::max(AZ::DivideAndRoundUp(totalScopeCost, CommandListCostThreshold), 1u);
 
                     FrameGraphExecuteGroup* scopeContextGroup = AddGroup<FrameGraphExecuteGroup>();
                     scopeContextGroup->Init(device, scope, commandListCount, GetJobPolicy());
diff --git a/Gems/Vegetation/Code/Source/Debugger/DebugComponent.cpp b/Gems/Vegetation/Code/Source/Debugger/DebugComponent.cpp
index 0bb8c57c44..5cec3bf87b 100644
--- a/Gems/Vegetation/Code/Source/Debugger/DebugComponent.cpp
+++ b/Gems/Vegetation/Code/Source/Debugger/DebugComponent.cpp
@@ -334,16 +334,11 @@ void DebugComponent::FillSectorEnd([[maybe_unused]] int sectorX, [[maybe_unused]
 
 namespace DebugComponentUtilities
 {
-    constexpr uint32 RoundUpAndDivide(uint32 value, uint32 divide)
-    {
-        return (value + divide - 1) / divide;
-    }
-
    template<typename ValueType>
    union LocalAliasingUnion
    {
        ValueType aliasedValue;
-       AZStd::size_t aliasedValueArray[RoundUpAndDivide(sizeof(ValueType), sizeof(AZStd::size_t))] = {};
+       AZStd::size_t aliasedValueArray[AZ::DivideAndRoundUp(sizeof(ValueType), sizeof(AZStd::size_t))] = {};
    };
 
    template
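Because DivideAndRoundUp is constexpr, it can size the aliasing array at compile time just as the removed local helper did. A sketch of why the round-up matters (Payload and AliasSketch are hypothetical names, assuming the AzCore headers above are included):

    struct Payload { char bytes[20]; }; // hypothetical 20-byte value

    union AliasSketch
    {
        Payload aliasedValue;
        // With an 8-byte size_t, DivideAndRoundUp(20, 8) == 3 elements (24 bytes).
        // Truncating division would give 2 elements (16 bytes) and clip the value.
        AZStd::size_t aliasedValueArray[AZ::DivideAndRoundUp(sizeof(Payload), sizeof(AZStd::size_t))] = {};
    };
    static_assert(sizeof(AliasSketch) >= sizeof(Payload), "array must fully cover the aliased value");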