diff --git a/src/ImageSharp/Processing/Processors/Transforms/Resize/ResizeKernel.cs b/src/ImageSharp/Processing/Processors/Transforms/Resize/ResizeKernel.cs
index 6c1f7217a7..e94c6cd37b 100644
--- a/src/ImageSharp/Processing/Processors/Transforms/Resize/ResizeKernel.cs
+++ b/src/ImageSharp/Processing/Processors/Transforms/Resize/ResizeKernel.cs
@@ -37,7 +37,7 @@ internal readonly unsafe struct ResizeKernel
/// <summary>
/// Gets a value indicating whether vectorization is supported.
/// </summary>
- public static bool SupportsVectorization
+ public static bool IsHardwareAccelerated
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
get => Vector256.IsHardwareAccelerated;
@@ -92,7 +92,7 @@ internal readonly unsafe struct ResizeKernel
[MethodImpl(InliningOptions.ShortMethod)]
public Vector4 ConvolveCore(ref Vector4 rowStartRef)
{
- if (SupportsVectorization)
+ if (IsHardwareAccelerated)
{
if (Vector512.IsHardwareAccelerated)
{
@@ -106,8 +106,8 @@ internal readonly unsafe struct ResizeKernel
Vector512<float> pixels512_0 = Unsafe.As<Vector4, Vector512<float>>(ref rowStartRef);
Vector512<float> pixels512_1 = Unsafe.As<Vector4, Vector512<float>>(ref Unsafe.Add(ref rowStartRef, (nuint)4));
- result512_0 = Vector512Utilities.MultiplyAddEstimate(Vector512.Load(bufferStart), pixels512_0, result512_0);
- result512_1 = Vector512Utilities.MultiplyAddEstimate(Vector512.Load(bufferStart + 16), pixels512_1, result512_1);
+ result512_0 = Vector512_.MultiplyAdd(result512_0, Vector512.Load(bufferStart), pixels512_0);
+ result512_1 = Vector512_.MultiplyAdd(result512_1, Vector512.Load(bufferStart + 16), pixels512_1);
bufferStart += 32;
rowStartRef = ref Unsafe.Add(ref rowStartRef, (nuint)8);
@@ -118,7 +118,7 @@ internal readonly unsafe struct ResizeKernel
if ((this.Length & 7) >= 4)
{
Vector512<float> pixels512_0 = Unsafe.As<Vector4, Vector512<float>>(ref rowStartRef);
- result512_0 = Vector512Utilities.MultiplyAddEstimate(Vector512.Load(bufferStart), pixels512_0, result512_0);
+ result512_0 = Vector512_.MultiplyAdd(result512_0, Vector512.Load(bufferStart), pixels512_0);
bufferStart += 16;
rowStartRef = ref Unsafe.Add(ref rowStartRef, (nuint)4);
@@ -129,7 +129,7 @@ internal readonly unsafe struct ResizeKernel
if ((this.Length & 3) >= 2)
{
Vector256<float> pixels256_0 = Unsafe.As<Vector4, Vector256<float>>(ref rowStartRef);
- result256 = Vector256Utilities.MultiplyAddEstimate(Vector256.Load(bufferStart), pixels256_0, result256);
+ result256 = Vector256_.MultiplyAdd(result256, Vector256.Load(bufferStart), pixels256_0);
bufferStart += 8;
rowStartRef = ref Unsafe.Add(ref rowStartRef, (nuint)2);
@@ -140,10 +140,10 @@ internal readonly unsafe struct ResizeKernel
if ((this.Length & 1) != 0)
{
Vector128<float> pixels128 = Unsafe.As<Vector4, Vector128<float>>(ref rowStartRef);
- result128 = Vector128Utilities.MultiplyAddEstimate(Vector128.Load(bufferStart), pixels128, result128);
+ result128 = Vector128_.MultiplyAdd(result128, Vector128.Load(bufferStart), pixels128);
}
- return *(Vector4*)&result128;
+ return result128.AsVector4();
}
else
{
@@ -157,8 +157,8 @@ internal readonly unsafe struct ResizeKernel
Vector256<float> pixels256_0 = Unsafe.As<Vector4, Vector256<float>>(ref rowStartRef);
Vector256<float> pixels256_1 = Unsafe.As<Vector4, Vector256<float>>(ref Unsafe.Add(ref rowStartRef, (nuint)2));
- result256_0 = Vector256Utilities.MultiplyAddEstimate(Vector256.Load(bufferStart), pixels256_0, result256_0);
- result256_1 = Vector256Utilities.MultiplyAddEstimate(Vector256.Load(bufferStart + 8), pixels256_1, result256_1);
+ result256_0 = Vector256_.MultiplyAdd(result256_0, Vector256.Load(bufferStart), pixels256_0);
+ result256_1 = Vector256_.MultiplyAdd(result256_1, Vector256.Load(bufferStart + 8), pixels256_1);
bufferStart += 16;
rowStartRef = ref Unsafe.Add(ref rowStartRef, (nuint)4);
@@ -169,7 +169,7 @@ internal readonly unsafe struct ResizeKernel
if ((this.Length & 3) >= 2)
{
Vector256<float> pixels256_0 = Unsafe.As<Vector4, Vector256<float>>(ref rowStartRef);
- result256_0 = Vector256Utilities.MultiplyAddEstimate(Vector256.Load(bufferStart), pixels256_0, result256_0);
+ result256_0 = Vector256_.MultiplyAdd(result256_0, Vector256.Load(bufferStart), pixels256_0);
bufferStart += 8;
rowStartRef = ref Unsafe.Add(ref rowStartRef, (nuint)2);
@@ -180,10 +180,10 @@ internal readonly unsafe struct ResizeKernel
if ((this.Length & 1) != 0)
{
Vector128<float> pixels128 = Unsafe.As<Vector4, Vector128<float>>(ref rowStartRef);
- result128 = Vector128Utilities.MultiplyAddEstimate(Vector128.Load(bufferStart), pixels128, result128);
+ result128 = Vector128_.MultiplyAdd(result128, Vector128.Load(bufferStart), pixels128);
}
- return *(Vector4*)&result128;
+ return result128.AsVector4();
}
}
else
diff --git a/src/ImageSharp/Processing/Processors/Transforms/Resize/ResizeKernelMap.cs b/src/ImageSharp/Processing/Processors/Transforms/Resize/ResizeKernelMap.cs
index b52054d553..40d05f0a7f 100644
--- a/src/ImageSharp/Processing/Processors/Transforms/Resize/ResizeKernelMap.cs
+++ b/src/ImageSharp/Processing/Processors/Transforms/Resize/ResizeKernelMap.cs
@@ -51,7 +51,7 @@ internal partial class ResizeKernelMap : IDisposable
this.DestinationLength = destinationLength;
this.MaxDiameter = (radius * 2) + 1;
- if (ResizeKernel.SupportsVectorization)
+ if (ResizeKernel.IsHardwareAccelerated)
{
this.data = memoryAllocator.Allocate2D<float>(this.MaxDiameter * 4, bufferHeight, preferContiguosImageBuffers: true);
}
diff --git a/tests/ImageSharp.Tests/Processing/Processors/Transforms/ResizeKernelMapTests.cs b/tests/ImageSharp.Tests/Processing/Processors/Transforms/ResizeKernelMapTests.cs
index 6d0de65c42..cc0fc051e4 100644
--- a/tests/ImageSharp.Tests/Processing/Processors/Transforms/ResizeKernelMapTests.cs
+++ b/tests/ImageSharp.Tests/Processing/Processors/Transforms/ResizeKernelMapTests.cs
@@ -141,7 +141,7 @@ public partial class ResizeKernelMapTests
Span<float> actualValues;
ApproximateFloatComparer comparer;
- if (ResizeKernel.SupportsVectorization)
+ if (ResizeKernel.IsHardwareAccelerated)
{
comparer = new ApproximateFloatComparer(1e-4f);