
Merge branch 'master' into af/benchmark/SkiaBitmap_DecodeToTargetSize

pull/1789/head
Anton Firszov (4 years ago), committed by GitHub
commit bfcf134755
  1. 166  ImageSharp.sln
  2. 5  src/ImageSharp/Advanced/AotCompilerTools.cs
  3. 33  src/ImageSharp/Configuration.cs
  4. 104  src/ImageSharp/Formats/ImageExtensions.Save.cs
  5. 1  src/ImageSharp/Formats/ImageExtensions.Save.tt
  6. 25  src/ImageSharp/Formats/Jpeg/JpegEncoder.cs
  7. 39  src/ImageSharp/Formats/Jpeg/JpegEncoderCore.cs
  8. 38  src/ImageSharp/Formats/Png/PngEncoderCore.cs
  9. 8  src/ImageSharp/Formats/Png/PngEncoderOptions.cs
  10. 12  src/ImageSharp/Formats/Png/PngEncoderOptionsHelpers.cs
  11. 431  src/ImageSharp/Formats/Webp/AlphaDecoder.cs
  12. 58  src/ImageSharp/Formats/Webp/BitReader/BitReaderBase.cs
  13. 229  src/ImageSharp/Formats/Webp/BitReader/Vp8BitReader.cs
  14. 215  src/ImageSharp/Formats/Webp/BitReader/Vp8LBitReader.cs
  15. 148  src/ImageSharp/Formats/Webp/BitWriter/BitWriterBase.cs
  16. 674  src/ImageSharp/Formats/Webp/BitWriter/Vp8BitWriter.cs
  17. 212  src/ImageSharp/Formats/Webp/BitWriter/Vp8LBitWriter.cs
  18. 25  src/ImageSharp/Formats/Webp/EntropyIx.cs
  19. 36  src/ImageSharp/Formats/Webp/HistoIx.cs
  20. 16  src/ImageSharp/Formats/Webp/IWebpDecoderOptions.cs
  21. 77  src/ImageSharp/Formats/Webp/IWebpEncoderOptions.cs
  22. 854  src/ImageSharp/Formats/Webp/Lossless/BackwardReferenceEncoder.cs
  23. 84  src/ImageSharp/Formats/Webp/Lossless/ColorCache.cs
  24. 20  src/ImageSharp/Formats/Webp/Lossless/CostCacheInterval.cs
  25. 39  src/ImageSharp/Formats/Webp/Lossless/CostInterval.cs
  26. 308  src/ImageSharp/Formats/Webp/Lossless/CostManager.cs
  27. 102  src/ImageSharp/Formats/Webp/Lossless/CostModel.cs
  28. 14  src/ImageSharp/Formats/Webp/Lossless/CrunchConfig.cs
  29. 12  src/ImageSharp/Formats/Webp/Lossless/CrunchSubConfig.cs
  30. 92  src/ImageSharp/Formats/Webp/Lossless/DominantCostRange.cs
  31. 59  src/ImageSharp/Formats/Webp/Lossless/HTreeGroup.cs
  32. 18  src/ImageSharp/Formats/Webp/Lossless/HistogramBinInfo.cs
  33. 685  src/ImageSharp/Formats/Webp/Lossless/HistogramEncoder.cs
  34. 22  src/ImageSharp/Formats/Webp/Lossless/HistogramPair.cs
  35. 36  src/ImageSharp/Formats/Webp/Lossless/HuffIndex.cs
  36. 24  src/ImageSharp/Formats/Webp/Lossless/HuffmanCode.cs
  37. 64  src/ImageSharp/Formats/Webp/Lossless/HuffmanTree.cs
  38. 26  src/ImageSharp/Formats/Webp/Lossless/HuffmanTreeCode.cs
  39. 24  src/ImageSharp/Formats/Webp/Lossless/HuffmanTreeToken.cs
  40. 656  src/ImageSharp/Formats/Webp/Lossless/HuffmanUtils.cs
  41. 1279  src/ImageSharp/Formats/Webp/Lossless/LosslessUtils.cs
  42. 125  src/ImageSharp/Formats/Webp/Lossless/NearLosslessEnc.cs
  43. 54  src/ImageSharp/Formats/Webp/Lossless/PixOrCopy.cs
  44. 16  src/ImageSharp/Formats/Webp/Lossless/PixOrCopyMode.cs
  45. 1181  src/ImageSharp/Formats/Webp/Lossless/PredictorEncoder.cs
  46. 24  src/ImageSharp/Formats/Webp/Lossless/Vp8LBackwardRefs.cs
  47. 221  src/ImageSharp/Formats/Webp/Lossless/Vp8LBitEntropy.cs
  48. 70  src/ImageSharp/Formats/Webp/Lossless/Vp8LDecoder.cs
  49. 1786  src/ImageSharp/Formats/Webp/Lossless/Vp8LEncoder.cs
  50. 284  src/ImageSharp/Formats/Webp/Lossless/Vp8LHashChain.cs
  51. 515  src/ImageSharp/Formats/Webp/Lossless/Vp8LHistogram.cs
  52. 14  src/ImageSharp/Formats/Webp/Lossless/Vp8LLz77Type.cs
  53. 28  src/ImageSharp/Formats/Webp/Lossless/Vp8LMetadata.cs
  54. 14  src/ImageSharp/Formats/Webp/Lossless/Vp8LMultipliers.cs
  55. 63  src/ImageSharp/Formats/Webp/Lossless/Vp8LStreaks.cs
  56. 47  src/ImageSharp/Formats/Webp/Lossless/Vp8LTransform.cs
  57. 37  src/ImageSharp/Formats/Webp/Lossless/Vp8LTransformType.cs
  58. 1001  src/ImageSharp/Formats/Webp/Lossless/WebpLosslessDecoder.cs
  59. BIN  src/ImageSharp/Formats/Webp/Lossless/Webp_Lossless_Bitstream_Specification.pdf
  60. 28  src/ImageSharp/Formats/Webp/Lossy/IntraPredictionMode.cs
  61. 26  src/ImageSharp/Formats/Webp/Lossy/LoopFilter.cs
  62. 1086  src/ImageSharp/Formats/Webp/Lossy/LossyUtils.cs
  63. 76  src/ImageSharp/Formats/Webp/Lossy/PassStats.cs
  64. 637  src/ImageSharp/Formats/Webp/Lossy/QuantEnc.cs
  65. 28  src/ImageSharp/Formats/Webp/Lossy/Vp8BandProbas.cs
  66. 15  src/ImageSharp/Formats/Webp/Lossy/Vp8CostArray.cs
  67. 25  src/ImageSharp/Formats/Webp/Lossy/Vp8Costs.cs
  68. 341  src/ImageSharp/Formats/Webp/Lossy/Vp8Decoder.cs
  69. 948  src/ImageSharp/Formats/Webp/Lossy/Vp8EncIterator.cs
  70. 265  src/ImageSharp/Formats/Webp/Lossy/Vp8EncProba.cs
  71. 34  src/ImageSharp/Formats/Webp/Lossy/Vp8EncSegmentHeader.cs
  72. 1102  src/ImageSharp/Formats/Webp/Lossy/Vp8Encoder.cs
  73. 655  src/ImageSharp/Formats/Webp/Lossy/Vp8Encoding.cs
  74. 72  src/ImageSharp/Formats/Webp/Lossy/Vp8FilterHeader.cs
  75. 83  src/ImageSharp/Formats/Webp/Lossy/Vp8FilterInfo.cs
  76. 26  src/ImageSharp/Formats/Webp/Lossy/Vp8FrameHeader.cs
  77. 140  src/ImageSharp/Formats/Webp/Lossy/Vp8Histogram.cs
  78. 68  src/ImageSharp/Formats/Webp/Lossy/Vp8Io.cs
  79. 21  src/ImageSharp/Formats/Webp/Lossy/Vp8MacroBlock.cs
  80. 66  src/ImageSharp/Formats/Webp/Lossy/Vp8MacroBlockData.cs
  81. 21  src/ImageSharp/Formats/Webp/Lossy/Vp8MacroBlockInfo.cs
  82. 12  src/ImageSharp/Formats/Webp/Lossy/Vp8MacroBlockType.cs
  83. 111  src/ImageSharp/Formats/Webp/Lossy/Vp8Matrix.cs
  84. 128  src/ImageSharp/Formats/Webp/Lossy/Vp8ModeScore.cs
  85. 42  src/ImageSharp/Formats/Webp/Lossy/Vp8PictureHeader.cs
  86. 42  src/ImageSharp/Formats/Webp/Lossy/Vp8Proba.cs
  87. 21  src/ImageSharp/Formats/Webp/Lossy/Vp8ProbaArray.cs
  88. 34  src/ImageSharp/Formats/Webp/Lossy/Vp8QuantMatrix.cs
  89. 31  src/ImageSharp/Formats/Webp/Lossy/Vp8RDLevel.cs
  90. 171  src/ImageSharp/Formats/Webp/Lossy/Vp8Residual.cs
  91. 45  src/ImageSharp/Formats/Webp/Lossy/Vp8SegmentHeader.cs
  92. 85  src/ImageSharp/Formats/Webp/Lossy/Vp8SegmentInfo.cs
  93. 22  src/ImageSharp/Formats/Webp/Lossy/Vp8Stats.cs
  94. 15  src/ImageSharp/Formats/Webp/Lossy/Vp8StatsArray.cs
  95. 14  src/ImageSharp/Formats/Webp/Lossy/Vp8TopSamples.cs
  96. 1376  src/ImageSharp/Formats/Webp/Lossy/WebpLossyDecoder.cs
  97. 303  src/ImageSharp/Formats/Webp/Lossy/YuvConversion.cs
  98. BIN  src/ImageSharp/Formats/Webp/Lossy/rfc6386_lossy_specification.pdf
  99. 21  src/ImageSharp/Formats/Webp/MetadataExtensions.cs
  100. 10  src/ImageSharp/Formats/Webp/Readme.md

166  ImageSharp.sln

@@ -1,3 +1,4 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 16
VisualStudioVersion = 16.0.28902.138
@@ -378,6 +379,170 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Png", "Png", "{E1C42A6F-913
tests\Images\Input\Png\zlib-ztxt-bad-header.png = tests\Images\Input\Png\zlib-ztxt-bad-header.png
EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Webp", "Webp", "{983A31E2-5E26-4058-BD6E-03B4922D4BBF}"
ProjectSection(SolutionItems) = preProject
tests\Images\Input\Webp\1602311202.webp = tests\Images\Input\Webp\1602311202.webp
tests\Images\Input\Webp\alpha_color_cache.webp = tests\Images\Input\Webp\alpha_color_cache.webp
tests\Images\Input\Webp\alpha_filter_0_method_0.webp = tests\Images\Input\Webp\alpha_filter_0_method_0.webp
tests\Images\Input\Webp\alpha_filter_0_method_1.webp = tests\Images\Input\Webp\alpha_filter_0_method_1.webp
tests\Images\Input\Webp\alpha_filter_1.webp = tests\Images\Input\Webp\alpha_filter_1.webp
tests\Images\Input\Webp\alpha_filter_1_method_0.webp = tests\Images\Input\Webp\alpha_filter_1_method_0.webp
tests\Images\Input\Webp\alpha_filter_1_method_1.webp = tests\Images\Input\Webp\alpha_filter_1_method_1.webp
tests\Images\Input\Webp\alpha_filter_2.webp = tests\Images\Input\Webp\alpha_filter_2.webp
tests\Images\Input\Webp\alpha_filter_2_method_0.webp = tests\Images\Input\Webp\alpha_filter_2_method_0.webp
tests\Images\Input\Webp\alpha_filter_2_method_1.webp = tests\Images\Input\Webp\alpha_filter_2_method_1.webp
tests\Images\Input\Webp\alpha_filter_3.webp = tests\Images\Input\Webp\alpha_filter_3.webp
tests\Images\Input\Webp\alpha_filter_3_method_0.webp = tests\Images\Input\Webp\alpha_filter_3_method_0.webp
tests\Images\Input\Webp\alpha_filter_3_method_1.webp = tests\Images\Input\Webp\alpha_filter_3_method_1.webp
tests\Images\Input\Webp\alpha_no_compression.webp = tests\Images\Input\Webp\alpha_no_compression.webp
tests\Images\Input\Webp\animated-webp.webp = tests\Images\Input\Webp\animated-webp.webp
tests\Images\Input\Webp\animated2.webp = tests\Images\Input\Webp\animated2.webp
tests\Images\Input\Webp\animated3.webp = tests\Images\Input\Webp\animated3.webp
tests\Images\Input\Webp\animated_lossy.webp = tests\Images\Input\Webp\animated_lossy.webp
tests\Images\Input\Webp\bad_palette_index.webp = tests\Images\Input\Webp\bad_palette_index.webp
tests\Images\Input\Webp\big_endian_bug_393.webp = tests\Images\Input\Webp\big_endian_bug_393.webp
tests\Images\Input\Webp\bike_lossless.webp = tests\Images\Input\Webp\bike_lossless.webp
tests\Images\Input\Webp\bike_lossless_small.webp = tests\Images\Input\Webp\bike_lossless_small.webp
tests\Images\Input\Webp\bike_lossy.webp = tests\Images\Input\Webp\bike_lossy.webp
tests\Images\Input\Webp\bike_lossy_complex_filter.webp = tests\Images\Input\Webp\bike_lossy_complex_filter.webp
tests\Images\Input\Webp\bryce.webp = tests\Images\Input\Webp\bryce.webp
tests\Images\Input\Webp\bug3.webp = tests\Images\Input\Webp\bug3.webp
tests\Images\Input\Webp\color_cache_bits_11.webp = tests\Images\Input\Webp\color_cache_bits_11.webp
tests\Images\Input\Webp\earth_lossless.webp = tests\Images\Input\Webp\earth_lossless.webp
tests\Images\Input\Webp\earth_lossy.webp = tests\Images\Input\Webp\earth_lossy.webp
tests\Images\Input\Webp\exif_lossless.webp = tests\Images\Input\Webp\exif_lossless.webp
tests\Images\Input\Webp\exif_lossy.webp = tests\Images\Input\Webp\exif_lossy.webp
tests\Images\Input\Webp\flag_of_germany.png = tests\Images\Input\Webp\flag_of_germany.png
tests\Images\Input\Webp\lossless1.webp = tests\Images\Input\Webp\lossless1.webp
tests\Images\Input\Webp\lossless2.webp = tests\Images\Input\Webp\lossless2.webp
tests\Images\Input\Webp\lossless3.webp = tests\Images\Input\Webp\lossless3.webp
tests\Images\Input\Webp\lossless4.webp = tests\Images\Input\Webp\lossless4.webp
tests\Images\Input\Webp\lossless_alpha_small.webp = tests\Images\Input\Webp\lossless_alpha_small.webp
tests\Images\Input\Webp\lossless_big_random_alpha.webp = tests\Images\Input\Webp\lossless_big_random_alpha.webp
tests\Images\Input\Webp\lossless_color_transform.bmp = tests\Images\Input\Webp\lossless_color_transform.bmp
tests\Images\Input\Webp\lossless_color_transform.pam = tests\Images\Input\Webp\lossless_color_transform.pam
tests\Images\Input\Webp\lossless_color_transform.pgm = tests\Images\Input\Webp\lossless_color_transform.pgm
tests\Images\Input\Webp\lossless_color_transform.ppm = tests\Images\Input\Webp\lossless_color_transform.ppm
tests\Images\Input\Webp\lossless_color_transform.tiff = tests\Images\Input\Webp\lossless_color_transform.tiff
tests\Images\Input\Webp\lossless_color_transform.webp = tests\Images\Input\Webp\lossless_color_transform.webp
tests\Images\Input\Webp\lossless_vec_1_0.webp = tests\Images\Input\Webp\lossless_vec_1_0.webp
tests\Images\Input\Webp\lossless_vec_1_1.webp = tests\Images\Input\Webp\lossless_vec_1_1.webp
tests\Images\Input\Webp\lossless_vec_1_10.webp = tests\Images\Input\Webp\lossless_vec_1_10.webp
tests\Images\Input\Webp\lossless_vec_1_11.webp = tests\Images\Input\Webp\lossless_vec_1_11.webp
tests\Images\Input\Webp\lossless_vec_1_12.webp = tests\Images\Input\Webp\lossless_vec_1_12.webp
tests\Images\Input\Webp\lossless_vec_1_13.webp = tests\Images\Input\Webp\lossless_vec_1_13.webp
tests\Images\Input\Webp\lossless_vec_1_14.webp = tests\Images\Input\Webp\lossless_vec_1_14.webp
tests\Images\Input\Webp\lossless_vec_1_15.webp = tests\Images\Input\Webp\lossless_vec_1_15.webp
tests\Images\Input\Webp\lossless_vec_1_2.webp = tests\Images\Input\Webp\lossless_vec_1_2.webp
tests\Images\Input\Webp\lossless_vec_1_3.webp = tests\Images\Input\Webp\lossless_vec_1_3.webp
tests\Images\Input\Webp\lossless_vec_1_4.webp = tests\Images\Input\Webp\lossless_vec_1_4.webp
tests\Images\Input\Webp\lossless_vec_1_5.webp = tests\Images\Input\Webp\lossless_vec_1_5.webp
tests\Images\Input\Webp\lossless_vec_1_6.webp = tests\Images\Input\Webp\lossless_vec_1_6.webp
tests\Images\Input\Webp\lossless_vec_1_7.webp = tests\Images\Input\Webp\lossless_vec_1_7.webp
tests\Images\Input\Webp\lossless_vec_1_8.webp = tests\Images\Input\Webp\lossless_vec_1_8.webp
tests\Images\Input\Webp\lossless_vec_1_9.webp = tests\Images\Input\Webp\lossless_vec_1_9.webp
tests\Images\Input\Webp\lossless_vec_2_0.webp = tests\Images\Input\Webp\lossless_vec_2_0.webp
tests\Images\Input\Webp\lossless_vec_2_1.webp = tests\Images\Input\Webp\lossless_vec_2_1.webp
tests\Images\Input\Webp\lossless_vec_2_10.webp = tests\Images\Input\Webp\lossless_vec_2_10.webp
tests\Images\Input\Webp\lossless_vec_2_11.webp = tests\Images\Input\Webp\lossless_vec_2_11.webp
tests\Images\Input\Webp\lossless_vec_2_12.webp = tests\Images\Input\Webp\lossless_vec_2_12.webp
tests\Images\Input\Webp\lossless_vec_2_13.webp = tests\Images\Input\Webp\lossless_vec_2_13.webp
tests\Images\Input\Webp\lossless_vec_2_14.webp = tests\Images\Input\Webp\lossless_vec_2_14.webp
tests\Images\Input\Webp\lossless_vec_2_15.webp = tests\Images\Input\Webp\lossless_vec_2_15.webp
tests\Images\Input\Webp\lossless_vec_2_2.webp = tests\Images\Input\Webp\lossless_vec_2_2.webp
tests\Images\Input\Webp\lossless_vec_2_3.webp = tests\Images\Input\Webp\lossless_vec_2_3.webp
tests\Images\Input\Webp\lossless_vec_2_4.webp = tests\Images\Input\Webp\lossless_vec_2_4.webp
tests\Images\Input\Webp\lossless_vec_2_5.webp = tests\Images\Input\Webp\lossless_vec_2_5.webp
tests\Images\Input\Webp\lossless_vec_2_6.webp = tests\Images\Input\Webp\lossless_vec_2_6.webp
tests\Images\Input\Webp\lossless_vec_2_7.webp = tests\Images\Input\Webp\lossless_vec_2_7.webp
tests\Images\Input\Webp\lossless_vec_2_8.webp = tests\Images\Input\Webp\lossless_vec_2_8.webp
tests\Images\Input\Webp\lossless_vec_2_9.webp = tests\Images\Input\Webp\lossless_vec_2_9.webp
tests\Images\Input\Webp\lossless_vec_list.txt = tests\Images\Input\Webp\lossless_vec_list.txt
tests\Images\Input\Webp\lossless_with_iccp.webp = tests\Images\Input\Webp\lossless_with_iccp.webp
tests\Images\Input\Webp\lossy_alpha1.webp = tests\Images\Input\Webp\lossy_alpha1.webp
tests\Images\Input\Webp\lossy_alpha2.webp = tests\Images\Input\Webp\lossy_alpha2.webp
tests\Images\Input\Webp\lossy_alpha3.webp = tests\Images\Input\Webp\lossy_alpha3.webp
tests\Images\Input\Webp\lossy_alpha4.webp = tests\Images\Input\Webp\lossy_alpha4.webp
tests\Images\Input\Webp\lossy_extreme_probabilities.webp = tests\Images\Input\Webp\lossy_extreme_probabilities.webp
tests\Images\Input\Webp\lossy_q0_f100.webp = tests\Images\Input\Webp\lossy_q0_f100.webp
tests\Images\Input\Webp\lossy_with_iccp.webp = tests\Images\Input\Webp\lossy_with_iccp.webp
tests\Images\Input\Webp\near_lossless_75.webp = tests\Images\Input\Webp\near_lossless_75.webp
tests\Images\Input\Webp\peak.png = tests\Images\Input\Webp\peak.png
tests\Images\Input\Webp\rgb_pattern_100x100.png = tests\Images\Input\Webp\rgb_pattern_100x100.png
tests\Images\Input\Webp\rgb_pattern_63x63.png = tests\Images\Input\Webp\rgb_pattern_63x63.png
tests\Images\Input\Webp\rgb_pattern_80x80.png = tests\Images\Input\Webp\rgb_pattern_80x80.png
tests\Images\Input\Webp\segment01.webp = tests\Images\Input\Webp\segment01.webp
tests\Images\Input\Webp\segment02.webp = tests\Images\Input\Webp\segment02.webp
tests\Images\Input\Webp\segment03.webp = tests\Images\Input\Webp\segment03.webp
tests\Images\Input\Webp\small_13x1.webp = tests\Images\Input\Webp\small_13x1.webp
tests\Images\Input\Webp\small_1x1.webp = tests\Images\Input\Webp\small_1x1.webp
tests\Images\Input\Webp\small_1x13.webp = tests\Images\Input\Webp\small_1x13.webp
tests\Images\Input\Webp\small_31x13.webp = tests\Images\Input\Webp\small_31x13.webp
tests\Images\Input\Webp\sticker.webp = tests\Images\Input\Webp\sticker.webp
tests\Images\Input\Webp\test-nostrong.webp = tests\Images\Input\Webp\test-nostrong.webp
tests\Images\Input\Webp\test.webp = tests\Images\Input\Webp\test.webp
tests\Images\Input\Webp\testpattern_opaque.png = tests\Images\Input\Webp\testpattern_opaque.png
tests\Images\Input\Webp\testpattern_opaque_small.png = tests\Images\Input\Webp\testpattern_opaque_small.png
tests\Images\Input\Webp\very_short.webp = tests\Images\Input\Webp\very_short.webp
tests\Images\Input\Webp\vp80-00-comprehensive-001.webp = tests\Images\Input\Webp\vp80-00-comprehensive-001.webp
tests\Images\Input\Webp\vp80-00-comprehensive-002.webp = tests\Images\Input\Webp\vp80-00-comprehensive-002.webp
tests\Images\Input\Webp\vp80-00-comprehensive-003.webp = tests\Images\Input\Webp\vp80-00-comprehensive-003.webp
tests\Images\Input\Webp\vp80-00-comprehensive-004.webp = tests\Images\Input\Webp\vp80-00-comprehensive-004.webp
tests\Images\Input\Webp\vp80-00-comprehensive-005.webp = tests\Images\Input\Webp\vp80-00-comprehensive-005.webp
tests\Images\Input\Webp\vp80-00-comprehensive-006.webp = tests\Images\Input\Webp\vp80-00-comprehensive-006.webp
tests\Images\Input\Webp\vp80-00-comprehensive-007.webp = tests\Images\Input\Webp\vp80-00-comprehensive-007.webp
tests\Images\Input\Webp\vp80-00-comprehensive-008.webp = tests\Images\Input\Webp\vp80-00-comprehensive-008.webp
tests\Images\Input\Webp\vp80-00-comprehensive-009.webp = tests\Images\Input\Webp\vp80-00-comprehensive-009.webp
tests\Images\Input\Webp\vp80-00-comprehensive-010.webp = tests\Images\Input\Webp\vp80-00-comprehensive-010.webp
tests\Images\Input\Webp\vp80-00-comprehensive-011.webp = tests\Images\Input\Webp\vp80-00-comprehensive-011.webp
tests\Images\Input\Webp\vp80-00-comprehensive-012.webp = tests\Images\Input\Webp\vp80-00-comprehensive-012.webp
tests\Images\Input\Webp\vp80-00-comprehensive-013.webp = tests\Images\Input\Webp\vp80-00-comprehensive-013.webp
tests\Images\Input\Webp\vp80-00-comprehensive-014.webp = tests\Images\Input\Webp\vp80-00-comprehensive-014.webp
tests\Images\Input\Webp\vp80-00-comprehensive-015.webp = tests\Images\Input\Webp\vp80-00-comprehensive-015.webp
tests\Images\Input\Webp\vp80-00-comprehensive-016.webp = tests\Images\Input\Webp\vp80-00-comprehensive-016.webp
tests\Images\Input\Webp\vp80-00-comprehensive-017.webp = tests\Images\Input\Webp\vp80-00-comprehensive-017.webp
tests\Images\Input\Webp\vp80-01-intra-1400.webp = tests\Images\Input\Webp\vp80-01-intra-1400.webp
tests\Images\Input\Webp\vp80-01-intra-1411.webp = tests\Images\Input\Webp\vp80-01-intra-1411.webp
tests\Images\Input\Webp\vp80-01-intra-1416.webp = tests\Images\Input\Webp\vp80-01-intra-1416.webp
tests\Images\Input\Webp\vp80-01-intra-1417.webp = tests\Images\Input\Webp\vp80-01-intra-1417.webp
tests\Images\Input\Webp\vp80-02-inter-1402.webp = tests\Images\Input\Webp\vp80-02-inter-1402.webp
tests\Images\Input\Webp\vp80-02-inter-1412.webp = tests\Images\Input\Webp\vp80-02-inter-1412.webp
tests\Images\Input\Webp\vp80-02-inter-1418.webp = tests\Images\Input\Webp\vp80-02-inter-1418.webp
tests\Images\Input\Webp\vp80-02-inter-1424.webp = tests\Images\Input\Webp\vp80-02-inter-1424.webp
tests\Images\Input\Webp\vp80-03-segmentation-1401.webp = tests\Images\Input\Webp\vp80-03-segmentation-1401.webp
tests\Images\Input\Webp\vp80-03-segmentation-1403.webp = tests\Images\Input\Webp\vp80-03-segmentation-1403.webp
tests\Images\Input\Webp\vp80-03-segmentation-1407.webp = tests\Images\Input\Webp\vp80-03-segmentation-1407.webp
tests\Images\Input\Webp\vp80-03-segmentation-1408.webp = tests\Images\Input\Webp\vp80-03-segmentation-1408.webp
tests\Images\Input\Webp\vp80-03-segmentation-1409.webp = tests\Images\Input\Webp\vp80-03-segmentation-1409.webp
tests\Images\Input\Webp\vp80-03-segmentation-1410.webp = tests\Images\Input\Webp\vp80-03-segmentation-1410.webp
tests\Images\Input\Webp\vp80-03-segmentation-1413.webp = tests\Images\Input\Webp\vp80-03-segmentation-1413.webp
tests\Images\Input\Webp\vp80-03-segmentation-1414.webp = tests\Images\Input\Webp\vp80-03-segmentation-1414.webp
tests\Images\Input\Webp\vp80-03-segmentation-1415.webp = tests\Images\Input\Webp\vp80-03-segmentation-1415.webp
tests\Images\Input\Webp\vp80-03-segmentation-1425.webp = tests\Images\Input\Webp\vp80-03-segmentation-1425.webp
tests\Images\Input\Webp\vp80-03-segmentation-1426.webp = tests\Images\Input\Webp\vp80-03-segmentation-1426.webp
tests\Images\Input\Webp\vp80-03-segmentation-1427.webp = tests\Images\Input\Webp\vp80-03-segmentation-1427.webp
tests\Images\Input\Webp\vp80-03-segmentation-1432.webp = tests\Images\Input\Webp\vp80-03-segmentation-1432.webp
tests\Images\Input\Webp\vp80-03-segmentation-1435.webp = tests\Images\Input\Webp\vp80-03-segmentation-1435.webp
tests\Images\Input\Webp\vp80-03-segmentation-1436.webp = tests\Images\Input\Webp\vp80-03-segmentation-1436.webp
tests\Images\Input\Webp\vp80-03-segmentation-1437.webp = tests\Images\Input\Webp\vp80-03-segmentation-1437.webp
tests\Images\Input\Webp\vp80-03-segmentation-1441.webp = tests\Images\Input\Webp\vp80-03-segmentation-1441.webp
tests\Images\Input\Webp\vp80-03-segmentation-1442.webp = tests\Images\Input\Webp\vp80-03-segmentation-1442.webp
tests\Images\Input\Webp\vp80-04-partitions-1404.webp = tests\Images\Input\Webp\vp80-04-partitions-1404.webp
tests\Images\Input\Webp\vp80-04-partitions-1405.webp = tests\Images\Input\Webp\vp80-04-partitions-1405.webp
tests\Images\Input\Webp\vp80-04-partitions-1406.webp = tests\Images\Input\Webp\vp80-04-partitions-1406.webp
tests\Images\Input\Webp\vp80-05-sharpness-1428.webp = tests\Images\Input\Webp\vp80-05-sharpness-1428.webp
tests\Images\Input\Webp\vp80-05-sharpness-1429.webp = tests\Images\Input\Webp\vp80-05-sharpness-1429.webp
tests\Images\Input\Webp\vp80-05-sharpness-1430.webp = tests\Images\Input\Webp\vp80-05-sharpness-1430.webp
tests\Images\Input\Webp\vp80-05-sharpness-1431.webp = tests\Images\Input\Webp\vp80-05-sharpness-1431.webp
tests\Images\Input\Webp\vp80-05-sharpness-1433.webp = tests\Images\Input\Webp\vp80-05-sharpness-1433.webp
tests\Images\Input\Webp\vp80-05-sharpness-1434.webp = tests\Images\Input\Webp\vp80-05-sharpness-1434.webp
tests\Images\Input\Webp\vp80-05-sharpness-1438.webp = tests\Images\Input\Webp\vp80-05-sharpness-1438.webp
tests\Images\Input\Webp\vp80-05-sharpness-1439.webp = tests\Images\Input\Webp\vp80-05-sharpness-1439.webp
tests\Images\Input\Webp\vp80-05-sharpness-1440.webp = tests\Images\Input\Webp\vp80-05-sharpness-1440.webp
tests\Images\Input\Webp\vp80-05-sharpness-1443.webp = tests\Images\Input\Webp\vp80-05-sharpness-1443.webp
tests\Images\Input\Webp\yuv_test.png = tests\Images\Input\Webp\yuv_test.png
EndProjectSection
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ImageSharp.Tests", "tests\ImageSharp.Tests\ImageSharp.Tests.csproj", "{EA3000E9-2A91-4EC4-8A68-E566DEBDC4F6}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ImageSharp.Benchmarks", "tests\ImageSharp.Benchmarks\ImageSharp.Benchmarks.csproj", "{2BF743D8-2A06-412D-96D7-F448F00C5EA5}"
@@ -539,6 +704,7 @@ Global
{6458AFCB-A159-47D5-8F2B-50C95C0915E0} = {DB21FED7-E8CB-4B00-9EB2-9144D32A590A}
{39F5197B-CF6C-41A5-9739-7F97E78BB104} = {6458AFCB-A159-47D5-8F2B-50C95C0915E0}
{E1C42A6F-913B-4A7B-B1A8-2BB62843B254} = {9DA226A1-8656-49A8-A58A-A8B5C081AD66}
{983A31E2-5E26-4058-BD6E-03B4922D4BBF} = {9DA226A1-8656-49A8-A58A-A8B5C081AD66}
{EA3000E9-2A91-4EC4-8A68-E566DEBDC4F6} = {56801022-D71A-4FBE-BC5B-CBA08E2284EC}
{2BF743D8-2A06-412D-96D7-F448F00C5EA5} = {56801022-D71A-4FBE-BC5B-CBA08E2284EC}
{C0D7754B-5277-438E-ABEB-2BA34401B5A7} = {1799C43E-5C54-4A8F-8D64-B1475241DB0D}

5  src/ImageSharp/Advanced/AotCompilerTools.cs

@@ -13,6 +13,7 @@ using SixLabors.ImageSharp.Formats.Jpeg.Components;
using SixLabors.ImageSharp.Formats.Png;
using SixLabors.ImageSharp.Formats.Tga;
using SixLabors.ImageSharp.Formats.Tiff;
using SixLabors.ImageSharp.Formats.Webp;
using SixLabors.ImageSharp.Memory;
using SixLabors.ImageSharp.PixelFormats;
using SixLabors.ImageSharp.Processing;
@@ -195,6 +196,7 @@ namespace SixLabors.ImageSharp.Advanced
private static void AotCompileImageEncoderInternals<TPixel>()
where TPixel : unmanaged, IPixel<TPixel>
{
default(WebpEncoderCore).Encode<TPixel>(default, default, default);
default(BmpEncoderCore).Encode<TPixel>(default, default, default);
default(GifEncoderCore).Encode<TPixel>(default, default, default);
default(JpegEncoderCore).Encode<TPixel>(default, default, default);
@@ -211,6 +213,7 @@ namespace SixLabors.ImageSharp.Advanced
private static void AotCompileImageDecoderInternals<TPixel>()
where TPixel : unmanaged, IPixel<TPixel>
{
default(WebpDecoderCore).Decode<TPixel>(default, default, default);
default(BmpDecoderCore).Decode<TPixel>(default, default, default);
default(GifDecoderCore).Decode<TPixel>(default, default, default);
default(JpegDecoderCore).Decode<TPixel>(default, default, default);
@@ -227,6 +230,7 @@ namespace SixLabors.ImageSharp.Advanced
private static void AotCompileImageEncoders<TPixel>()
where TPixel : unmanaged, IPixel<TPixel>
{
AotCompileImageEncoder<TPixel, WebpEncoder>();
AotCompileImageEncoder<TPixel, BmpEncoder>();
AotCompileImageEncoder<TPixel, GifEncoder>();
AotCompileImageEncoder<TPixel, JpegEncoder>();
@@ -243,6 +247,7 @@ namespace SixLabors.ImageSharp.Advanced
private static void AotCompileImageDecoders<TPixel>()
where TPixel : unmanaged, IPixel<TPixel>
{
AotCompileImageDecoder<TPixel, WebpDecoder>();
AotCompileImageDecoder<TPixel, BmpDecoder>();
AotCompileImageDecoder<TPixel, GifDecoder>();
AotCompileImageDecoder<TPixel, JpegDecoder>();

33  src/ImageSharp/Configuration.cs

@@ -11,6 +11,7 @@ using SixLabors.ImageSharp.Formats.Jpeg;
using SixLabors.ImageSharp.Formats.Png;
using SixLabors.ImageSharp.Formats.Tga;
using SixLabors.ImageSharp.Formats.Tiff;
using SixLabors.ImageSharp.Formats.Webp;
using SixLabors.ImageSharp.IO;
using SixLabors.ImageSharp.Memory;
using SixLabors.ImageSharp.Processing;
@@ -159,20 +160,17 @@ namespace SixLabors.ImageSharp
/// Creates a shallow copy of the <see cref="Configuration"/>.
/// </summary>
/// <returns>A new configuration instance.</returns>
public Configuration Clone()
public Configuration Clone() => new Configuration
{
return new Configuration
{
MaxDegreeOfParallelism = this.MaxDegreeOfParallelism,
StreamProcessingBufferSize = this.StreamProcessingBufferSize,
ImageFormatsManager = this.ImageFormatsManager,
MemoryAllocator = this.MemoryAllocator,
ImageOperationsProvider = this.ImageOperationsProvider,
ReadOrigin = this.ReadOrigin,
FileSystem = this.FileSystem,
WorkingBufferSizeHintInBytes = this.WorkingBufferSizeHintInBytes,
};
}
MaxDegreeOfParallelism = this.MaxDegreeOfParallelism,
StreamProcessingBufferSize = this.StreamProcessingBufferSize,
ImageFormatsManager = this.ImageFormatsManager,
MemoryAllocator = this.MemoryAllocator,
ImageOperationsProvider = this.ImageOperationsProvider,
ReadOrigin = this.ReadOrigin,
FileSystem = this.FileSystem,
WorkingBufferSizeHintInBytes = this.WorkingBufferSizeHintInBytes,
};
/// <summary>
/// Creates the default instance with the following <see cref="IConfigurationModule"/>s preregistered:
@@ -182,17 +180,16 @@ namespace SixLabors.ImageSharp
/// <see cref="BmpConfigurationModule"/>.
/// <see cref="TgaConfigurationModule"/>.
/// <see cref="TiffConfigurationModule"/>.
/// <see cref="WebpConfigurationModule"/>.
/// </summary>
/// <returns>The default configuration of <see cref="Configuration"/>.</returns>
internal static Configuration CreateDefaultInstance()
{
return new Configuration(
internal static Configuration CreateDefaultInstance() => new Configuration(
new PngConfigurationModule(),
new JpegConfigurationModule(),
new GifConfigurationModule(),
new BmpConfigurationModule(),
new TgaConfigurationModule(),
new TiffConfigurationModule());
}
new TiffConfigurationModule(),
new WebpConfigurationModule());
}
}

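With WebpConfigurationModule preregistered in CreateDefaultInstance, Webp now takes part in format detection through Configuration.Default like the other built-in formats. A minimal sketch of what that enables (file names are illustrative, not part of the diff):

using SixLabors.ImageSharp;

// Format detection resolves .webp through the default configuration,
// so no explicit decoder or encoder has to be passed.
using Image image = Image.Load("input.webp");
image.Save("output.webp");
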
104  src/ImageSharp/Formats/ImageExtensions.Save.cs

@@ -12,6 +12,7 @@ using SixLabors.ImageSharp.Formats.Gif;
using SixLabors.ImageSharp.Formats.Jpeg;
using SixLabors.ImageSharp.Formats.Png;
using SixLabors.ImageSharp.Formats.Tga;
using SixLabors.ImageSharp.Formats.Webp;
using SixLabors.ImageSharp.Formats.Tiff;
namespace SixLabors.ImageSharp
@@ -536,6 +537,109 @@ namespace SixLabors.ImageSharp
encoder ?? source.GetConfiguration().ImageFormatsManager.FindEncoder(TgaFormat.Instance),
cancellationToken);
/// <summary>
/// Saves the image to the given stream with the Webp format.
/// </summary>
/// <param name="source">The image this method extends.</param>
/// <param name="path">The file path to save the image to.</param>
/// <exception cref="System.ArgumentNullException">Thrown if the path is null.</exception>
public static void SaveAsWebp(this Image source, string path) => SaveAsWebp(source, path, null);
/// <summary>
/// Saves the image to the given stream with the Webp format.
/// </summary>
/// <param name="source">The image this method extends.</param>
/// <param name="path">The file path to save the image to.</param>
/// <exception cref="System.ArgumentNullException">Thrown if the path is null.</exception>
/// <returns>A <see cref="Task"/> representing the asynchronous operation.</returns>
public static Task SaveAsWebpAsync(this Image source, string path) => SaveAsWebpAsync(source, path, null);
/// <summary>
/// Saves the image to the given stream with the Webp format.
/// </summary>
/// <param name="source">The image this method extends.</param>
/// <param name="path">The file path to save the image to.</param>
/// <param name="cancellationToken">The token to monitor for cancellation requests.</param>
/// <exception cref="System.ArgumentNullException">Thrown if the path is null.</exception>
/// <returns>A <see cref="Task"/> representing the asynchronous operation.</returns>
public static Task SaveAsWebpAsync(this Image source, string path, CancellationToken cancellationToken)
=> SaveAsWebpAsync(source, path, null, cancellationToken);
/// <summary>
/// Saves the image to the given stream with the Webp format.
/// </summary>
/// <param name="source">The image this method extends.</param>
/// <param name="path">The file path to save the image to.</param>
/// <param name="encoder">The encoder to save the image with.</param>
/// <exception cref="System.ArgumentNullException">Thrown if the path is null.</exception>
public static void SaveAsWebp(this Image source, string path, WebpEncoder encoder) =>
source.Save(
path,
encoder ?? source.GetConfiguration().ImageFormatsManager.FindEncoder(WebpFormat.Instance));
/// <summary>
/// Saves the image to the given stream with the Webp format.
/// </summary>
/// <param name="source">The image this method extends.</param>
/// <param name="path">The file path to save the image to.</param>
/// <param name="encoder">The encoder to save the image with.</param>
/// <param name="cancellationToken">The token to monitor for cancellation requests.</param>
/// <exception cref="System.ArgumentNullException">Thrown if the path is null.</exception>
/// <returns>A <see cref="Task"/> representing the asynchronous operation.</returns>
public static Task SaveAsWebpAsync(this Image source, string path, WebpEncoder encoder, CancellationToken cancellationToken = default) =>
source.SaveAsync(
path,
encoder ?? source.GetConfiguration().ImageFormatsManager.FindEncoder(WebpFormat.Instance),
cancellationToken);
/// <summary>
/// Saves the image to the given stream with the Webp format.
/// </summary>
/// <param name="source">The image this method extends.</param>
/// <param name="stream">The stream to save the image to.</param>
/// <exception cref="System.ArgumentNullException">Thrown if the stream is null.</exception>
public static void SaveAsWebp(this Image source, Stream stream)
=> SaveAsWebp(source, stream, null);
/// <summary>
/// Saves the image to the given stream with the Webp format.
/// </summary>
/// <param name="source">The image this method extends.</param>
/// <param name="stream">The stream to save the image to.</param>
/// <param name="cancellationToken">The token to monitor for cancellation requests.</param>
/// <exception cref="System.ArgumentNullException">Thrown if the stream is null.</exception>
/// <returns>A <see cref="Task"/> representing the asynchronous operation.</returns>
public static Task SaveAsWebpAsync(this Image source, Stream stream, CancellationToken cancellationToken = default)
=> SaveAsWebpAsync(source, stream, null, cancellationToken);
/// <summary>
/// Saves the image to the given stream with the Webp format.
/// </summary>
/// <param name="source">The image this method extends.</param>
/// <param name="stream">The stream to save the image to.</param>
/// <param name="encoder">The encoder to save the image with.</param>
/// <exception cref="System.ArgumentNullException">Thrown if the stream is null.</exception>
/// <returns>A <see cref="Task"/> representing the asynchronous operation.</returns>
public static void SaveAsWebp(this Image source, Stream stream, WebpEncoder encoder)
=> source.Save(
stream,
encoder ?? source.GetConfiguration().ImageFormatsManager.FindEncoder(WebpFormat.Instance));
/// <summary>
/// Saves the image to the given stream with the Webp format.
/// </summary>
/// <param name="source">The image this method extends.</param>
/// <param name="stream">The stream to save the image to.</param>
/// <param name="encoder">The encoder to save the image with.</param>
/// <param name="cancellationToken">The token to monitor for cancellation requests.</param>
/// <exception cref="System.ArgumentNullException">Thrown if the stream is null.</exception>
/// <returns>A <see cref="Task"/> representing the asynchronous operation.</returns>
public static Task SaveAsWebpAsync(this Image source, Stream stream, WebpEncoder encoder, CancellationToken cancellationToken = default) =>
source.SaveAsync(
stream,
encoder ?? source.GetConfiguration().ImageFormatsManager.FindEncoder(WebpFormat.Instance),
cancellationToken);
/// <summary>
/// Saves the image to the given stream with the Tiff format.
/// </summary>

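The new SaveAsWebp and SaveAsWebpAsync overloads mirror the existing per-format save helpers, falling back to the encoder registered for WebpFormat when none is supplied. A short usage sketch (file names and the parameterless WebpEncoder construction are illustrative):

using System.Threading.Tasks;
using SixLabors.ImageSharp;
using SixLabors.ImageSharp.Formats.Webp;

public static class WebpSaveExample
{
    public static async Task RunAsync()
    {
        using Image image = Image.Load("photo.jpg");

        // Synchronous save; the encoder is resolved from the configuration.
        image.SaveAsWebp("photo.webp");

        // Asynchronous save with an explicit encoder instance.
        await image.SaveAsWebpAsync("photo-async.webp", new WebpEncoder());
    }
}
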
1  src/ImageSharp/Formats/ImageExtensions.Save.tt

@@ -17,6 +17,7 @@ using SixLabors.ImageSharp.Advanced;
"Jpeg",
"Png",
"Tga",
"Webp",
"Tiff",
};

25  src/ImageSharp/Formats/Jpeg/JpegEncoder.cs

@@ -29,7 +29,6 @@ namespace SixLabors.ImageSharp.Formats.Jpeg
where TPixel : unmanaged, IPixel<TPixel>
{
var encoder = new JpegEncoderCore(this);
this.InitializeColorType(image);
encoder.Encode(image, stream);
}
@@ -45,31 +44,7 @@ namespace SixLabors.ImageSharp.Formats.Jpeg
where TPixel : unmanaged, IPixel<TPixel>
{
var encoder = new JpegEncoderCore(this);
this.InitializeColorType(image);
return encoder.EncodeAsync(image, stream, cancellationToken);
}
/// <summary>
/// If ColorType was not set, set it based on the given image.
/// </summary>
private void InitializeColorType<TPixel>(Image<TPixel> image)
where TPixel : unmanaged, IPixel<TPixel>
{
// First inspect the image metadata.
if (this.ColorType == null)
{
JpegMetadata metadata = image.Metadata.GetJpegMetadata();
this.ColorType = metadata.ColorType;
}
// Secondly, inspect the pixel type.
if (this.ColorType == null)
{
bool isGrayscale =
typeof(TPixel) == typeof(L8) || typeof(TPixel) == typeof(L16) ||
typeof(TPixel) == typeof(La16) || typeof(TPixel) == typeof(La32);
this.ColorType = isGrayscale ? JpegColorType.Luminance : JpegColorType.YCbCrRatio420;
}
}
}
}

39  src/ImageSharp/Formats/Jpeg/JpegEncoderCore.cs

@@ -86,10 +86,10 @@ namespace SixLabors.ImageSharp.Formats.Jpeg
ImageMetadata metadata = image.Metadata;
JpegMetadata jpegMetadata = metadata.GetJpegMetadata();
// If the color type was not specified by the user, preserve the color type of the input image, if it's a supported color type.
if (!this.colorType.HasValue && IsSupportedColorType(jpegMetadata.ColorType))
// If the color type was not specified by the user, preserve the color type of the input image.
if (!this.colorType.HasValue)
{
this.colorType = jpegMetadata.ColorType;
this.colorType = GetFallbackColorType(image);
}
// Compute number of components based on color type in options.
@@ -156,6 +156,39 @@ namespace SixLabors.ImageSharp.Formats.Jpeg
stream.Flush();
}
/// <summary>
/// If color type was not set, set it based on the given image.
/// Note, if there is no metadata and the image has multiple components this method
/// returns <see langword="null"/> deferring the field assignment
/// to <see cref="InitQuantizationTables(int, JpegMetadata, out Block8x8F, out Block8x8F)"/>.
/// </summary>
private static JpegColorType? GetFallbackColorType<TPixel>(Image<TPixel> image)
where TPixel : unmanaged, IPixel<TPixel>
{
// First inspect the image metadata.
JpegColorType? colorType = null;
JpegMetadata metadata = image.Metadata.GetJpegMetadata();
if (IsSupportedColorType(metadata.ColorType))
{
return metadata.ColorType;
}
// Secondly, inspect the pixel type.
// TODO: PixelTypeInfo should contain a component count!
bool isGrayscale =
typeof(TPixel) == typeof(L8) || typeof(TPixel) == typeof(L16) ||
typeof(TPixel) == typeof(La16) || typeof(TPixel) == typeof(La32);
// We don't set multi-component color types here since we can set it based upon
// the quality in InitQuantizationTables.
if (isGrayscale)
{
colorType = JpegColorType.Luminance;
}
return colorType;
}
/// <summary>
/// Returns true, if the color type is supported by the encoder.
/// </summary>

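GetFallbackColorType only settles the single-component case up front, so encoding a luminance image without setting ColorType is expected to produce a grayscale JPEG, while multi-component images defer the choice to InitQuantizationTables. A hedged sketch of the grayscale path (image size and file name are illustrative):

using SixLabors.ImageSharp;
using SixLabors.ImageSharp.Formats.Jpeg;
using SixLabors.ImageSharp.PixelFormats;

// A single-channel image; no ColorType is set on the encoder.
using var image = new Image<L8>(64, 64);

// The fallback inspects the pixel type and picks JpegColorType.Luminance.
image.SaveAsJpeg("gray.jpg", new JpegEncoder());
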
38  src/ImageSharp/Formats/Png/PngEncoderCore.cs

@@ -268,35 +268,27 @@ namespace SixLabors.ImageSharp.Formats.Png
if (this.use16Bit)
{
// 16 bit grayscale + alpha
// TODO: Should we consider in the future a GrayAlpha32 type.
using (IMemoryOwner<Rgba64> rgbaBuffer = this.memoryAllocator.Allocate<Rgba64>(rowSpan.Length))
{
Span<Rgba64> rgbaSpan = rgbaBuffer.GetSpan();
ref Rgba64 rgbaRef = ref MemoryMarshal.GetReference(rgbaSpan);
PixelOperations<TPixel>.Instance.ToRgba64(this.configuration, rowSpan, rgbaSpan);
using IMemoryOwner<La32> laBuffer = this.memoryAllocator.Allocate<La32>(rowSpan.Length);
Span<La32> laSpan = laBuffer.GetSpan();
ref La32 laRef = ref MemoryMarshal.GetReference(laSpan);
PixelOperations<TPixel>.Instance.ToLa32(this.configuration, rowSpan, laSpan);
// Can't map directly to byte array as it's big endian.
for (int x = 0, o = 0; x < rgbaSpan.Length; x++, o += 4)
{
Rgba64 rgba = Unsafe.Add(ref rgbaRef, x);
ushort luminance = ColorNumerics.Get16BitBT709Luminance(rgba.R, rgba.G, rgba.B);
BinaryPrimitives.WriteUInt16BigEndian(rawScanlineSpan.Slice(o, 2), luminance);
BinaryPrimitives.WriteUInt16BigEndian(rawScanlineSpan.Slice(o + 2, 2), rgba.A);
}
// Can't map directly to byte array as it's big endian.
for (int x = 0, o = 0; x < laSpan.Length; x++, o += 4)
{
La32 la = Unsafe.Add(ref laRef, x);
BinaryPrimitives.WriteUInt16BigEndian(rawScanlineSpan.Slice(o, 2), la.L);
BinaryPrimitives.WriteUInt16BigEndian(rawScanlineSpan.Slice(o + 2, 2), la.A);
}
}
else
{
// 8 bit grayscale + alpha
// TODO: Should we consider in the future a GrayAlpha16 type.
Rgba32 rgba = default;
for (int x = 0, o = 0; x < rowSpan.Length; x++, o += 2)
{
Unsafe.Add(ref rowSpanRef, x).ToRgba32(ref rgba);
Unsafe.Add(ref rawScanlineSpanRef, o) =
ColorNumerics.Get8BitBT709Luminance(rgba.R, rgba.G, rgba.B);
Unsafe.Add(ref rawScanlineSpanRef, o + 1) = rgba.A;
}
PixelOperations<TPixel>.Instance.ToLa16Bytes(
this.configuration,
rowSpan,
rawScanlineSpan,
rowSpan.Length);
}
}
}

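The grayscale-with-alpha paths now convert through La32 and La16 bulk operations instead of computing luminance from Rgba64 per pixel; the 16-bit samples still have to be written big-endian because that is the byte order PNG uses. A standalone sketch of the per-pixel write shown above (the wrapper type is illustrative):

using System;
using System.Buffers.Binary;
using SixLabors.ImageSharp.PixelFormats;

internal static class La32ScanlineExample
{
    // Writes one 16-bit luminance + alpha pixel into a big-endian scanline slice.
    public static void WriteBigEndian(La32 la, Span<byte> destination)
    {
        BinaryPrimitives.WriteUInt16BigEndian(destination.Slice(0, 2), la.L);
        BinaryPrimitives.WriteUInt16BigEndian(destination.Slice(2, 2), la.A);
    }
}
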
8  src/ImageSharp/Formats/Png/PngEncoderOptions.cs

@@ -18,11 +18,7 @@ namespace SixLabors.ImageSharp.Formats.Png
{
this.BitDepth = source.BitDepth;
this.ColorType = source.ColorType;
// Specification recommends default filter method None for paletted images and Paeth for others.
this.FilterMethod = source.FilterMethod ?? (source.ColorType == PngColorType.Palette
? PngFilterMethod.None
: PngFilterMethod.Paeth);
this.FilterMethod = source.FilterMethod;
this.CompressionLevel = source.CompressionLevel;
this.TextCompressionThreshold = source.TextCompressionThreshold;
this.Gamma = source.Gamma;
@@ -41,7 +37,7 @@ namespace SixLabors.ImageSharp.Formats.Png
public PngColorType? ColorType { get; set; }
/// <inheritdoc/>
public PngFilterMethod? FilterMethod { get; }
public PngFilterMethod? FilterMethod { get; set; }
/// <inheritdoc/>
public PngCompressionLevel CompressionLevel { get; } = PngCompressionLevel.DefaultCompression;

12  src/ImageSharp/Formats/Png/PngEncoderOptionsHelpers.cs

@@ -34,6 +34,18 @@ namespace SixLabors.ImageSharp.Formats.Png
// a sensible default based upon the pixel format.
options.ColorType ??= pngMetadata.ColorType ?? SuggestColorType<TPixel>();
options.BitDepth ??= pngMetadata.BitDepth ?? SuggestBitDepth<TPixel>();
if (!options.FilterMethod.HasValue)
{
// Specification recommends default filter method None for paletted images and Paeth for others.
if (options.ColorType == PngColorType.Palette)
{
options.FilterMethod = PngFilterMethod.None;
}
else
{
options.FilterMethod = PngFilterMethod.Paeth;
}
}
// Ensure bit depth and color type are a supported combination.
// Bit8 is the only bit depth supported by all color types.

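Moving the default here means the filter method is chosen only after the color type has been resolved: None for palette output, Paeth for everything else. A sketch of the resulting behaviour when FilterMethod is left unset (option values and file names are illustrative):

using SixLabors.ImageSharp;
using SixLabors.ImageSharp.Formats.Png;

using Image image = Image.Load("input.png");

// FilterMethod stays null; once the color type resolves to Palette the
// helper above defaults it to PngFilterMethod.None, otherwise to Paeth.
var encoder = new PngEncoder { ColorType = PngColorType.Palette };
image.Save("palette.png", encoder);
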
431  src/ImageSharp/Formats/Webp/AlphaDecoder.cs

@@ -0,0 +1,431 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Buffers;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using SixLabors.ImageSharp.Formats.Webp.BitReader;
using SixLabors.ImageSharp.Formats.Webp.Lossless;
using SixLabors.ImageSharp.Memory;
namespace SixLabors.ImageSharp.Formats.Webp
{
/// <summary>
/// Implements decoding for lossy alpha chunks which may be compressed.
/// </summary>
internal class AlphaDecoder : IDisposable
{
private readonly MemoryAllocator memoryAllocator;
/// <summary>
/// Initializes a new instance of the <see cref="AlphaDecoder"/> class.
/// </summary>
/// <param name="width">The width of the image.</param>
/// <param name="height">The height of the image.</param>
/// <param name="data">The (maybe compressed) alpha data.</param>
/// <param name="alphaChunkHeader">The first byte of the alpha image stream contains information on how to decode the stream.</param>
/// <param name="memoryAllocator">Used for allocating memory during decoding.</param>
/// <param name="configuration">The configuration.</param>
public AlphaDecoder(int width, int height, IMemoryOwner<byte> data, byte alphaChunkHeader, MemoryAllocator memoryAllocator, Configuration configuration)
{
this.Width = width;
this.Height = height;
this.Data = data;
this.memoryAllocator = memoryAllocator;
this.LastRow = 0;
int totalPixels = width * height;
var compression = (WebpAlphaCompressionMethod)(alphaChunkHeader & 0x03);
if (compression is not WebpAlphaCompressionMethod.NoCompression and not WebpAlphaCompressionMethod.WebpLosslessCompression)
{
WebpThrowHelper.ThrowImageFormatException($"unexpected alpha compression method {compression} found");
}
this.Compressed = compression == WebpAlphaCompressionMethod.WebpLosslessCompression;
// The filtering method used. Only values between 0 and 3 are valid.
int filter = (alphaChunkHeader >> 2) & 0x03;
if (filter is < (int)WebpAlphaFilterType.None or > (int)WebpAlphaFilterType.Gradient)
{
WebpThrowHelper.ThrowImageFormatException($"unexpected alpha filter method {filter} found");
}
this.Alpha = memoryAllocator.Allocate<byte>(totalPixels);
this.AlphaFilterType = (WebpAlphaFilterType)filter;
this.Vp8LDec = new Vp8LDecoder(width, height, memoryAllocator);
if (this.Compressed)
{
var bitReader = new Vp8LBitReader(data);
this.LosslessDecoder = new WebpLosslessDecoder(bitReader, memoryAllocator, configuration);
this.LosslessDecoder.DecodeImageStream(this.Vp8LDec, width, height, true);
this.Use8BDecode = this.Vp8LDec.Transforms.Count > 0 && Is8BOptimizable(this.Vp8LDec.Metadata);
}
}
/// <summary>
/// Gets the width of the image.
/// </summary>
public int Width { get; }
/// <summary>
/// Gets the height of the image.
/// </summary>
public int Height { get; }
/// <summary>
/// Gets the used filter type.
/// </summary>
public WebpAlphaFilterType AlphaFilterType { get; }
/// <summary>
/// Gets or sets the last decoded row.
/// </summary>
public int LastRow { get; set; }
/// <summary>
/// Gets or sets the row before the last decoded row.
/// </summary>
public int PrevRow { get; set; }
/// <summary>
/// Gets information for decoding Vp8L compressed alpha data.
/// </summary>
public Vp8LDecoder Vp8LDec { get; }
/// <summary>
/// Gets the decoded alpha data.
/// </summary>
public IMemoryOwner<byte> Alpha { get; }
/// <summary>
/// Gets a value indicating whether the alpha channel uses compression.
/// </summary>
private bool Compressed { get; }
/// <summary>
/// Gets the (maybe compressed) alpha data.
/// </summary>
private IMemoryOwner<byte> Data { get; }
/// <summary>
/// Gets the Vp8L decoder which is used to decompress the alpha channel, if needed.
/// </summary>
private WebpLosslessDecoder LosslessDecoder { get; }
/// <summary>
/// Gets a value indicating whether the decoding needs 1 byte per pixel for decoding.
/// Although Alpha Channel requires only 1 byte per pixel, sometimes Vp8LDecoder may need to allocate
/// 4 bytes per pixel internally during decode.
/// </summary>
public bool Use8BDecode { get; }
/// <summary>
/// Decodes and filters the maybe compressed alpha data.
/// </summary>
public void Decode()
{
if (!this.Compressed)
{
Span<byte> dataSpan = this.Data.Memory.Span;
int pixelCount = this.Width * this.Height;
if (dataSpan.Length < pixelCount)
{
WebpThrowHelper.ThrowImageFormatException("not enough data in the ALPH chunk");
}
Span<byte> alphaSpan = this.Alpha.Memory.Span;
if (this.AlphaFilterType == WebpAlphaFilterType.None)
{
dataSpan.Slice(0, pixelCount).CopyTo(alphaSpan);
return;
}
Span<byte> deltas = dataSpan;
Span<byte> dst = alphaSpan;
Span<byte> prev = default;
for (int y = 0; y < this.Height; y++)
{
switch (this.AlphaFilterType)
{
case WebpAlphaFilterType.Horizontal:
HorizontalUnfilter(prev, deltas, dst, this.Width);
break;
case WebpAlphaFilterType.Vertical:
VerticalUnfilter(prev, deltas, dst, this.Width);
break;
case WebpAlphaFilterType.Gradient:
GradientUnfilter(prev, deltas, dst, this.Width);
break;
}
prev = dst;
deltas = deltas.Slice(this.Width);
dst = dst.Slice(this.Width);
}
}
else
{
if (this.Use8BDecode)
{
this.LosslessDecoder.DecodeAlphaData(this);
}
else
{
this.LosslessDecoder.DecodeImageData(this.Vp8LDec, this.Vp8LDec.Pixels.Memory.Span);
this.ExtractAlphaRows(this.Vp8LDec);
}
}
}
/// <summary>
/// Applies filtering to a set of rows.
/// </summary>
/// <param name="firstRow">The first row index to start filtering.</param>
/// <param name="lastRow">The last row index for filtering.</param>
/// <param name="dst">The destination to store the filtered data.</param>
/// <param name="stride">The stride to use.</param>
public void AlphaApplyFilter(int firstRow, int lastRow, Span<byte> dst, int stride)
{
if (this.AlphaFilterType == WebpAlphaFilterType.None)
{
return;
}
Span<byte> alphaSpan = this.Alpha.Memory.Span;
Span<byte> prev = this.PrevRow == 0 ? null : alphaSpan.Slice(this.Width * this.PrevRow);
for (int y = firstRow; y < lastRow; y++)
{
switch (this.AlphaFilterType)
{
case WebpAlphaFilterType.Horizontal:
HorizontalUnfilter(prev, dst, dst, this.Width);
break;
case WebpAlphaFilterType.Vertical:
VerticalUnfilter(prev, dst, dst, this.Width);
break;
case WebpAlphaFilterType.Gradient:
GradientUnfilter(prev, dst, dst, this.Width);
break;
}
prev = dst;
dst = dst.Slice(stride);
}
this.PrevRow = lastRow - 1;
}
public void ExtractPalettedAlphaRows(int lastRow)
{
// For vertical and gradient filtering, we need to decode the part above the
// cropTop row, in order to have the correct spatial predictors.
int topRow = this.AlphaFilterType is WebpAlphaFilterType.None or WebpAlphaFilterType.Horizontal ? 0 : this.LastRow;
int firstRow = this.LastRow < topRow ? topRow : this.LastRow;
if (lastRow > firstRow)
{
// Special method for paletted alpha data.
Span<byte> output = this.Alpha.Memory.Span;
Span<uint> pixelData = this.Vp8LDec.Pixels.Memory.Span;
Span<byte> pixelDataAsBytes = MemoryMarshal.Cast<uint, byte>(pixelData);
Span<byte> dst = output.Slice(this.Width * firstRow);
Span<byte> input = pixelDataAsBytes.Slice(this.Vp8LDec.Width * firstRow);
if (this.Vp8LDec.Transforms.Count == 0 || this.Vp8LDec.Transforms[0].TransformType != Vp8LTransformType.ColorIndexingTransform)
{
WebpThrowHelper.ThrowImageFormatException("error while decoding alpha channel, expected color index transform data is missing");
}
Vp8LTransform transform = this.Vp8LDec.Transforms[0];
ColorIndexInverseTransformAlpha(transform, firstRow, lastRow, input, dst);
this.AlphaApplyFilter(firstRow, lastRow, dst, this.Width);
}
this.LastRow = lastRow;
}
/// <summary>
/// Once the image-stream is decoded into ARGB color values, the transparency information will be extracted from the green channel of the ARGB quadruplet.
/// </summary>
/// <param name="dec">The VP8L decoder.</param>
private void ExtractAlphaRows(Vp8LDecoder dec)
{
int numRowsToProcess = dec.Height;
int width = dec.Width;
Span<uint> pixels = dec.Pixels.Memory.Span;
Span<uint> input = pixels;
Span<byte> output = this.Alpha.Memory.Span;
// Extract alpha (which is stored in the green plane).
int pixelCount = width * numRowsToProcess;
WebpLosslessDecoder.ApplyInverseTransforms(dec, input, this.memoryAllocator);
ExtractGreen(input, output, pixelCount);
this.AlphaApplyFilter(0, numRowsToProcess, output, width);
}
private static void ColorIndexInverseTransformAlpha(
Vp8LTransform transform,
int yStart,
int yEnd,
Span<byte> src,
Span<byte> dst)
{
int bitsPerPixel = 8 >> transform.Bits;
int width = transform.XSize;
Span<uint> colorMap = transform.Data.Memory.Span;
if (bitsPerPixel < 8)
{
int srcOffset = 0;
int dstOffset = 0;
int pixelsPerByte = 1 << transform.Bits;
int countMask = pixelsPerByte - 1;
int bitMask = (1 << bitsPerPixel) - 1;
for (int y = yStart; y < yEnd; y++)
{
int packedPixels = 0;
for (int x = 0; x < width; x++)
{
if ((x & countMask) == 0)
{
packedPixels = src[srcOffset];
srcOffset++;
}
dst[dstOffset] = GetAlphaValue((int)colorMap[packedPixels & bitMask]);
dstOffset++;
packedPixels >>= bitsPerPixel;
}
}
}
else
{
MapAlpha(src, colorMap, dst, yStart, yEnd, width);
}
}
private static void HorizontalUnfilter(Span<byte> prev, Span<byte> input, Span<byte> dst, int width)
{
byte pred = (byte)(prev == null ? 0 : prev[0]);
for (int i = 0; i < width; i++)
{
byte val = (byte)(pred + input[i]);
pred = val;
dst[i] = val;
}
}
private static void VerticalUnfilter(Span<byte> prev, Span<byte> input, Span<byte> dst, int width)
{
if (prev == null)
{
HorizontalUnfilter(null, input, dst, width);
}
else
{
for (int i = 0; i < width; i++)
{
dst[i] = (byte)(prev[i] + input[i]);
}
}
}
private static void GradientUnfilter(Span<byte> prev, Span<byte> input, Span<byte> dst, int width)
{
if (prev == null)
{
HorizontalUnfilter(null, input, dst, width);
}
else
{
byte prev0 = prev[0];
byte topLeft = prev0;
byte left = prev0;
for (int i = 0; i < width; i++)
{
byte top = prev[i];
left = (byte)(input[i] + GradientPredictor(left, top, topLeft));
topLeft = top;
dst[i] = left;
}
}
}
/// <summary>
/// Row-processing for the special case when alpha data contains only one
/// transform (color indexing), and trivial non-green literals.
/// </summary>
/// <param name="hdr">The VP8L meta data.</param>
/// <returns>True, if alpha channel needs one byte per pixel, otherwise 4.</returns>
private static bool Is8BOptimizable(Vp8LMetadata hdr)
{
if (hdr.ColorCacheSize > 0)
{
return false;
}
for (int i = 0; i < hdr.NumHTreeGroups; i++)
{
List<HuffmanCode[]> htrees = hdr.HTreeGroups[i].HTrees;
if (htrees[HuffIndex.Red][0].BitsUsed > 0)
{
return false;
}
if (htrees[HuffIndex.Blue][0].BitsUsed > 0)
{
return false;
}
if (htrees[HuffIndex.Alpha][0].BitsUsed > 0)
{
return false;
}
}
return true;
}
private static void MapAlpha(Span<byte> src, Span<uint> colorMap, Span<byte> dst, int yStart, int yEnd, int width)
{
int offset = 0;
for (int y = yStart; y < yEnd; y++)
{
for (int x = 0; x < width; x++)
{
dst[offset] = GetAlphaValue((int)colorMap[src[offset]]);
offset++;
}
}
}
[MethodImpl(InliningOptions.ShortMethod)]
private static byte GetAlphaValue(int val) => (byte)((val >> 8) & 0xff);
[MethodImpl(InliningOptions.ShortMethod)]
private static int GradientPredictor(byte a, byte b, byte c)
{
int g = a + b - c;
return (g & ~0xff) == 0 ? g : g < 0 ? 0 : 255; // clip to 8bit.
}
[MethodImpl(InliningOptions.ShortMethod)]
private static void ExtractGreen(Span<uint> argb, Span<byte> alpha, int size)
{
for (int i = 0; i < size; i++)
{
alpha[i] = (byte)(argb[i] >> 8);
}
}
/// <inheritdoc/>
public void Dispose()
{
this.Vp8LDec?.Dispose();
this.Data.Dispose();
this.Alpha?.Dispose();
}
}
}

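The gradient filter predicts each alpha byte from its left, top and top-left neighbours and clamps the prediction to the byte range. A minimal standalone restatement of that clamp, equivalent to GradientPredictor above (names are illustrative):

internal static class AlphaGradientExample
{
    // Clamp left + top - topLeft to 0..255, mirroring GradientPredictor.
    public static byte Predict(byte left, byte top, byte topLeft)
    {
        int g = left + top - topLeft;
        return (byte)(g < 0 ? 0 : g > 255 ? 255 : g);
    }
}
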
58  src/ImageSharp/Formats/Webp/BitReader/BitReaderBase.cs

@@ -0,0 +1,58 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Buffers;
using System.IO;
using SixLabors.ImageSharp.Memory;
namespace SixLabors.ImageSharp.Formats.Webp.BitReader
{
/// <summary>
/// Base class for VP8 and VP8L bitreader.
/// </summary>
internal abstract class BitReaderBase : IDisposable
{
private bool isDisposed;
/// <summary>
/// Gets or sets the raw encoded image data.
/// </summary>
public IMemoryOwner<byte> Data { get; set; }
/// <summary>
/// Copies the raw encoded image data from the stream into a byte array.
/// </summary>
/// <param name="input">The input stream.</param>
/// <param name="bytesToRead">Number of bytes to read as indicated from the chunk size.</param>
/// <param name="memoryAllocator">Used for allocating memory during reading data from the stream.</param>
protected void ReadImageDataFromStream(Stream input, int bytesToRead, MemoryAllocator memoryAllocator)
{
this.Data = memoryAllocator.Allocate<byte>(bytesToRead);
Span<byte> dataSpan = this.Data.Memory.Span;
input.Read(dataSpan.Slice(0, bytesToRead), 0, bytesToRead);
}
protected virtual void Dispose(bool disposing)
{
if (this.isDisposed)
{
return;
}
if (disposing)
{
this.Data?.Dispose();
}
this.isDisposed = true;
}
/// <inheritdoc/>
public void Dispose()
{
this.Dispose(disposing: true);
GC.SuppressFinalize(this);
}
}
}

229  src/ImageSharp/Formats/Webp/BitReader/Vp8BitReader.cs

@@ -0,0 +1,229 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Buffers;
using System.Buffers.Binary;
using System.IO;
using System.Runtime.CompilerServices;
using SixLabors.ImageSharp.Memory;
namespace SixLabors.ImageSharp.Formats.Webp.BitReader
{
/// <summary>
/// A bit reader for VP8 streams.
/// </summary>
internal class Vp8BitReader : BitReaderBase
{
private const int BitsCount = 56;
/// <summary>
/// Current value.
/// </summary>
private ulong value;
/// <summary>
/// Current range minus 1. In [127, 254] interval.
/// </summary>
private uint range;
/// <summary>
/// Number of valid bits left.
/// </summary>
private int bits;
/// <summary>
/// Max packed-read position of the buffer.
/// </summary>
private uint bufferMax;
private uint bufferEnd;
/// <summary>
/// True if input is exhausted.
/// </summary>
private bool eof;
/// <summary>
/// Byte position in buffer.
/// </summary>
private long pos;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8BitReader"/> class.
/// </summary>
/// <param name="inputStream">The input stream to read from.</param>
/// <param name="imageDataSize">The raw image data size in bytes.</param>
/// <param name="memoryAllocator">Used for allocating memory during reading data from the stream.</param>
/// <param name="partitionLength">The partition length.</param>
/// <param name="startPos">Start index in the data array. Defaults to 0.</param>
public Vp8BitReader(Stream inputStream, uint imageDataSize, MemoryAllocator memoryAllocator, uint partitionLength, int startPos = 0)
{
Guard.MustBeLessThan(imageDataSize, int.MaxValue, nameof(imageDataSize));
this.ImageDataSize = imageDataSize;
this.PartitionLength = partitionLength;
this.ReadImageDataFromStream(inputStream, (int)imageDataSize, memoryAllocator);
this.InitBitreader(partitionLength, startPos);
}
/// <summary>
/// Initializes a new instance of the <see cref="Vp8BitReader"/> class.
/// </summary>
/// <param name="imageData">The raw encoded image data.</param>
/// <param name="partitionLength">The partition length.</param>
/// <param name="startPos">Start index in the data array. Defaults to 0.</param>
public Vp8BitReader(IMemoryOwner<byte> imageData, uint partitionLength, int startPos = 0)
{
this.Data = imageData;
this.ImageDataSize = (uint)imageData.Memory.Length;
this.PartitionLength = partitionLength;
this.InitBitreader(partitionLength, startPos);
}
public int Pos => (int)this.pos;
public uint ImageDataSize { get; }
public uint PartitionLength { get; }
public uint Remaining { get; set; }
[MethodImpl(InliningOptions.ShortMethod)]
public int GetBit(int prob)
{
uint range = this.range;
if (this.bits < 0)
{
this.LoadNewBytes();
}
int pos = this.bits;
uint split = (uint)((range * prob) >> 8);
ulong value = this.value >> pos;
bool bit = value > split;
if (bit)
{
range -= split;
this.value -= (ulong)(split + 1) << pos;
}
else
{
range = split + 1;
}
int shift = 7 ^ Numerics.Log2(range);
range <<= shift;
this.bits -= shift;
this.range = range - 1;
return bit ? 1 : 0;
}
// Simplified version of GetBit() for prob = 0x80 (note: the shift is always 1 here).
public int GetSigned(int v)
{
if (this.bits < 0)
{
this.LoadNewBytes();
}
int pos = this.bits;
uint split = this.range >> 1;
ulong value = this.value >> pos;
ulong mask = (split - value) >> 31; // -1 or 0
this.bits -= 1;
this.range = (this.range + (uint)mask) | 1;
this.value -= ((split + 1) & mask) << pos;
return (v ^ (int)mask) - (int)mask;
}
[MethodImpl(InliningOptions.ShortMethod)]
public bool ReadBool() => this.ReadValue(1) is 1;
public uint ReadValue(int nBits)
{
Guard.MustBeGreaterThan(nBits, 0, nameof(nBits));
Guard.MustBeLessThanOrEqualTo(nBits, 32, nameof(nBits));
uint v = 0;
while (nBits-- > 0)
{
v |= (uint)this.GetBit(0x80) << nBits;
}
return v;
}
public int ReadSignedValue(int nBits)
{
Guard.MustBeGreaterThan(nBits, 0, nameof(nBits));
Guard.MustBeLessThanOrEqualTo(nBits, 32, nameof(nBits));
int value = (int)this.ReadValue(nBits);
return this.ReadValue(1) != 0 ? -value : value;
}
private void InitBitreader(uint size, int pos = 0)
{
long posPlusSize = pos + size;
this.range = 255 - 1;
this.value = 0;
this.bits = -8; // to load the very first 8 bits.
this.eof = false;
this.pos = pos;
this.bufferEnd = (uint)posPlusSize;
this.bufferMax = (uint)(size > 8 ? posPlusSize - 8 + 1 : pos);
this.LoadNewBytes();
}
[MethodImpl(InliningOptions.ColdPath)]
private void LoadNewBytes()
{
if (this.pos < this.bufferMax)
{
ulong inBits = BinaryPrimitives.ReadUInt64LittleEndian(this.Data.Memory.Span.Slice((int)this.pos, 8));
this.pos += BitsCount >> 3;
ulong bits = this.ByteSwap64(inBits);
bits >>= 64 - BitsCount;
this.value = bits | (this.value << BitsCount);
this.bits += BitsCount;
}
else
{
this.LoadFinalBytes();
}
}
private void LoadFinalBytes()
{
// Only read 8 bits at a time.
if (this.pos < this.bufferEnd)
{
this.bits += 8;
this.value = this.Data.Memory.Span[(int)this.pos++] | (this.value << 8);
}
else if (!this.eof)
{
this.value <<= 8;
this.bits += 8;
this.eof = true;
}
else
{
this.bits = 0; // This is to avoid undefined behaviour with shifts.
}
}
[MethodImpl(InliningOptions.ShortMethod)]
private ulong ByteSwap64(ulong x)
{
x = ((x & 0xffffffff00000000ul) >> 32) | ((x & 0x00000000fffffffful) << 32);
x = ((x & 0xffff0000ffff0000ul) >> 16) | ((x & 0x0000ffff0000fffful) << 16);
x = ((x & 0xff00ff00ff00ff00ul) >> 8) | ((x & 0x00ff00ff00ff00fful) << 8);
return x;
}
}
}
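A minimal usage sketch (illustrative only, with arbitrary test data): ReadValue reads literal bits at the uniform probability 0x80, while GetBit consumes a single bit under an arbitrary 8-bit probability.

using System.Buffers;
using SixLabors.ImageSharp.Formats.Webp.BitReader;

IMemoryOwner<byte> data = MemoryPool<byte>.Shared.Rent(16);
data.Memory.Span.Fill(0xA5); // arbitrary test bytes

using var reader = new Vp8BitReader(data, partitionLength: 16);
uint header = reader.ReadValue(4);     // four literal bits, most significant bit first
int bit = reader.GetBit(200);          // one bit with probability 200/256
int delta = reader.ReadSignedValue(5); // magnitude followed by a sign bit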

215
src/ImageSharp/Formats/Webp/BitReader/Vp8LBitReader.cs

@@ -0,0 +1,215 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Buffers;
using System.IO;
using System.Runtime.CompilerServices;
using SixLabors.ImageSharp.Memory;
namespace SixLabors.ImageSharp.Formats.Webp.BitReader
{
/// <summary>
/// A bit reader for reading lossless webp streams.
/// </summary>
internal class Vp8LBitReader : BitReaderBase
{
/// <summary>
/// Maximum number of bits (inclusive) the bit-reader can handle.
/// </summary>
private const int Vp8LMaxNumBitRead = 24;
/// <summary>
/// Number of bits prefetched.
/// </summary>
private const int Lbits = 64;
/// <summary>
/// Minimum number of bits available after FillBitWindow.
/// </summary>
private const int Wbits = 32;
private readonly uint[] bitMask =
{
0,
0x000001, 0x000003, 0x000007, 0x00000f,
0x00001f, 0x00003f, 0x00007f, 0x0000ff,
0x0001ff, 0x0003ff, 0x0007ff, 0x000fff,
0x001fff, 0x003fff, 0x007fff, 0x00ffff,
0x01ffff, 0x03ffff, 0x07ffff, 0x0fffff,
0x1fffff, 0x3fffff, 0x7fffff, 0xffffff
};
/// <summary>
/// Pre-fetched bits.
/// </summary>
private ulong value;
/// <summary>
/// Buffer length.
/// </summary>
private readonly long len;
/// <summary>
/// Byte position in buffer.
/// </summary>
private long pos;
/// <summary>
/// Current bit-reading position in value.
/// </summary>
private int bitPos;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LBitReader"/> class.
/// </summary>
/// <param name="data">Lossless compressed image data.</param>
public Vp8LBitReader(IMemoryOwner<byte> data)
{
this.Data = data;
this.len = data.Memory.Length;
this.value = 0;
this.bitPos = 0;
this.Eos = false;
ulong currentValue = 0;
System.Span<byte> dataSpan = this.Data.Memory.Span;
for (int i = 0; i < 8; i++)
{
currentValue |= (ulong)dataSpan[i] << (8 * i);
}
this.value = currentValue;
this.pos = 8;
}
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LBitReader"/> class.
/// </summary>
/// <param name="inputStream">The input stream to read from.</param>
/// <param name="imageDataSize">The raw image data size in bytes.</param>
/// <param name="memoryAllocator">Used for allocating memory during reading data from the stream.</param>
public Vp8LBitReader(Stream inputStream, uint imageDataSize, MemoryAllocator memoryAllocator)
{
long length = imageDataSize;
this.ReadImageDataFromStream(inputStream, (int)imageDataSize, memoryAllocator);
this.len = length;
this.value = 0;
this.bitPos = 0;
this.Eos = false;
if (length > sizeof(long))
{
length = sizeof(long);
}
ulong currentValue = 0;
System.Span<byte> dataSpan = this.Data.Memory.Span;
for (int i = 0; i < length; i++)
{
currentValue |= (ulong)dataSpan[i] << (8 * i);
}
this.value = currentValue;
this.pos = length;
}
/// <summary>
/// Gets or sets a value indicating whether a bit was read past the end of the buffer.
/// </summary>
public bool Eos { get; set; }
/// <summary>
/// Reads an unsigned value from the buffer. The bits of each byte are read in least-significant-bit-first order.
/// </summary>
/// <param name="nBits">The number of bits to read (must not exceed 24).</param>
/// <returns>The read bits as an unsigned integer.</returns>
public uint ReadValue(int nBits)
{
Guard.MustBeGreaterThan(nBits, 0, nameof(nBits));
if (!this.Eos && nBits <= Vp8LMaxNumBitRead)
{
ulong val = this.PrefetchBits() & this.bitMask[nBits];
this.bitPos += nBits;
this.ShiftBytes();
return (uint)val;
}
this.SetEndOfStream();
return 0;
}
/// <summary>
/// Reads a single bit from the stream.
/// </summary>
/// <returns>True if the bit read was 1, false otherwise.</returns>
[MethodImpl(InliningOptions.ShortMethod)]
public bool ReadBit()
{
uint bit = this.ReadValue(1);
return bit != 0;
}
/// <summary>
/// Advances the bit-reading position by the given number of bits. Used to skip over bits that were inspected via PrefetchBits and FillBitWindow.
/// </summary>
/// <param name="numberOfBits">The number of bits to advance the position.</param>
[MethodImpl(InliningOptions.ShortMethod)]
public void AdvanceBitPosition(int numberOfBits) => this.bitPos += numberOfBits;
/// <summary>
/// Return the pre-fetched bits, so they can be looked up.
/// </summary>
/// <returns>The pre-fetched bits.</returns>
[MethodImpl(InliningOptions.ShortMethod)]
public ulong PrefetchBits() => this.value >> (this.bitPos & (Lbits - 1));
/// <summary>
/// Advances the read buffer by 4 bytes to make room for reading next 32 bits.
/// </summary>
public void FillBitWindow()
{
if (this.bitPos >= Wbits)
{
this.DoFillBitWindow();
}
}
/// <summary>
/// Returns true if there was an attempt at reading a bit past the end of the buffer.
/// </summary>
/// <returns>True, if end of buffer was reached.</returns>
public bool IsEndOfStream() => this.Eos || ((this.pos == this.len) && (this.bitPos > Lbits));
[MethodImpl(InliningOptions.ShortMethod)]
private void DoFillBitWindow() => this.ShiftBytes();
/// <summary>
/// If not at the end of the stream, reloads the prefetched value (up to Lbits bits) byte-by-byte.
/// </summary>
private void ShiftBytes()
{
System.Span<byte> dataSpan = this.Data.Memory.Span;
while (this.bitPos >= 8 && this.pos < this.len)
{
this.value >>= 8;
this.value |= (ulong)dataSpan[(int)this.pos] << (Lbits - 8);
++this.pos;
this.bitPos -= 8;
}
if (this.IsEndOfStream())
{
this.SetEndOfStream();
}
}
private void SetEndOfStream()
{
this.Eos = true;
this.bitPos = 0; // To avoid undefined behaviour with shifts.
}
}
}
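A minimal usage sketch (illustrative only): the lossless reader prefetches 64 bits and serves up to 24 bits per ReadValue call, least-significant bit first; the Huffman decoding path additionally uses PrefetchBits/AdvanceBitPosition to peek before committing.

using System.Buffers;
using SixLabors.ImageSharp.Formats.Webp.BitReader;

IMemoryOwner<byte> data = MemoryPool<byte>.Shared.Rent(16);
data.Memory.Span.Fill(0x3C); // arbitrary test bytes

using var reader = new Vp8LBitReader(data);
bool flag = reader.ReadBit();      // a single bit
uint value = reader.ReadValue(14); // up to 24 bits, LSB first

reader.FillBitWindow();               // ensure at least 32 bits are prefetched
ulong window = reader.PrefetchBits(); // peek without consuming
reader.AdvanceBitPosition(3);         // then consume only what was actually used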

148
src/ImageSharp/Formats/Webp/BitWriter/BitWriterBase.cs

@@ -0,0 +1,148 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Buffers.Binary;
using System.IO;
using SixLabors.ImageSharp.Metadata.Profiles.Exif;
namespace SixLabors.ImageSharp.Formats.Webp.BitWriter
{
internal abstract class BitWriterBase
{
/// <summary>
/// Buffer to write to.
/// </summary>
private byte[] buffer;
/// <summary>
/// Initializes a new instance of the <see cref="BitWriterBase"/> class.
/// </summary>
/// <param name="expectedSize">The expected size in bytes.</param>
protected BitWriterBase(int expectedSize) => this.buffer = new byte[expectedSize];
/// <summary>
/// Initializes a new instance of the <see cref="BitWriterBase"/> class.
/// Used internally for cloning.
/// </summary>
private protected BitWriterBase(byte[] buffer) => this.buffer = buffer;
public byte[] Buffer => this.buffer;
/// <summary>
/// Writes the encoded bytes of the image to the stream. Call Finish() before this.
/// </summary>
/// <param name="stream">The stream to write to.</param>
public void WriteToStream(Stream stream) => stream.Write(this.Buffer.AsSpan(0, this.NumBytes()));
/// <summary>
/// Resizes the buffer to write to.
/// </summary>
/// <param name="extraSize">The extra size in bytes needed.</param>
public abstract void BitWriterResize(int extraSize);
/// <summary>
/// Returns the number of bytes of the encoded image data.
/// </summary>
/// <returns>The number of bytes of the image data.</returns>
public abstract int NumBytes();
/// <summary>
/// Flush leftover bits.
/// </summary>
public abstract void Finish();
/// <summary>
/// Writes the encoded image to the stream.
/// </summary>
/// <param name="stream">The stream to write to.</param>
/// <param name="exifProfile">The exif profile.</param>
/// <param name="width">The width of the image.</param>
/// <param name="height">The height of the image.</param>
public abstract void WriteEncodedImageToStream(Stream stream, ExifProfile exifProfile, uint width, uint height);
protected void ResizeBuffer(int maxBytes, int sizeRequired)
{
int newSize = (3 * maxBytes) >> 1;
if (newSize < sizeRequired)
{
newSize = sizeRequired;
}
// Make new size multiple of 1k.
newSize = ((newSize >> 10) + 1) << 10;
Array.Resize(ref this.buffer, newSize);
}
/// <summary>
/// Writes the RIFF header to the stream.
/// </summary>
/// <param name="stream">The stream to write to.</param>
/// <param name="riffSize">The block length.</param>
protected void WriteRiffHeader(Stream stream, uint riffSize)
{
Span<byte> buf = stackalloc byte[4];
stream.Write(WebpConstants.RiffFourCc);
BinaryPrimitives.WriteUInt32LittleEndian(buf, riffSize);
stream.Write(buf);
stream.Write(WebpConstants.WebpHeader);
}
/// <summary>
/// Writes the Exif profile to the stream.
/// </summary>
/// <param name="stream">The stream to write to.</param>
/// <param name="exifBytes">The exif profile bytes.</param>
protected void WriteExifProfile(Stream stream, byte[] exifBytes)
{
DebugGuard.NotNull(exifBytes, nameof(exifBytes));
Span<byte> buf = stackalloc byte[4];
BinaryPrimitives.WriteUInt32BigEndian(buf, (uint)WebpChunkType.Exif);
stream.Write(buf);
BinaryPrimitives.WriteUInt32LittleEndian(buf, (uint)exifBytes.Length);
stream.Write(buf);
stream.Write(exifBytes);
}
/// <summary>
/// Writes a VP8X header to the stream.
/// </summary>
/// <param name="stream">The stream to write to.</param>
/// <param name="exifProfile">An exif profile, or null if it does not exist.</param>
/// <param name="width">The width of the image.</param>
/// <param name="height">The height of the image.</param>
protected void WriteVp8XHeader(Stream stream, ExifProfile exifProfile, uint width, uint height)
{
int maxDimension = 16777215;
if (width > maxDimension || height > maxDimension)
{
WebpThrowHelper.ThrowInvalidImageDimensions($"Image width or height exceeds maximum allowed dimension of {maxDimension}");
}
// The spec states that the product of Canvas Width and Canvas Height MUST be at most 2^32 - 1.
if ((ulong)width * height > 4294967295ul)
{
WebpThrowHelper.ThrowInvalidImageDimensions("The product of image width and height MUST be at most 2^32 - 1");
}
uint flags = 0;
if (exifProfile != null)
{
// Set exif bit.
flags |= 8;
}
Span<byte> buf = stackalloc byte[4];
stream.Write(WebpConstants.Vp8XMagicBytes);
BinaryPrimitives.WriteUInt32LittleEndian(buf, WebpConstants.Vp8XChunkSize);
stream.Write(buf);
BinaryPrimitives.WriteUInt32LittleEndian(buf, flags);
stream.Write(buf);
BinaryPrimitives.WriteUInt32LittleEndian(buf, width - 1);
stream.Write(buf.Slice(0, 3));
BinaryPrimitives.WriteUInt32LittleEndian(buf, height - 1);
stream.Write(buf.Slice(0, 3));
}
}
}
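For clarity: the canvas width and height above are written minus one as 24-bit little-endian fields. A small hypothetical helper illustrating the resulting byte layout:

// Hypothetical helper, only to illustrate the 24-bit little-endian layout written by WriteVp8XHeader.
static byte[] EncodeCanvasDimension(uint dimension)
{
    uint value = dimension - 1; // stored minus one, per the VP8X chunk layout
    return new[]
    {
        (byte)(value & 0xFF),        // least significant byte first
        (byte)((value >> 8) & 0xFF),
        (byte)((value >> 16) & 0xFF),
    };
}

// EncodeCanvasDimension(1920) yields { 0x7F, 0x07, 0x00 }, since 1919 == 0x00077F.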

674
src/ImageSharp/Formats/Webp/BitWriter/Vp8BitWriter.cs

@@ -0,0 +1,674 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Buffers.Binary;
using System.IO;
using SixLabors.ImageSharp.Formats.Webp.Lossy;
using SixLabors.ImageSharp.Metadata.Profiles.Exif;
namespace SixLabors.ImageSharp.Formats.Webp.BitWriter
{
/// <summary>
/// A bit writer for writing lossy webp streams.
/// </summary>
internal class Vp8BitWriter : BitWriterBase
{
#pragma warning disable SA1310 // Field names should not contain underscore
private const int DC_PRED = 0;
private const int TM_PRED = 1;
private const int V_PRED = 2;
private const int H_PRED = 3;
// 4x4 modes
private const int B_DC_PRED = 0;
private const int B_TM_PRED = 1;
private const int B_VE_PRED = 2;
private const int B_HE_PRED = 3;
private const int B_RD_PRED = 4;
private const int B_VR_PRED = 5;
private const int B_LD_PRED = 6;
private const int B_VL_PRED = 7;
private const int B_HD_PRED = 8;
private const int B_HU_PRED = 9;
#pragma warning restore SA1310 // Field names should not contain underscore
private readonly Vp8Encoder enc;
private int range;
private int value;
/// <summary>
/// Number of outstanding bits.
/// </summary>
private int run;
/// <summary>
/// Number of pending bits.
/// </summary>
private int nbBits;
private uint pos;
private readonly int maxPos;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8BitWriter"/> class.
/// </summary>
/// <param name="expectedSize">The expected size in bytes.</param>
public Vp8BitWriter(int expectedSize)
: base(expectedSize)
{
this.range = 255 - 1;
this.value = 0;
this.run = 0;
this.nbBits = -8;
this.pos = 0;
this.maxPos = 0;
}
/// <summary>
/// Initializes a new instance of the <see cref="Vp8BitWriter"/> class.
/// </summary>
/// <param name="expectedSize">The expected size in bytes.</param>
/// <param name="enc">The Vp8Encoder.</param>
public Vp8BitWriter(int expectedSize, Vp8Encoder enc)
: this(expectedSize) => this.enc = enc;
/// <inheritdoc/>
public override int NumBytes() => (int)this.pos;
public int PutCoeffs(int ctx, Vp8Residual residual)
{
int n = residual.First;
Vp8ProbaArray p = residual.Prob[n].Probabilities[ctx];
if (!this.PutBit(residual.Last >= 0, p.Probabilities[0]))
{
return 0;
}
while (n < 16)
{
int c = residual.Coeffs[n++];
bool sign = c < 0;
int v = sign ? -c : c;
if (!this.PutBit(v != 0, p.Probabilities[1]))
{
p = residual.Prob[WebpConstants.Vp8EncBands[n]].Probabilities[0];
continue;
}
if (!this.PutBit(v > 1, p.Probabilities[2]))
{
p = residual.Prob[WebpConstants.Vp8EncBands[n]].Probabilities[1];
}
else
{
if (!this.PutBit(v > 4, p.Probabilities[3]))
{
if (this.PutBit(v != 2, p.Probabilities[4]))
{
this.PutBit(v == 4, p.Probabilities[5]);
}
}
else if (!this.PutBit(v > 10, p.Probabilities[6]))
{
if (!this.PutBit(v > 6, p.Probabilities[7]))
{
this.PutBit(v == 6, 159);
}
else
{
this.PutBit(v >= 9, 165);
this.PutBit((v & 1) == 0, 145);
}
}
else
{
int mask;
byte[] tab;
if (v < 3 + (8 << 1))
{
// VP8Cat3 (3b)
this.PutBit(0, p.Probabilities[8]);
this.PutBit(0, p.Probabilities[9]);
v -= 3 + (8 << 0);
mask = 1 << 2;
tab = WebpConstants.Cat3;
}
else if (v < 3 + (8 << 2))
{
// VP8Cat4 (4b)
this.PutBit(0, p.Probabilities[8]);
this.PutBit(1, p.Probabilities[9]);
v -= 3 + (8 << 1);
mask = 1 << 3;
tab = WebpConstants.Cat4;
}
else if (v < 3 + (8 << 3))
{
// VP8Cat5 (5b)
this.PutBit(1, p.Probabilities[8]);
this.PutBit(0, p.Probabilities[10]);
v -= 3 + (8 << 2);
mask = 1 << 4;
tab = WebpConstants.Cat5;
}
else
{
// VP8Cat6 (11b)
this.PutBit(1, p.Probabilities[8]);
this.PutBit(1, p.Probabilities[10]);
v -= 3 + (8 << 3);
mask = 1 << 10;
tab = WebpConstants.Cat6;
}
int tabIdx = 0;
while (mask != 0)
{
this.PutBit(v & mask, tab[tabIdx++]);
mask >>= 1;
}
}
p = residual.Prob[WebpConstants.Vp8EncBands[n]].Probabilities[2];
}
this.PutBitUniform(sign ? 1 : 0);
if (n == 16 || !this.PutBit(n <= residual.Last, p.Probabilities[0]))
{
return 1; // EOB
}
}
return 1;
}
/// <summary>
/// Resizes the buffer to write to.
/// </summary>
/// <param name="extraSize">The extra size in bytes needed.</param>
public override void BitWriterResize(int extraSize)
{
long neededSize = this.pos + extraSize;
if (neededSize <= this.maxPos)
{
return;
}
this.ResizeBuffer(this.maxPos, (int)neededSize);
}
/// <inheritdoc/>
public override void Finish()
{
this.PutBits(0, 9 - this.nbBits);
this.nbBits = 0; // pad with zeroes.
this.Flush();
}
public void PutSegment(int s, Span<byte> p)
{
if (this.PutBit(s >= 2, p[0]))
{
p = p.Slice(1);
}
this.PutBit(s & 1, p[1]);
}
public void PutI16Mode(int mode)
{
if (this.PutBit(mode is TM_PRED or H_PRED, 156))
{
this.PutBit(mode == TM_PRED, 128); // TM or HE
}
else
{
this.PutBit(mode == V_PRED, 163); // VE or DC
}
}
public int PutI4Mode(int mode, Span<byte> prob)
{
if (this.PutBit(mode != B_DC_PRED, prob[0]))
{
if (this.PutBit(mode != B_TM_PRED, prob[1]))
{
if (this.PutBit(mode != B_VE_PRED, prob[2]))
{
if (!this.PutBit(mode >= B_LD_PRED, prob[3]))
{
if (this.PutBit(mode != B_HE_PRED, prob[4]))
{
this.PutBit(mode != B_RD_PRED, prob[5]);
}
}
else
{
if (this.PutBit(mode != B_LD_PRED, prob[6]))
{
if (this.PutBit(mode != B_VL_PRED, prob[7]))
{
this.PutBit(mode != B_HD_PRED, prob[8]);
}
}
}
}
}
}
return mode;
}
public void PutUvMode(int uvMode)
{
// DC_PRED
if (this.PutBit(uvMode != DC_PRED, 142))
{
// V_PRED
if (this.PutBit(uvMode != V_PRED, 114))
{
// H_PRED
this.PutBit(uvMode != H_PRED, 183);
}
}
}
private void PutBits(uint value, int nbBits)
{
for (uint mask = 1u << (nbBits - 1); mask != 0; mask >>= 1)
{
this.PutBitUniform((int)(value & mask));
}
}
private bool PutBit(bool bit, int prob) => this.PutBit(bit ? 1 : 0, prob);
private bool PutBit(int bit, int prob)
{
int split = (this.range * prob) >> 8;
if (bit != 0)
{
this.value += split + 1;
this.range -= split + 1;
}
else
{
this.range = split;
}
if (this.range < 127)
{
// emit 'shift' bits out and renormalize.
int shift = WebpLookupTables.Norm[this.range];
this.range = WebpLookupTables.NewRange[this.range];
this.value <<= shift;
this.nbBits += shift;
if (this.nbBits > 0)
{
this.Flush();
}
}
return bit != 0;
}
private int PutBitUniform(int bit)
{
int split = this.range >> 1;
if (bit != 0)
{
this.value += split + 1;
this.range -= split + 1;
}
else
{
this.range = split;
}
if (this.range < 127)
{
this.range = WebpLookupTables.NewRange[this.range];
this.value <<= 1;
this.nbBits += 1;
if (this.nbBits > 0)
{
this.Flush();
}
}
return bit;
}
private void PutSignedBits(int value, int nbBits)
{
if (this.PutBitUniform(value != 0 ? 1 : 0) == 0)
{
return;
}
if (value < 0)
{
int valueToWrite = (-value << 1) | 1;
this.PutBits((uint)valueToWrite, nbBits + 1);
}
else
{
this.PutBits((uint)(value << 1), nbBits + 1);
}
}
private void Flush()
{
int s = 8 + this.nbBits;
int bits = this.value >> s;
this.value -= bits << s;
this.nbBits -= 8;
if ((bits & 0xff) != 0xff)
{
uint pos = this.pos;
this.BitWriterResize(this.run + 1);
if ((bits & 0x100) != 0)
{
// overflow -> propagate carry over pending 0xff's
if (pos > 0)
{
this.Buffer[pos - 1]++;
}
}
if (this.run > 0)
{
int value = (bits & 0x100) != 0 ? 0x00 : 0xff;
for (; this.run > 0; --this.run)
{
this.Buffer[pos++] = (byte)value;
}
}
this.Buffer[pos++] = (byte)(bits & 0xff);
this.pos = pos;
}
else
{
this.run++; // Delay writing of bytes 0xff, pending eventual carry.
}
}
/// <inheritdoc/>
public override void WriteEncodedImageToStream(Stream stream, ExifProfile exifProfile, uint width, uint height)
{
bool isVp8X = false;
byte[] exifBytes = null;
uint riffSize = 0;
if (exifProfile != null)
{
isVp8X = true;
riffSize += WebpConstants.ChunkHeaderSize + WebpConstants.Vp8XChunkSize;
exifBytes = exifProfile.ToByteArray();
riffSize += WebpConstants.ChunkHeaderSize + (uint)exifBytes.Length;
}
this.Finish();
uint numBytes = (uint)this.NumBytes();
int mbSize = this.enc.Mbw * this.enc.Mbh;
int expectedSize = mbSize * 7 / 8;
var bitWriterPartZero = new Vp8BitWriter(expectedSize);
// Partition #0 with header and partition sizes
uint size0 = this.GeneratePartition0(bitWriterPartZero);
uint vp8Size = WebpConstants.Vp8FrameHeaderSize + size0;
vp8Size += numBytes;
uint pad = vp8Size & 1;
vp8Size += pad;
// Compute RIFF size
// At the minimum it is: "WEBPVP8 nnnn" + VP8 data size.
riffSize += WebpConstants.TagSize + WebpConstants.ChunkHeaderSize + vp8Size;
// Emit headers and partition #0
this.WriteWebpHeaders(stream, size0, vp8Size, riffSize, isVp8X, width, height, exifProfile);
bitWriterPartZero.WriteToStream(stream);
// Write the encoded image to the stream.
this.WriteToStream(stream);
if (pad == 1)
{
stream.WriteByte(0);
}
if (exifProfile != null)
{
this.WriteExifProfile(stream, exifBytes);
}
}
private uint GeneratePartition0(Vp8BitWriter bitWriter)
{
bitWriter.PutBitUniform(0); // colorspace
bitWriter.PutBitUniform(0); // clamp type
this.WriteSegmentHeader(bitWriter);
this.WriteFilterHeader(bitWriter);
bitWriter.PutBits(0, 2);
this.WriteQuant(bitWriter);
bitWriter.PutBitUniform(0);
this.WriteProbas(bitWriter);
this.CodeIntraModes(bitWriter);
bitWriter.Finish();
return (uint)bitWriter.NumBytes();
}
private void WriteSegmentHeader(Vp8BitWriter bitWriter)
{
Vp8EncSegmentHeader hdr = this.enc.SegmentHeader;
Vp8EncProba proba = this.enc.Proba;
if (bitWriter.PutBitUniform(hdr.NumSegments > 1 ? 1 : 0) != 0)
{
// We always 'update' the quant and filter strength values.
int updateData = 1;
bitWriter.PutBitUniform(hdr.UpdateMap ? 1 : 0);
if (bitWriter.PutBitUniform(updateData) != 0)
{
// We always use absolute values, not relative ones.
bitWriter.PutBitUniform(1); // (segment_feature_mode = 1. Paragraph 9.3.)
for (int s = 0; s < WebpConstants.NumMbSegments; ++s)
{
bitWriter.PutSignedBits(this.enc.SegmentInfos[s].Quant, 7);
}
for (int s = 0; s < WebpConstants.NumMbSegments; ++s)
{
bitWriter.PutSignedBits(this.enc.SegmentInfos[s].FStrength, 6);
}
}
if (hdr.UpdateMap)
{
for (int s = 0; s < 3; ++s)
{
if (bitWriter.PutBitUniform(proba.Segments[s] != 255 ? 1 : 0) != 0)
{
bitWriter.PutBits(proba.Segments[s], 8);
}
}
}
}
}
private void WriteFilterHeader(Vp8BitWriter bitWriter)
{
Vp8FilterHeader hdr = this.enc.FilterHeader;
bool useLfDelta = hdr.I4x4LfDelta != 0;
bitWriter.PutBitUniform(hdr.Simple ? 1 : 0);
bitWriter.PutBits((uint)hdr.FilterLevel, 6);
bitWriter.PutBits((uint)hdr.Sharpness, 3);
if (bitWriter.PutBitUniform(useLfDelta ? 1 : 0) != 0)
{
// '0' is the default value for i4x4LfDelta at frame #0.
bool needUpdate = hdr.I4x4LfDelta != 0;
if (bitWriter.PutBitUniform(needUpdate ? 1 : 0) != 0)
{
// we don't use refLfDelta => emit four 0 bits.
bitWriter.PutBits(0, 4);
// we use modeLfDelta for i4x4
bitWriter.PutSignedBits(hdr.I4x4LfDelta, 6);
bitWriter.PutBits(0, 3); // all others unused.
}
}
}
// Nominal quantization parameters
private void WriteQuant(Vp8BitWriter bitWriter)
{
bitWriter.PutBits((uint)this.enc.BaseQuant, 7);
bitWriter.PutSignedBits(this.enc.DqY1Dc, 4);
bitWriter.PutSignedBits(this.enc.DqY2Dc, 4);
bitWriter.PutSignedBits(this.enc.DqY2Ac, 4);
bitWriter.PutSignedBits(this.enc.DqUvDc, 4);
bitWriter.PutSignedBits(this.enc.DqUvAc, 4);
}
private void WriteProbas(Vp8BitWriter bitWriter)
{
Vp8EncProba probas = this.enc.Proba;
for (int t = 0; t < WebpConstants.NumTypes; ++t)
{
for (int b = 0; b < WebpConstants.NumBands; ++b)
{
for (int c = 0; c < WebpConstants.NumCtx; ++c)
{
for (int p = 0; p < WebpConstants.NumProbas; ++p)
{
byte p0 = probas.Coeffs[t][b].Probabilities[c].Probabilities[p];
bool update = p0 != WebpLookupTables.DefaultCoeffsProba[t, b, c, p];
if (bitWriter.PutBit(update, WebpLookupTables.CoeffsUpdateProba[t, b, c, p]))
{
bitWriter.PutBits(p0, 8);
}
}
}
}
}
if (bitWriter.PutBitUniform(probas.UseSkipProba ? 1 : 0) != 0)
{
bitWriter.PutBits(probas.SkipProba, 8);
}
}
// Writes the partition #0 modes (that is: all intra modes)
private void CodeIntraModes(Vp8BitWriter bitWriter)
{
var it = new Vp8EncIterator(this.enc.YTop, this.enc.UvTop, this.enc.Nz, this.enc.MbInfo, this.enc.Preds, this.enc.TopDerr, this.enc.Mbw, this.enc.Mbh);
int predsWidth = this.enc.PredsWidth;
do
{
Vp8MacroBlockInfo mb = it.CurrentMacroBlockInfo;
int predIdx = it.PredIdx;
Span<byte> preds = it.Preds.AsSpan(predIdx);
if (this.enc.SegmentHeader.UpdateMap)
{
bitWriter.PutSegment(mb.Segment, this.enc.Proba.Segments);
}
if (this.enc.Proba.UseSkipProba)
{
bitWriter.PutBit(mb.Skip, this.enc.Proba.SkipProba);
}
if (bitWriter.PutBit(mb.MacroBlockType != 0, 145))
{
// i16x16
bitWriter.PutI16Mode(preds[0]);
}
else
{
Span<byte> topPred = it.Preds.AsSpan(predIdx - predsWidth);
for (int y = 0; y < 4; y++)
{
int left = it.Preds[predIdx - 1];
for (int x = 0; x < 4; x++)
{
byte[] probas = WebpLookupTables.ModesProba[topPred[x], left];
left = bitWriter.PutI4Mode(it.Preds[predIdx + x], probas);
}
topPred = it.Preds.AsSpan(predIdx);
predIdx += predsWidth;
}
}
bitWriter.PutUvMode(mb.UvMode);
}
while (it.Next());
}
private void WriteWebpHeaders(Stream stream, uint size0, uint vp8Size, uint riffSize, bool isVp8X, uint width, uint height, ExifProfile exifProfile)
{
this.WriteRiffHeader(stream, riffSize);
// Write VP8X, header if necessary.
if (isVp8X)
{
this.WriteVp8XHeader(stream, exifProfile, width, height);
}
this.WriteVp8Header(stream, vp8Size);
this.WriteFrameHeader(stream, size0);
}
private void WriteVp8Header(Stream stream, uint size)
{
Span<byte> vp8ChunkHeader = stackalloc byte[WebpConstants.ChunkHeaderSize];
WebpConstants.Vp8MagicBytes.AsSpan().CopyTo(vp8ChunkHeader);
BinaryPrimitives.WriteUInt32LittleEndian(vp8ChunkHeader.Slice(4), size);
stream.Write(vp8ChunkHeader);
}
private void WriteFrameHeader(Stream stream, uint size0)
{
uint profile = 0;
int width = this.enc.Width;
int height = this.enc.Height;
byte[] vp8FrameHeader = new byte[WebpConstants.Vp8FrameHeaderSize];
// Paragraph 9.1.
uint bits = 0 // keyframe (1b)
| (profile << 1) // profile (3b)
| (1 << 4) // visible (1b)
| (size0 << 5); // partition length (19b)
vp8FrameHeader[0] = (byte)((bits >> 0) & 0xff);
vp8FrameHeader[1] = (byte)((bits >> 8) & 0xff);
vp8FrameHeader[2] = (byte)((bits >> 16) & 0xff);
// signature
vp8FrameHeader[3] = WebpConstants.Vp8HeaderMagicBytes[0];
vp8FrameHeader[4] = WebpConstants.Vp8HeaderMagicBytes[1];
vp8FrameHeader[5] = WebpConstants.Vp8HeaderMagicBytes[2];
// dimensions
vp8FrameHeader[6] = (byte)(width & 0xff);
vp8FrameHeader[7] = (byte)(width >> 8);
vp8FrameHeader[8] = (byte)(height & 0xff);
vp8FrameHeader[9] = (byte)(height >> 8);
stream.Write(vp8FrameHeader);
}
}
}
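A worked example (illustrative only) of the bit packing done by WriteFrameHeader, assuming a partition #0 size of 2000 bytes and profile 0:

uint size0 = 2000;
uint bits = 0       // keyframe flag (0 = keyframe)
    | (0u << 1)     // profile 0
    | (1u << 4)     // visible frame
    | (size0 << 5); // partition #0 length
// bits == 64016 == 0x00FA10, so the first three header bytes are 0x10, 0xFA, 0x00.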

212
src/ImageSharp/Formats/Webp/BitWriter/Vp8LBitWriter.cs

@@ -0,0 +1,212 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Buffers.Binary;
using System.IO;
using SixLabors.ImageSharp.Formats.Webp.Lossless;
using SixLabors.ImageSharp.Metadata.Profiles.Exif;
namespace SixLabors.ImageSharp.Formats.Webp.BitWriter
{
/// <summary>
/// A bit writer for writing lossless webp streams.
/// </summary>
internal class Vp8LBitWriter : BitWriterBase
{
/// <summary>
/// A scratch buffer to reduce allocations.
/// </summary>
private readonly byte[] scratchBuffer = new byte[8];
/// <summary>
/// This is the minimum amount of size the memory buffer is guaranteed to grow when extra space is needed.
/// </summary>
private const int MinExtraSize = 32768;
private const int WriterBytes = 4;
private const int WriterBits = 32;
/// <summary>
/// Bit accumulator.
/// </summary>
private ulong bits;
/// <summary>
/// Number of bits used in accumulator.
/// </summary>
private int used;
/// <summary>
/// Current write position.
/// </summary>
private int cur;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LBitWriter"/> class.
/// </summary>
/// <param name="expectedSize">The expected size in bytes.</param>
public Vp8LBitWriter(int expectedSize)
: base(expectedSize)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LBitWriter"/> class.
/// Used internally for cloning.
/// </summary>
private Vp8LBitWriter(byte[] buffer, ulong bits, int used, int cur)
: base(buffer)
{
this.bits = bits;
this.used = used;
this.cur = cur;
}
/// <summary>
/// Writes bits into bytes at increasing addresses (little endian),
/// and within a byte least-significant-bit first. Up to 32 bits can be written in one go.
/// </summary>
public void PutBits(uint bits, int nBits)
{
if (nBits > 0)
{
if (this.used >= 32)
{
this.PutBitsFlushBits();
}
this.bits |= (ulong)bits << this.used;
this.used += nBits;
}
}
public void Reset(Vp8LBitWriter bwInit)
{
this.bits = bwInit.bits;
this.used = bwInit.used;
this.cur = bwInit.cur;
}
public void WriteHuffmanCode(HuffmanTreeCode code, int codeIndex)
{
int depth = code.CodeLengths[codeIndex];
int symbol = code.Codes[codeIndex];
this.PutBits((uint)symbol, depth);
}
public void WriteHuffmanCodeWithExtraBits(HuffmanTreeCode code, int codeIndex, int bits, int nBits)
{
int depth = code.CodeLengths[codeIndex];
int symbol = code.Codes[codeIndex];
this.PutBits((uint)((bits << depth) | symbol), depth + nBits);
}
/// <inheritdoc/>
public override int NumBytes() => this.cur + ((this.used + 7) >> 3);
public Vp8LBitWriter Clone()
{
byte[] clonedBuffer = new byte[this.Buffer.Length];
System.Buffer.BlockCopy(this.Buffer, 0, clonedBuffer, 0, this.cur);
return new Vp8LBitWriter(clonedBuffer, this.bits, this.used, this.cur);
}
/// <inheritdoc/>
public override void Finish()
{
this.BitWriterResize((this.used + 7) >> 3);
while (this.used > 0)
{
this.Buffer[this.cur++] = (byte)this.bits;
this.bits >>= 8;
this.used -= 8;
}
this.used = 0;
}
/// <inheritdoc/>
public override void WriteEncodedImageToStream(Stream stream, ExifProfile exifProfile, uint width, uint height)
{
Span<byte> buffer = stackalloc byte[4];
bool isVp8X = false;
byte[] exifBytes = null;
uint riffSize = 0;
if (exifProfile != null)
{
isVp8X = true;
riffSize += WebpConstants.ChunkHeaderSize + WebpConstants.Vp8XChunkSize;
exifBytes = exifProfile.ToByteArray();
riffSize += WebpConstants.ChunkHeaderSize + (uint)exifBytes.Length;
}
this.Finish();
uint size = (uint)this.NumBytes();
size++; // One byte extra for the VP8L signature.
// Write RIFF header.
uint pad = size & 1;
riffSize += WebpConstants.TagSize + WebpConstants.ChunkHeaderSize + size + pad;
this.WriteRiffHeader(stream, riffSize);
// Write VP8X, header if necessary.
if (isVp8X)
{
this.WriteVp8XHeader(stream, exifProfile, width, height);
}
// Write the magic bytes indicating it's a lossless webp.
stream.Write(WebpConstants.Vp8LMagicBytes);
// Write Vp8 Header.
BinaryPrimitives.WriteUInt32LittleEndian(buffer, size);
stream.Write(buffer);
stream.WriteByte(WebpConstants.Vp8LHeaderMagicByte);
// Write the encoded bytes of the image to the stream.
this.WriteToStream(stream);
if (pad == 1)
{
stream.WriteByte(0);
}
if (exifProfile != null)
{
this.WriteExifProfile(stream, exifBytes);
}
}
/// <summary>
/// Flushes 32 bits from the bit accumulator into the buffer. Called by PutBits when the accumulator is full.
/// </summary>
private void PutBitsFlushBits()
{
// If needed, make some room by flushing some bits out.
if (this.cur + WriterBytes > this.Buffer.Length)
{
int extraSize = this.Buffer.Length - this.cur + MinExtraSize;
this.BitWriterResize(extraSize);
}
BinaryPrimitives.WriteUInt64LittleEndian(this.scratchBuffer, this.bits);
this.scratchBuffer.AsSpan(0, 4).CopyTo(this.Buffer.AsSpan(this.cur));
this.cur += WriterBytes;
this.bits >>= WriterBits;
this.used -= WriterBits;
}
/// <summary>
/// Resizes the buffer to write to.
/// </summary>
/// <param name="extraSize">The extra size in bytes needed.</param>
public override void BitWriterResize(int extraSize)
{
int maxBytes = this.Buffer.Length + this.Buffer.Length;
int sizeRequired = this.cur + extraSize;
this.ResizeBuffer(maxBytes, sizeRequired);
}
}
}
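A short illustration (values chosen arbitrarily) of the LSB-first accumulation performed by PutBits:

var bw = new Vp8LBitWriter(expectedSize: 16);
bw.PutBits(0b101, 3);   // occupies bits 0..2 of the first output byte
bw.PutBits(0b00111, 5); // occupies bits 3..7 of the first output byte
bw.Finish();
// bw.NumBytes() == 1 and bw.Buffer[0] == 0b0011_1101 (0x3D).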

25
src/ImageSharp/Formats/Webp/EntropyIx.cs

@@ -0,0 +1,25 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp
{
/// <summary>
/// These five modes are evaluated and their respective entropy is computed.
/// </summary>
internal enum EntropyIx
{
Direct = 0,
Spatial = 1,
SubGreen = 2,
SpatialSubGreen = 3,
Palette = 4,
PaletteAndSpatial = 5,
NumEntropyIx = 6
}
}

36
src/ImageSharp/Formats/Webp/HistoIx.cs

@@ -0,0 +1,36 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp
{
internal enum HistoIx
{
HistoAlpha = 0,
HistoAlphaPred,
HistoGreen,
HistoGreenPred,
HistoRed,
HistoRedPred,
HistoBlue,
HistoBluePred,
HistoRedSubGreen,
HistoRedPredSubGreen,
HistoBlueSubGreen,
HistoBluePredSubGreen,
HistoPalette,
HistoTotal
}
}

16
src/ImageSharp/Formats/Webp/IWebpDecoderOptions.cs

@@ -0,0 +1,16 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp
{
/// <summary>
/// Image decoder options for generating an image out of a webp stream.
/// </summary>
internal interface IWebpDecoderOptions
{
/// <summary>
/// Gets a value indicating whether the metadata should be ignored when the image is being decoded.
/// </summary>
bool IgnoreMetadata { get; }
}
}

77
src/ImageSharp/Formats/Webp/IWebpEncoderOptions.cs

@@ -0,0 +1,77 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp
{
/// <summary>
/// Configuration options for use during webp encoding.
/// </summary>
internal interface IWebpEncoderOptions
{
/// <summary>
/// Gets the webp file format used. Either lossless or lossy.
/// </summary>
WebpFileFormatType? FileFormat { get; }
/// <summary>
/// Gets the compression quality. Between 0 and 100.
/// For lossy, 0 gives the smallest size and 100 the largest. For lossless,
/// this parameter is the amount of effort put into the compression: 0 is the fastest but produces larger
/// files, while 100 is the slowest but produces the smallest files.
/// Defaults to 75.
/// </summary>
int Quality { get; }
/// <summary>
/// Gets the encoding method to use. It's a quality/speed trade-off (0=fast, 6=slower-better).
/// Defaults to 4.
/// </summary>
WebpEncodingMethod Method { get; }
/// <summary>
/// Gets a value indicating whether the alpha plane should be compressed with Webp lossless format.
/// </summary>
bool UseAlphaCompression { get; }
/// <summary>
/// Gets the number of entropy-analysis passes (in [1..10]).
/// </summary>
int EntropyPasses { get; }
/// <summary>
/// Gets the amplitude of the spatial noise shaping. Spatial noise shaping (or sns for short) refers to a general collection of built-in algorithms
/// used to decide which areas of the picture should use relatively fewer bits, and where to better transfer these bits instead.
/// The possible range goes from 0 (algorithm is off) to 100 (the maximal effect).
/// Defaults to 50.
/// </summary>
int SpatialNoiseShaping { get; }
/// <summary>
/// Gets the strength of the deblocking filter, between 0 (no filtering) and 100 (maximum filtering).
/// A higher value increases the strength of the filtering process applied after decoding the picture,
/// making it appear smoother.
/// Typical values are usually in the range of 20 to 50.
/// Defaults to 60.
/// </summary>
int FilterStrength { get; }
/// <summary>
/// Gets the transparent color mode. This indicates whether to preserve the exact RGB values under transparent areas,
/// or to discard this invisible RGB information for better compression.
/// Defaults to Clear.
/// </summary>
WebpTransparentColorMode TransparentColorMode { get; }
/// <summary>
/// Gets a value indicating whether near lossless mode should be used.
/// This option adjusts pixel values to help compressibility, but has minimal impact on the visual quality.
/// </summary>
bool NearLossless { get; }
/// <summary>
/// Gets the quality of near-lossless image preprocessing. The range is 0 (maximum preprocessing) to 100 (no preprocessing, the default).
/// The typical value is around 60. Note that lossy encoding with quality 100 can at times yield better results.
/// </summary>
int NearLosslessQuality { get; }
}
}
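To make the ranges above concrete, a hypothetical implementation with plausible values (the documented defaults where the summaries state them; the class name is an assumption, only the interface members come from this change):

internal sealed class ExampleWebpEncoderOptions : IWebpEncoderOptions // hypothetical
{
    public WebpFileFormatType? FileFormat => null;             // let the encoder pick lossy vs. lossless
    public int Quality => 75;
    public WebpEncodingMethod Method => (WebpEncodingMethod)4; // documented default; the enum member name is not shown in this diff
    public bool UseAlphaCompression => true;
    public int EntropyPasses => 1;
    public int SpatialNoiseShaping => 50;
    public int FilterStrength => 60;
    public WebpTransparentColorMode TransparentColorMode => WebpTransparentColorMode.Clear;
    public bool NearLossless => false;
    public int NearLosslessQuality => 100;
}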

854
src/ImageSharp/Formats/Webp/Lossless/BackwardReferenceEncoder.cs

@@ -0,0 +1,854 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Collections.Generic;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal class BackwardReferenceEncoder
{
/// <summary>
/// Maximum bit length.
/// </summary>
public const int MaxLengthBits = 12;
private const float MaxEntropy = 1e30f;
private const int WindowOffsetsSizeMax = 32;
/// <summary>
/// We want the max value to be attainable and stored in MaxLengthBits bits.
/// </summary>
public const int MaxLength = (1 << MaxLengthBits) - 1;
/// <summary>
/// Minimum number of pixels for which it is cheaper to encode a
/// distance + length instead of each pixel as a literal.
/// </summary>
private const int MinLength = 4;
/// <summary>
/// Evaluates the best possible backward references for the specified quality. The input cacheBits to 'GetBackwardReferences'
/// sets the maximum cache bits to use (passing 0 implies disabling the local color cache).
/// The optimal cache bits are evaluated and written back to the cacheBits parameter.
/// The return value is the best of the two passed-in backward reference sets, i.e. either 'best' or 'worst'.
/// </summary>
public static Vp8LBackwardRefs GetBackwardReferences(
int width,
int height,
ReadOnlySpan<uint> bgra,
int quality,
int lz77TypesToTry,
ref int cacheBits,
Vp8LHashChain hashChain,
Vp8LBackwardRefs best,
Vp8LBackwardRefs worst)
{
int lz77TypeBest = 0;
double bitCostBest = -1;
int cacheBitsInitial = cacheBits;
Vp8LHashChain hashChainBox = null;
for (int lz77Type = 1; lz77TypesToTry > 0; lz77TypesToTry &= ~lz77Type, lz77Type <<= 1)
{
int cacheBitsTmp = cacheBitsInitial;
if ((lz77TypesToTry & lz77Type) == 0)
{
continue;
}
switch ((Vp8LLz77Type)lz77Type)
{
case Vp8LLz77Type.Lz77Rle:
BackwardReferencesRle(width, height, bgra, 0, worst);
break;
case Vp8LLz77Type.Lz77Standard:
// Compute LZ77 with no cache (0 bits), as the ideal LZ77 with a color cache is not that different in practice.
BackwardReferencesLz77(width, height, bgra, 0, hashChain, worst);
break;
case Vp8LLz77Type.Lz77Box:
hashChainBox = new Vp8LHashChain(width * height);
BackwardReferencesLz77Box(width, height, bgra, 0, hashChain, hashChainBox, worst);
break;
}
// Next, try with a color cache and update the references.
cacheBitsTmp = CalculateBestCacheSize(bgra, quality, worst, cacheBitsTmp);
if (cacheBitsTmp > 0)
{
BackwardRefsWithLocalCache(bgra, cacheBitsTmp, worst);
}
// Keep the best backward references.
var histo = new Vp8LHistogram(worst, cacheBitsTmp);
double bitCost = histo.EstimateBits();
if (lz77TypeBest == 0 || bitCost < bitCostBest)
{
Vp8LBackwardRefs tmp = worst;
worst = best;
best = tmp;
bitCostBest = bitCost;
cacheBits = cacheBitsTmp;
lz77TypeBest = lz77Type;
}
}
// Improve on simple LZ77 but only for high quality (TraceBackwards is costly).
if ((lz77TypeBest == (int)Vp8LLz77Type.Lz77Standard || lz77TypeBest == (int)Vp8LLz77Type.Lz77Box) && quality >= 25)
{
Vp8LHashChain hashChainTmp = lz77TypeBest == (int)Vp8LLz77Type.Lz77Standard ? hashChain : hashChainBox;
BackwardReferencesTraceBackwards(width, height, bgra, cacheBits, hashChainTmp, best, worst);
var histo = new Vp8LHistogram(worst, cacheBits);
double bitCostTrace = histo.EstimateBits();
if (bitCostTrace < bitCostBest)
{
best = worst;
}
}
BackwardReferences2DLocality(width, best);
return best;
}
/// <summary>
/// Evaluate optimal cache bits for the local color cache.
/// The input bestCacheBits sets the maximum cache bits to use (passing 0 implies disabling the local color cache).
/// The local color cache is also disabled for lower qualities (smaller than 25).
/// </summary>
/// <returns>Best cache size.</returns>
private static int CalculateBestCacheSize(ReadOnlySpan<uint> bgra, int quality, Vp8LBackwardRefs refs, int bestCacheBits)
{
int cacheBitsMax = quality <= 25 ? 0 : bestCacheBits;
if (cacheBitsMax == 0)
{
// Local color cache is disabled.
return 0;
}
double entropyMin = MaxEntropy;
int pos = 0;
var colorCache = new ColorCache[WebpConstants.MaxColorCacheBits + 1];
var histos = new Vp8LHistogram[WebpConstants.MaxColorCacheBits + 1];
for (int i = 0; i <= WebpConstants.MaxColorCacheBits; i++)
{
histos[i] = new Vp8LHistogram(paletteCodeBits: i);
colorCache[i] = new ColorCache();
colorCache[i].Init(i);
}
// Find the cacheBits giving the lowest entropy.
for (int idx = 0; idx < refs.Refs.Count; idx++)
{
PixOrCopy v = refs.Refs[idx];
if (v.IsLiteral())
{
uint pix = bgra[pos++];
uint a = (pix >> 24) & 0xff;
uint r = (pix >> 16) & 0xff;
uint g = (pix >> 8) & 0xff;
uint b = (pix >> 0) & 0xff;
// The keys of the caches can be derived from the longest one.
int key = ColorCache.HashPix(pix, 32 - cacheBitsMax);
// Do not use the color cache for cacheBits = 0.
++histos[0].Blue[b];
++histos[0].Literal[g];
++histos[0].Red[r];
++histos[0].Alpha[a];
// Deal with cacheBits > 0.
for (int i = cacheBitsMax; i >= 1; --i, key >>= 1)
{
if (colorCache[i].Lookup(key) == pix)
{
++histos[i].Literal[WebpConstants.NumLiteralCodes + WebpConstants.NumLengthCodes + key];
}
else
{
colorCache[i].Set((uint)key, pix);
++histos[i].Blue[b];
++histos[i].Literal[g];
++histos[i].Red[r];
++histos[i].Alpha[a];
}
}
}
else
{
// We should compute the contribution of the (distance, length)
// histograms but those are the same independently of the cache size.
// As those constant contributions are in the end added to the other
// histogram contributions, we can ignore them, except for the length
// prefix that is part of the literal histogram.
int len = v.Len;
uint bgraPrev = bgra[pos] ^ 0xffffffffu;
int extraBits = 0, extraBitsValue = 0;
int code = LosslessUtils.PrefixEncode(len, ref extraBits, ref extraBitsValue);
for (int i = 0; i <= cacheBitsMax; i++)
{
++histos[i].Literal[WebpConstants.NumLiteralCodes + code];
}
// Update the color caches.
do
{
if (bgra[pos] != bgraPrev)
{
// Efficiency: insert only if the color changes.
int key = ColorCache.HashPix(bgra[pos], 32 - cacheBitsMax);
for (int i = cacheBitsMax; i >= 1; --i, key >>= 1)
{
colorCache[i].Colors[key] = bgra[pos];
}
bgraPrev = bgra[pos];
}
pos++;
}
while (--len != 0);
}
}
for (int i = 0; i <= cacheBitsMax; i++)
{
double entropy = histos[i].EstimateBits();
if (i == 0 || entropy < entropyMin)
{
entropyMin = entropy;
bestCacheBits = i;
}
}
return bestCacheBits;
}
private static void BackwardReferencesTraceBackwards(
int xSize,
int ySize,
ReadOnlySpan<uint> bgra,
int cacheBits,
Vp8LHashChain hashChain,
Vp8LBackwardRefs refsSrc,
Vp8LBackwardRefs refsDst)
{
int distArraySize = xSize * ySize;
ushort[] distArray = new ushort[distArraySize];
BackwardReferencesHashChainDistanceOnly(xSize, ySize, bgra, cacheBits, hashChain, refsSrc, distArray);
int chosenPathSize = TraceBackwards(distArray, distArraySize);
Span<ushort> chosenPath = distArray.AsSpan(distArraySize - chosenPathSize);
BackwardReferencesHashChainFollowChosenPath(bgra, cacheBits, chosenPath, chosenPathSize, hashChain, refsDst);
}
private static void BackwardReferencesHashChainDistanceOnly(
int xSize,
int ySize,
ReadOnlySpan<uint> bgra,
int cacheBits,
Vp8LHashChain hashChain,
Vp8LBackwardRefs refs,
ushort[] distArray)
{
int pixCount = xSize * ySize;
bool useColorCache = cacheBits > 0;
int literalArraySize = WebpConstants.NumLiteralCodes + WebpConstants.NumLengthCodes + (cacheBits > 0 ? 1 << cacheBits : 0);
var costModel = new CostModel(literalArraySize);
int offsetPrev = -1;
int lenPrev = -1;
double offsetCost = -1;
int firstOffsetIsConstant = -1; // initialized with 'impossible' value.
int reach = 0;
var colorCache = new ColorCache();
if (useColorCache)
{
colorCache.Init(cacheBits);
}
costModel.Build(xSize, cacheBits, refs);
var costManager = new CostManager(distArray, pixCount, costModel);
// We loop one pixel at a time, but store all currently best points to non-processed locations from this point.
distArray[0] = 0;
// Add first pixel as literal.
AddSingleLiteralWithCostModel(bgra, colorCache, costModel, 0, useColorCache, 0.0f, costManager.Costs, distArray);
for (int i = 1; i < pixCount; i++)
{
float prevCost = costManager.Costs[i - 1];
int offset = hashChain.FindOffset(i);
int len = hashChain.FindLength(i);
// Try adding the pixel as a literal.
AddSingleLiteralWithCostModel(bgra, colorCache, costModel, i, useColorCache, prevCost, costManager.Costs, distArray);
// If we are dealing with a non-literal.
if (len >= 2)
{
if (offset != offsetPrev)
{
int code = DistanceToPlaneCode(xSize, offset);
offsetCost = costModel.GetDistanceCost(code);
firstOffsetIsConstant = 1;
costManager.PushInterval(prevCost + offsetCost, i, len);
}
else
{
// Instead of considering all contributions from a pixel i by calling:
// costManager.PushInterval(prevCost + offsetCost, i, len);
// we optimize these contributions in case offsetCost stays the same
// for consecutive pixels. This describes a set of pixels similar to a
// previous set (e.g. constant color regions).
if (firstOffsetIsConstant != 0)
{
reach = i - 1 + lenPrev - 1;
firstOffsetIsConstant = 0;
}
if (i + len - 1 > reach)
{
int lenJ = 0;
int j;
for (j = i; j <= reach; j++)
{
int offsetJ = hashChain.FindOffset(j + 1);
lenJ = hashChain.FindLength(j + 1);
if (offsetJ != offset)
{
lenJ = hashChain.FindLength(j);
break;
}
}
// Update the cost at j - 1 and j.
costManager.UpdateCostAtIndex(j - 1, false);
costManager.UpdateCostAtIndex(j, false);
costManager.PushInterval(costManager.Costs[j - 1] + offsetCost, j, lenJ);
reach = j + lenJ - 1;
}
}
}
costManager.UpdateCostAtIndex(i, true);
offsetPrev = offset;
lenPrev = len;
}
}
private static int TraceBackwards(ushort[] distArray, int distArraySize)
{
int chosenPathSize = 0;
int pathPos = distArraySize;
int curPos = distArraySize - 1;
while (curPos >= 0)
{
ushort cur = distArray[curPos];
pathPos--;
chosenPathSize++;
distArray[pathPos] = cur;
curPos -= cur;
}
return chosenPathSize;
}
private static void BackwardReferencesHashChainFollowChosenPath(ReadOnlySpan<uint> bgra, int cacheBits, Span<ushort> chosenPath, int chosenPathSize, Vp8LHashChain hashChain, Vp8LBackwardRefs backwardRefs)
{
bool useColorCache = cacheBits > 0;
var colorCache = new ColorCache();
int i = 0;
if (useColorCache)
{
colorCache.Init(cacheBits);
}
backwardRefs.Refs.Clear();
for (int ix = 0; ix < chosenPathSize; ix++)
{
int len = chosenPath[ix];
if (len != 1)
{
int offset = hashChain.FindOffset(i);
backwardRefs.Add(PixOrCopy.CreateCopy((uint)offset, (ushort)len));
if (useColorCache)
{
for (int k = 0; k < len; k++)
{
colorCache.Insert(bgra[i + k]);
}
}
i += len;
}
else
{
PixOrCopy v;
int idx = useColorCache ? colorCache.Contains(bgra[i]) : -1;
if (idx >= 0)
{
// useColorCache is true and color cache contains bgra[i]
// Push pixel as a color cache index.
v = PixOrCopy.CreateCacheIdx(idx);
}
else
{
if (useColorCache)
{
colorCache.Insert(bgra[i]);
}
v = PixOrCopy.CreateLiteral(bgra[i]);
}
backwardRefs.Add(v);
i++;
}
}
}
private static void AddSingleLiteralWithCostModel(
ReadOnlySpan<uint> bgra,
ColorCache colorCache,
CostModel costModel,
int idx,
bool useColorCache,
float prevCost,
float[] cost,
ushort[] distArray)
{
double costVal = prevCost;
uint color = bgra[idx];
int ix = useColorCache ? colorCache.Contains(color) : -1;
if (ix >= 0)
{
double mul0 = 0.68;
costVal += costModel.GetCacheCost((uint)ix) * mul0;
}
else
{
double mul1 = 0.82;
if (useColorCache)
{
colorCache.Insert(color);
}
costVal += costModel.GetLiteralCost(color) * mul1;
}
if (cost[idx] > costVal)
{
cost[idx] = (float)costVal;
distArray[idx] = 1; // only one is inserted.
}
}
private static void BackwardReferencesLz77(int xSize, int ySize, ReadOnlySpan<uint> bgra, int cacheBits, Vp8LHashChain hashChain, Vp8LBackwardRefs refs)
{
int iLastCheck = -1;
bool useColorCache = cacheBits > 0;
int pixCount = xSize * ySize;
var colorCache = new ColorCache();
if (useColorCache)
{
colorCache.Init(cacheBits);
}
refs.Refs.Clear();
for (int i = 0; i < pixCount;)
{
// Alternative #1: Code the pixels starting at 'i' using backward reference.
int j;
int offset = hashChain.FindOffset(i);
int len = hashChain.FindLength(i);
if (len >= MinLength)
{
int lenIni = len;
int maxReach = 0;
int jMax = i + lenIni >= pixCount ? pixCount - 1 : i + lenIni;
// Only start from what we have not checked already.
iLastCheck = i > iLastCheck ? i : iLastCheck;
// We know the best match for the current pixel but we try to find the
// best matches for the current pixel AND the next one combined.
// The naive method would use the intervals:
// [i,i+len) + [i+len, length of best match at i+len)
// while we check if we can use:
// [i,j) (where j<=i+len) + [j, length of best match at j)
for (j = iLastCheck + 1; j <= jMax; j++)
{
int lenJ = hashChain.FindLength(j);
int reach = j + (lenJ >= MinLength ? lenJ : 1); // 1 for single literal.
if (reach > maxReach)
{
len = j - i;
maxReach = reach;
if (maxReach >= pixCount)
{
break;
}
}
}
}
else
{
len = 1;
}
// Go with literal or backward reference.
if (len == 1)
{
AddSingleLiteral(bgra[i], useColorCache, colorCache, refs);
}
else
{
refs.Add(PixOrCopy.CreateCopy((uint)offset, (ushort)len));
if (useColorCache)
{
for (j = i; j < i + len; j++)
{
colorCache.Insert(bgra[j]);
}
}
}
i += len;
}
}
/// <summary>
/// Compute an LZ77 by forcing matches to happen within a given distance cost.
/// We therefore limit the algorithm to the lowest 32 values in the PlaneCode definition.
/// </summary>
private static void BackwardReferencesLz77Box(int xSize, int ySize, ReadOnlySpan<uint> bgra, int cacheBits, Vp8LHashChain hashChainBest, Vp8LHashChain hashChain, Vp8LBackwardRefs refs)
{
int pixelCount = xSize * ySize;
int[] windowOffsets = new int[WindowOffsetsSizeMax];
int[] windowOffsetsNew = new int[WindowOffsetsSizeMax];
int windowOffsetsSize = 0;
int windowOffsetsNewSize = 0;
short[] counts = new short[xSize * ySize];
int bestOffsetPrev = -1;
int bestLengthPrev = -1;
// counts[i] counts how many times a pixel is repeated starting at position i.
int i = pixelCount - 2;
int countsPos = i;
counts[countsPos + 1] = 1;
for (; i >= 0; --i, --countsPos)
{
if (bgra[i] == bgra[i + 1])
{
// Max out the counts to MaxLength.
counts[countsPos] = counts[countsPos + 1];
if (counts[countsPos + 1] != MaxLength)
{
counts[countsPos]++;
}
}
else
{
counts[countsPos] = 1;
}
}
// Figure out the window offsets around a pixel. They are stored in a
// spiraling order around the pixel as defined by DistanceToPlaneCode.
for (int y = 0; y <= 6; y++)
{
for (int x = -6; x <= 6; x++)
{
int offset = (y * xSize) + x;
// Ignore offsets that bring us after the pixel.
if (offset <= 0)
{
continue;
}
int planeCode = DistanceToPlaneCode(xSize, offset) - 1;
if (planeCode >= WindowOffsetsSizeMax)
{
continue;
}
windowOffsets[planeCode] = offset;
}
}
// For narrow images, not all plane codes are reached, so remove those.
for (i = 0; i < WindowOffsetsSizeMax; i++)
{
if (windowOffsets[i] == 0)
{
continue;
}
windowOffsets[windowOffsetsSize++] = windowOffsets[i];
}
// Given a pixel P, find the offsets that reach pixels unreachable from P-1
// with any of the offsets in windowOffsets[].
for (i = 0; i < windowOffsetsSize; i++)
{
bool isReachable = false;
for (int j = 0; j < windowOffsetsSize && !isReachable; j++)
{
isReachable |= windowOffsets[i] == windowOffsets[j] + 1;
}
if (!isReachable)
{
windowOffsetsNew[windowOffsetsNewSize] = windowOffsets[i];
++windowOffsetsNewSize;
}
}
hashChain.OffsetLength[0] = 0;
for (i = 1; i < pixelCount; i++)
{
int ind;
int bestLength = hashChainBest.FindLength(i);
int bestOffset = 0;
bool doCompute = true;
if (bestLength >= MaxLength)
{
// Do not recompute the best match if we already have a maximal one in the window.
bestOffset = hashChainBest.FindOffset(i);
for (ind = 0; ind < windowOffsetsSize; ind++)
{
if (bestOffset == windowOffsets[ind])
{
doCompute = false;
break;
}
}
}
if (doCompute)
{
// Figure out if we should use the offset/length from the previous pixel
// as an initial guess and therefore only inspect the offsets in windowOffsetsNew[].
bool usePrev = bestLengthPrev is > 1 and < MaxLength;
int numInd = usePrev ? windowOffsetsNewSize : windowOffsetsSize;
bestLength = usePrev ? bestLengthPrev - 1 : 0;
bestOffset = usePrev ? bestOffsetPrev : 0;
// Find the longest match in a window around the pixel.
for (ind = 0; ind < numInd; ind++)
{
int currLength = 0;
int j = i;
int jOffset = usePrev ? i - windowOffsetsNew[ind] : i - windowOffsets[ind];
if (jOffset < 0 || bgra[jOffset] != bgra[i])
{
continue;
}
// The longest match is the sum of how many times each pixel is repeated.
do
{
int countsJOffset = counts[jOffset];
int countsJ = counts[j];
if (countsJOffset != countsJ)
{
currLength += countsJOffset < countsJ ? countsJOffset : countsJ;
break;
}
// The same color is repeated countsJOffset times at jOffset and j.
currLength += countsJOffset;
jOffset += countsJOffset;
j += countsJOffset;
}
while (currLength <= MaxLength && j < pixelCount && bgra[jOffset] == bgra[j]);
if (bestLength < currLength)
{
bestOffset = usePrev ? windowOffsetsNew[ind] : windowOffsets[ind];
if (currLength >= MaxLength)
{
bestLength = MaxLength;
break;
}
else
{
bestLength = currLength;
}
}
}
}
if (bestLength <= MinLength)
{
hashChain.OffsetLength[i] = 0;
bestOffsetPrev = 0;
bestLengthPrev = 0;
}
else
{
hashChain.OffsetLength[i] = (uint)((bestOffset << MaxLengthBits) | bestLength);
bestOffsetPrev = bestOffset;
bestLengthPrev = bestLength;
}
}
hashChain.OffsetLength[0] = 0;
BackwardReferencesLz77(xSize, ySize, bgra, cacheBits, hashChain, refs);
}
private static void BackwardReferencesRle(int xSize, int ySize, ReadOnlySpan<uint> bgra, int cacheBits, Vp8LBackwardRefs refs)
{
int pixelCount = xSize * ySize;
bool useColorCache = cacheBits > 0;
var colorCache = new ColorCache();
if (useColorCache)
{
colorCache.Init(cacheBits);
}
refs.Refs.Clear();
// Add first pixel as literal.
AddSingleLiteral(bgra[0], useColorCache, colorCache, refs);
int i = 1;
while (i < pixelCount)
{
int maxLen = LosslessUtils.MaxFindCopyLength(pixelCount - i);
int rleLen = LosslessUtils.FindMatchLength(bgra.Slice(i), bgra.Slice(i - 1), 0, maxLen);
int prevRowLen = i < xSize ? 0 : LosslessUtils.FindMatchLength(bgra.Slice(i), bgra.Slice(i - xSize), 0, maxLen);
if (rleLen >= prevRowLen && rleLen >= MinLength)
{
refs.Add(PixOrCopy.CreateCopy(1, (ushort)rleLen));
// We don't need to update the color cache here since it is always the
// same pixel being copied, and that does not change the color cache state.
i += rleLen;
}
else if (prevRowLen >= MinLength)
{
refs.Add(PixOrCopy.CreateCopy((uint)xSize, (ushort)prevRowLen));
if (useColorCache)
{
for (int k = 0; k < prevRowLen; ++k)
{
colorCache.Insert(bgra[i + k]);
}
}
i += prevRowLen;
}
else
{
AddSingleLiteral(bgra[i], useColorCache, colorCache, refs);
i++;
}
}
}
/// <summary>
/// Update (in-place) backward references for the specified cacheBits.
/// </summary>
private static void BackwardRefsWithLocalCache(ReadOnlySpan<uint> bgra, int cacheBits, Vp8LBackwardRefs refs)
{
int pixelIndex = 0;
var colorCache = new ColorCache();
colorCache.Init(cacheBits);
for (int idx = 0; idx < refs.Refs.Count; idx++)
{
PixOrCopy v = refs.Refs[idx];
if (v.IsLiteral())
{
uint bgraLiteral = v.BgraOrDistance;
int ix = colorCache.Contains(bgraLiteral);
if (ix >= 0)
{
// Color cache contains bgraLiteral
v.Mode = PixOrCopyMode.CacheIdx;
v.BgraOrDistance = (uint)ix;
v.Len = 1;
}
else
{
colorCache.Insert(bgraLiteral);
}
pixelIndex++;
}
else
{
// refs was created without local cache, so it can not have cache indexes.
for (int k = 0; k < v.Len; ++k)
{
colorCache.Insert(bgra[pixelIndex++]);
}
}
}
}
private static void BackwardReferences2DLocality(int xSize, Vp8LBackwardRefs refs)
{
using List<PixOrCopy>.Enumerator c = refs.Refs.GetEnumerator();
while (c.MoveNext())
{
if (c.Current.IsCopy())
{
int dist = (int)c.Current.BgraOrDistance;
int transformedDist = DistanceToPlaneCode(xSize, dist);
c.Current.BgraOrDistance = (uint)transformedDist;
}
}
}
private static void AddSingleLiteral(uint pixel, bool useColorCache, ColorCache colorCache, Vp8LBackwardRefs refs)
{
PixOrCopy v;
if (useColorCache)
{
int key = colorCache.GetIndex(pixel);
if (colorCache.Lookup(key) == pixel)
{
v = PixOrCopy.CreateCacheIdx(key);
}
else
{
v = PixOrCopy.CreateLiteral(pixel);
colorCache.Set((uint)key, pixel);
}
}
else
{
v = PixOrCopy.CreateLiteral(pixel);
}
refs.Add(v);
}
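/// <summary>
/// Converts a backward-reference distance to a plane code: small two-dimensional offsets close
/// to the current pixel are mapped to short codes via a lookup table, while all other distances
/// are simply shifted by 120 so the two ranges cannot overlap.
/// </summary>
/// <param name="xSize">The width of the image in pixels.</param>
/// <param name="dist">The backward-reference distance in pixels.</param>
/// <returns>The plane code for the distance.</returns>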
public static int DistanceToPlaneCode(int xSize, int dist)
{
int yOffset = dist / xSize;
int xOffset = dist - (yOffset * xSize);
if (xOffset <= 8 && yOffset < 8)
{
return (int)WebpLookupTables.PlaneToCodeLut[(yOffset * 16) + 8 - xOffset] + 1;
}
else if (xOffset > xSize - 8 && yOffset < 7)
{
return (int)WebpLookupTables.PlaneToCodeLut[((yOffset + 1) * 16) + 8 + (xSize - xOffset)] + 1;
}
return dist + 120;
}
}
}
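As a small illustration of the plane-code mapping in DistanceToPlaneCode above, the sketch below decomposes a linear backward distance into the (xOffset, yOffset) pair the method inspects before choosing between the lookup table and the dist + 120 fallback. The class and sample values are illustrative only and not part of the encoder.
internal static class PlaneCodeSketch
{
    // Decompose a linear distance into a 2-D offset for an image of width xSize.
    public static (int XOffset, int YOffset) Decompose(int xSize, int dist)
    {
        int yOffset = dist / xSize;
        int xOffset = dist - (yOffset * xSize);
        return (xOffset, yOffset);
    }
}
// A distance of exactly one row (dist == xSize, here 100) decomposes to (0, 1), the pixel
// directly above, which falls into the short-distance lookup-table branch of DistanceToPlaneCode.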

84
src/ImageSharp/Formats/Webp/Lossless/ColorCache.cs

@ -0,0 +1,84 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// A small hash-addressed array to store recently used colors, to be able to recall them with shorter codes.
/// </summary>
internal class ColorCache
{
private const uint HashMul = 0x1e35a7bdu;
/// <summary>
/// Gets the color entries.
/// </summary>
public uint[] Colors { get; private set; }
/// <summary>
/// Gets the hash shift: 32 - hashBits.
/// </summary>
public int HashShift { get; private set; }
/// <summary>
/// Gets the hash bits.
/// </summary>
public int HashBits { get; private set; }
/// <summary>
/// Initializes a new color cache.
/// </summary>
/// <param name="hashBits">The hashBits determine the size of cache. It will be 1 left shifted by hashBits.</param>
public void Init(int hashBits)
{
int hashSize = 1 << hashBits;
this.Colors = new uint[hashSize];
this.HashBits = hashBits;
this.HashShift = 32 - hashBits;
}
/// <summary>
/// Inserts a new color into the cache.
/// </summary>
/// <param name="bgra">The color to insert.</param>
public void Insert(uint bgra)
{
int key = HashPix(bgra, this.HashShift);
this.Colors[key] = bgra;
}
/// <summary>
/// Gets a color for a given key.
/// </summary>
/// <param name="key">The key to lookup.</param>
/// <returns>The color for the key.</returns>
public uint Lookup(int key) => this.Colors[key];
/// <summary>
/// Returns the index of the given color.
/// </summary>
/// <param name="bgra">The color to check.</param>
/// <returns>The index of the color in the cache, or -1 if it is not present.</returns>
public int Contains(uint bgra)
{
int key = HashPix(bgra, this.HashShift);
return (this.Colors[key] == bgra) ? key : -1;
}
/// <summary>
/// Gets the index of a color.
/// </summary>
/// <param name="bgra">The color.</param>
/// <returns>The index for the color.</returns>
public int GetIndex(uint bgra) => HashPix(bgra, this.HashShift);
/// <summary>
/// Sets the color in the cache at the given key.
/// </summary>
/// <param name="key">The key.</param>
/// <param name="bgra">The color to add.</param>
public void Set(uint key, uint bgra) => this.Colors[key] = bgra;
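/// <summary>
/// Computes the cache key of a color with a multiplicative hash: the color is multiplied by a
/// large odd constant and the top bits of the 32-bit product are kept by shifting right by <paramref name="shift"/>.
/// </summary>
/// <param name="argb">The color to hash.</param>
/// <param name="shift">The hash shift, 32 minus the number of hash bits.</param>
/// <returns>The cache key for the color.</returns>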
public static int HashPix(uint argb, int shift) => (int)((argb * HashMul) >> shift);
}
}
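A minimal usage sketch of the cache above; since ColorCache is internal, it is assumed to run inside the encoder assembly and namespace, and the pixel value is an arbitrary example.
// Illustrative only.
static void ColorCacheExample()
{
    var cache = new ColorCache();
    cache.Init(hashBits: 4); // 16 entries.

    uint pixel = 0xFF336699;
    cache.Insert(pixel);

    // Contains returns the key when the exact color is cached, otherwise -1.
    int key = cache.Contains(pixel);
    if (key >= 0)
    {
        uint cached = cache.Lookup(key); // 0xFF336699
    }
}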

20
src/ImageSharp/Formats/Webp/Lossless/CostCacheInterval.cs

@ -0,0 +1,20 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Diagnostics;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// The GetLengthCost(costModel, k) values are cached in a CostCacheInterval.
/// </summary>
[DebuggerDisplay("Start: {Start}, End: {End}, Cost: {Cost}")]
internal class CostCacheInterval
{
public double Cost { get; set; }
public int Start { get; set; }
public int End { get; set; } // Exclusive.
}
}

39
src/ImageSharp/Formats/Webp/Lossless/CostInterval.cs

@ -0,0 +1,39 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Diagnostics;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// To perform backward reference, every pixel at index 'Index' is considered and
/// the cost for the MaxLength following pixels is computed. Those following pixels
/// at index 'Index' + k (k from 0 to MaxLength) have a cost of:
/// cost = distance cost at index + GetLengthCost(costModel, k)
/// and the minimum value is kept. GetLengthCost(costModel, k) is cached in an
/// array of size MaxLength.
/// Instead of performing MaxLength comparisons per pixel, we keep track of the
/// minimal values using intervals of constant cost.
/// An interval is defined by the 'Index' of the pixel that generated it and
/// is only useful in a range of indices from 'Start' to 'End' (exclusive), i.e.
/// it contains the minimum value for pixels between 'Start' and 'End'.
/// Intervals are stored in a linked list and ordered by 'Start'. When a new
/// interval has a better value, old intervals are split or removed. There are
/// therefore no overlapping intervals.
/// </summary>
[DebuggerDisplay("Start: {Start}, End: {End}, Cost: {Cost}")]
internal class CostInterval
{
public float Cost { get; set; }
public int Start { get; set; }
public int End { get; set; }
public int Index { get; set; }
public CostInterval Previous { get; set; }
public CostInterval Next { get; set; }
}
}

308
src/ImageSharp/Formats/Webp/Lossless/CostManager.cs

@ -0,0 +1,308 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Collections.Generic;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// The CostManager is in charge of managing intervals and costs.
/// It caches the different CostCacheInterval's, the different
/// GetLengthCost(costModel, k) values (in CostCache) and the CostInterval's.
/// </summary>
internal class CostManager
{
private CostInterval head;
public CostManager(ushort[] distArray, int pixCount, CostModel costModel)
{
int costCacheSize = pixCount > BackwardReferenceEncoder.MaxLength ? BackwardReferenceEncoder.MaxLength : pixCount;
this.CacheIntervals = new List<CostCacheInterval>();
this.CostCache = new List<double>();
this.Costs = new float[pixCount];
this.DistArray = distArray;
this.Count = 0;
// Fill in the cost cache.
this.CacheIntervalsSize++;
this.CostCache.Add(costModel.GetLengthCost(0));
for (int i = 1; i < costCacheSize; i++)
{
this.CostCache.Add(costModel.GetLengthCost(i));
// Get the number of bound intervals.
if (this.CostCache[i] != this.CostCache[i - 1])
{
this.CacheIntervalsSize++;
}
}
// Fill in the cache intervals.
var cur = new CostCacheInterval()
{
Start = 0,
End = 1,
Cost = this.CostCache[0]
};
this.CacheIntervals.Add(cur);
for (int i = 1; i < costCacheSize; i++)
{
double costVal = this.CostCache[i];
if (costVal != cur.Cost)
{
cur = new CostCacheInterval()
{
Start = i,
Cost = costVal
};
this.CacheIntervals.Add(cur);
}
cur.End = i + 1;
}
// Set the initial costs high for every pixel as we will keep the minimum.
for (int i = 0; i < pixCount; i++)
{
this.Costs[i] = 1e38f;
}
}
/// <summary>
/// Gets or sets the number of stored intervals.
/// </summary>
public int Count { get; set; }
/// <summary>
/// Gets the costs cache. Contains the GetLengthCost(costModel, k).
/// </summary>
public List<double> CostCache { get; }
public int CacheIntervalsSize { get; }
public float[] Costs { get; }
public ushort[] DistArray { get; }
public List<CostCacheInterval> CacheIntervals { get; }
/// <summary>
/// Update the cost at index i by going over all the stored intervals that overlap with i.
/// </summary>
/// <param name="i">The index to update.</param>
/// <param name="doCleanIntervals">If 'doCleanIntervals' is true, intervals that end before 'i' will be popped.</param>
public void UpdateCostAtIndex(int i, bool doCleanIntervals)
{
CostInterval current = this.head;
while (current != null && current.Start <= i)
{
CostInterval next = current.Next;
if (current.End <= i)
{
if (doCleanIntervals)
{
// We have an outdated interval, remove it.
this.PopInterval(current);
}
}
else
{
this.UpdateCost(i, current.Index, current.Cost);
}
current = next;
}
}
/// <summary>
/// Given a new cost interval defined by its start at position, its length value
/// and distanceCost, add its contributions to the previous intervals and costs.
/// If handling the interval or one of its sub-intervals becomes too heavy, its
/// contribution is added to the costs right away.
/// </summary>
public void PushInterval(double distanceCost, int position, int len)
{
// If the interval is small enough, no need to deal with the heavy
// interval logic, just serialize it right away. This constant is empirical.
int skipDistance = 10;
if (len < skipDistance)
{
for (int j = position; j < position + len; j++)
{
int k = j - position;
float costTmp = (float)(distanceCost + this.CostCache[k]);
if (this.Costs[j] > costTmp)
{
this.Costs[j] = costTmp;
this.DistArray[j] = (ushort)(k + 1);
}
}
return;
}
CostInterval interval = this.head;
for (int i = 0; i < this.CacheIntervalsSize && this.CacheIntervals[i].Start < len; i++)
{
// Define the intersection of the ith interval with the new one.
int start = position + this.CacheIntervals[i].Start;
int end = position + (this.CacheIntervals[i].End > len ? len : this.CacheIntervals[i].End);
float cost = (float)(distanceCost + this.CacheIntervals[i].Cost);
CostInterval intervalNext;
for (; interval != null && interval.Start < end; interval = intervalNext)
{
intervalNext = interval.Next;
// Make sure we have some overlap.
if (start >= interval.End)
{
continue;
}
if (cost >= interval.Cost)
{
// If we are worse than what we already have, add whatever we have so far up to interval.
int startNew = interval.End;
this.InsertInterval(interval, cost, position, start, interval.Start);
start = startNew;
if (start >= end)
{
break;
}
continue;
}
if (start <= interval.Start)
{
if (interval.End <= end)
{
// We can safely remove the old interval as it is fully included.
this.PopInterval(interval);
}
else
{
interval.Start = end;
break;
}
}
else
{
if (end < interval.End)
{
// We have to split the old interval as it fully contains the new one.
int endOriginal = interval.End;
interval.End = start;
this.InsertInterval(interval, interval.Cost, interval.Index, end, endOriginal);
break;
}
else
{
interval.End = start;
}
}
}
// Insert the remaining interval from start to end.
this.InsertInterval(interval, cost, position, start, end);
}
}
/// <summary>
/// Pop an interval from the manager.
/// </summary>
/// <param name="interval">The interval to remove.</param>
private void PopInterval(CostInterval interval)
{
if (interval == null)
{
return;
}
this.ConnectIntervals(interval.Previous, interval.Next);
this.Count--;
}
private void InsertInterval(CostInterval intervalIn, float cost, int position, int start, int end)
{
if (start >= end)
{
return;
}
// TODO: should we use COST_CACHE_INTERVAL_SIZE_MAX?
var intervalNew = new CostInterval()
{
Cost = cost,
Start = start,
End = end,
Index = position
};
this.PositionOrphanInterval(intervalNew, intervalIn);
this.Count++;
}
/// <summary>
/// Given a current orphan interval and its previous interval, before
/// it was orphaned (which can be null), set it at the right place in the list
/// of intervals using the Start ordering and the previous interval as a hint.
/// </summary>
private void PositionOrphanInterval(CostInterval current, CostInterval previous)
{
previous ??= this.head;
while (previous != null && current.Start < previous.Start)
{
previous = previous.Previous;
}
while (previous?.Next != null && previous.Next.Start < current.Start)
{
previous = previous.Next;
}
this.ConnectIntervals(current, previous != null ? previous.Next : this.head);
this.ConnectIntervals(previous, current);
}
/// <summary>
/// Given two intervals, make 'prev' be the previous one of 'next' in the manager's interval list.
/// </summary>
private void ConnectIntervals(CostInterval prev, CostInterval next)
{
if (prev != null)
{
prev.Next = next;
}
else
{
this.head = next;
}
if (next != null)
{
next.Previous = prev;
}
}
/// <summary>
/// Given the cost and the position that define an interval, update the cost at
/// pixel 'i' if it is smaller than the previously computed value.
/// </summary>
private void UpdateCost(int i, int position, float cost)
{
int k = i - position;
if (this.Costs[i] > cost)
{
this.Costs[i] = cost;
this.DistArray[i] = (ushort)(k + 1);
}
}
}
}
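The fast path at the top of PushInterval (for intervals shorter than skipDistance) is just a per-pixel minimum update. The standalone sketch below restates it outside the class, with the same meaning for costCache, costs and distArray, to make the recurrence easier to see; it is an illustration, not an alternative implementation.
// For each pixel j covered by a match starting at 'position' with the given distance cost,
// the candidate cost is distanceCost + costCache[j - position]; keep it only if it beats the
// best cost found so far and remember the covered length in distArray.
static void UpdateCostsForShortInterval(
    double distanceCost, int position, int len, double[] costCache, float[] costs, ushort[] distArray)
{
    for (int j = position; j < position + len; j++)
    {
        int k = j - position;
        float candidate = (float)(distanceCost + costCache[k]);
        if (costs[j] > candidate)
        {
            costs[j] = candidate;
            distArray[j] = (ushort)(k + 1); // Number of pixels covered, from position up to j.
        }
    }
}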

102
src/ImageSharp/Formats/Webp/Lossless/CostModel.cs

@ -0,0 +1,102 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal class CostModel
{
private const int ValuesInBytes = 256;
/// <summary>
/// Initializes a new instance of the <see cref="CostModel"/> class.
/// </summary>
/// <param name="literalArraySize">The literal array size.</param>
public CostModel(int literalArraySize)
{
this.Alpha = new double[ValuesInBytes];
this.Red = new double[ValuesInBytes];
this.Blue = new double[ValuesInBytes];
this.Distance = new double[WebpConstants.NumDistanceCodes];
this.Literal = new double[literalArraySize];
}
public double[] Alpha { get; }
public double[] Red { get; }
public double[] Blue { get; }
public double[] Distance { get; }
public double[] Literal { get; }
public void Build(int xSize, int cacheBits, Vp8LBackwardRefs backwardRefs)
{
var histogram = new Vp8LHistogram(cacheBits);
using System.Collections.Generic.List<PixOrCopy>.Enumerator refsEnumerator = backwardRefs.Refs.GetEnumerator();
// The following code is similar to HistogramCreate but converts the distance to plane code.
while (refsEnumerator.MoveNext())
{
histogram.AddSinglePixOrCopy(refsEnumerator.Current, true, xSize);
}
ConvertPopulationCountTableToBitEstimates(histogram.NumCodes(), histogram.Literal, this.Literal);
ConvertPopulationCountTableToBitEstimates(ValuesInBytes, histogram.Red, this.Red);
ConvertPopulationCountTableToBitEstimates(ValuesInBytes, histogram.Blue, this.Blue);
ConvertPopulationCountTableToBitEstimates(ValuesInBytes, histogram.Alpha, this.Alpha);
ConvertPopulationCountTableToBitEstimates(WebpConstants.NumDistanceCodes, histogram.Distance, this.Distance);
}
public double GetLengthCost(int length)
{
int extraBits = 0;
int code = LosslessUtils.PrefixEncodeBits(length, ref extraBits);
return this.Literal[ValuesInBytes + code] + extraBits;
}
public double GetDistanceCost(int distance)
{
int extraBits = 0;
int code = LosslessUtils.PrefixEncodeBits(distance, ref extraBits);
return this.Distance[code] + extraBits;
}
public double GetCacheCost(uint idx)
{
int literalIdx = (int)(ValuesInBytes + WebpConstants.NumLengthCodes + idx);
return this.Literal[literalIdx];
}
public double GetLiteralCost(uint v) => this.Alpha[v >> 24] + this.Red[(v >> 16) & 0xff] + this.Literal[(v >> 8) & 0xff] + this.Blue[v & 0xff];
private static void ConvertPopulationCountTableToBitEstimates(int numSymbols, uint[] populationCounts, double[] output)
{
uint sum = 0;
int nonzeros = 0;
for (int i = 0; i < numSymbols; i++)
{
sum += populationCounts[i];
if (populationCounts[i] > 0)
{
nonzeros++;
}
}
if (nonzeros <= 1)
{
output.AsSpan(0, numSymbols).Fill(0);
}
else
{
double logsum = LosslessUtils.FastLog2(sum);
for (int i = 0; i < numSymbols; i++)
{
output[i] = logsum - LosslessUtils.FastLog2(populationCounts[i]);
}
}
}
}
}
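ConvertPopulationCountTableToBitEstimates is the usual entropy estimate: a symbol seen count times out of sum is charged log2(sum) - log2(count) = -log2(count / sum) bits. A self-contained restatement with a worked input; the class name and sample counts are made up.
using System;

internal static class BitEstimateSketch
{
    // Estimate the bit cost of each symbol from its population count: -log2(count / sum).
    public static double[] BitEstimates(uint[] counts)
    {
        uint sum = 0;
        foreach (uint c in counts)
        {
            sum += c;
        }

        var bits = new double[counts.Length];
        for (int i = 0; i < counts.Length; i++)
        {
            // Symbols that never occur are left at 0 here; the encoder handles the
            // degenerate one-symbol case separately.
            bits[i] = counts[i] == 0 ? 0 : Math.Log(sum, 2) - Math.Log(counts[i], 2);
        }

        return bits;
    }
}

// For counts { 8, 4, 4 }: sum = 16, so the estimates are 1, 2 and 2 bits respectively.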

14
src/ImageSharp/Formats/Webp/Lossless/CrunchConfig.cs

@ -0,0 +1,14 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Collections.Generic;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal class CrunchConfig
{
public EntropyIx EntropyIdx { get; set; }
public List<CrunchSubConfig> SubConfigs { get; } = new List<CrunchSubConfig>();
}
}

12
src/ImageSharp/Formats/Webp/Lossless/CrunchSubConfig.cs

@ -0,0 +1,12 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal class CrunchSubConfig
{
public int Lz77 { get; set; }
public bool DoNotCache { get; set; }
}
}

92
src/ImageSharp/Formats/Webp/Lossless/DominantCostRange.cs

@ -0,0 +1,92 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Data container to keep track of cost range for the three dominant entropy symbols.
/// </summary>
internal class DominantCostRange
{
/// <summary>
/// Initializes a new instance of the <see cref="DominantCostRange"/> class.
/// </summary>
public DominantCostRange()
{
this.LiteralMax = 0.0d;
this.LiteralMin = double.MaxValue;
this.RedMax = 0.0d;
this.RedMin = double.MaxValue;
this.BlueMax = 0.0d;
this.BlueMin = double.MaxValue;
}
public double LiteralMax { get; set; }
public double LiteralMin { get; set; }
public double RedMax { get; set; }
public double RedMin { get; set; }
public double BlueMax { get; set; }
public double BlueMin { get; set; }
public void UpdateDominantCostRange(Vp8LHistogram h)
{
if (this.LiteralMax < h.LiteralCost)
{
this.LiteralMax = h.LiteralCost;
}
if (this.LiteralMin > h.LiteralCost)
{
this.LiteralMin = h.LiteralCost;
}
if (this.RedMax < h.RedCost)
{
this.RedMax = h.RedCost;
}
if (this.RedMin > h.RedCost)
{
this.RedMin = h.RedCost;
}
if (this.BlueMax < h.BlueCost)
{
this.BlueMax = h.BlueCost;
}
if (this.BlueMin > h.BlueCost)
{
this.BlueMin = h.BlueCost;
}
}
public int GetHistoBinIndex(Vp8LHistogram h, int numPartitions)
{
int binId = GetBinIdForEntropy(this.LiteralMin, this.LiteralMax, h.LiteralCost, numPartitions);
binId = (binId * numPartitions) + GetBinIdForEntropy(this.RedMin, this.RedMax, h.RedCost, numPartitions);
binId = (binId * numPartitions) + GetBinIdForEntropy(this.BlueMin, this.BlueMax, h.BlueCost, numPartitions);
return binId;
}
private static int GetBinIdForEntropy(double min, double max, double val, int numPartitions)
{
double range = max - min;
if (range > 0.0d)
{
double delta = val - min;
return (int)((numPartitions - 1e-6) * delta / range);
}
else
{
return 0;
}
}
}
}
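GetBinIdForEntropy maps a cost linearly into one of numPartitions buckets between the observed minimum and maximum; the small epsilon keeps the maximum itself in the last bucket rather than overflowing into bucket numPartitions. A worked evaluation with made-up numbers:
// val = 7.5 in the range [0, 10] with 4 partitions:
// (4 - 1e-6) * (7.5 - 0) / (10 - 0) = 2.9999...  -> bin 2 (bins are 0..3).
// val = 10 (the maximum) gives (4 - 1e-6) * 1.0 = 3.9999...  -> bin 3, not 4.
int binId = (int)((4 - 1e-6) * (7.5 - 0.0) / (10.0 - 0.0)); // 2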

59
src/ImageSharp/Formats/Webp/Lossless/HTreeGroup.cs

@ -0,0 +1,59 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Collections.Generic;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Huffman table group.
/// Includes special handling for the following cases:
/// - IsTrivialLiteral: one common literal base for RED/BLUE/ALPHA (not GREEN)
/// - IsTrivialCode: only 1 code (no bit is read from the bitstream)
/// - UsePackedTable: few enough literal symbols, so all the bit codes can fit into a small look-up table PackedTable[]
/// The common literal base, if applicable, is stored in 'LiteralArb'.
/// </summary>
internal class HTreeGroup
{
public HTreeGroup(uint packedTableSize)
{
this.HTrees = new List<HuffmanCode[]>(WebpConstants.HuffmanCodesPerMetaCode);
this.PackedTable = new HuffmanCode[packedTableSize];
for (int i = 0; i < packedTableSize; i++)
{
this.PackedTable[i] = new HuffmanCode();
}
}
/// <summary>
/// Gets the Huffman trees. This has a maximum of <see cref="WebpConstants.HuffmanCodesPerMetaCode" /> (5) entries.
/// </summary>
public List<HuffmanCode[]> HTrees { get; }
/// <summary>
/// Gets or sets a value indicating whether the Huffman trees for the red, blue and alpha symbols are trivial (have a single code).
/// </summary>
public bool IsTrivialLiteral { get; set; }
/// <summary>
/// Gets or sets the literal ARGB value of the pixel.
/// If IsTrivialLiteral is true, this is the ARGB value of the pixel, with the green channel set to zero.
/// </summary>
public uint LiteralArb { get; set; }
/// <summary>
/// Gets or sets a value indicating whether there is only one code.
/// </summary>
public bool IsTrivialCode { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to use the packed table below for short literal codes.
/// </summary>
public bool UsePackedTable { get; set; }
/// <summary>
/// Gets or sets the table mapping input bits to packed values, or the escape case to a literal code.
/// </summary>
public HuffmanCode[] PackedTable { get; set; }
}
}

18
src/ImageSharp/Formats/Webp/Lossless/HistogramBinInfo.cs

@ -0,0 +1,18 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal struct HistogramBinInfo
{
/// <summary>
/// Position of the histogram that accumulates all histograms with the same binId.
/// </summary>
public short First;
/// <summary>
/// Number of combine failures per binId.
/// </summary>
public ushort NumCombineFailures;
}
}

685
src/ImageSharp/Formats/Webp/Lossless/HistogramEncoder.cs

@ -0,0 +1,685 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal class HistogramEncoder
{
/// <summary>
/// Number of partitions for the three dominant (literal, red and blue) symbol costs.
/// </summary>
private const int NumPartitions = 4;
/// <summary>
/// The size of the bin-hash corresponding to the three dominant costs.
/// </summary>
private const int BinSize = NumPartitions * NumPartitions * NumPartitions;
/// <summary>
/// Maximum number of histograms allowed in greedy combining algorithm.
/// </summary>
private const int MaxHistoGreedy = 100;
private const uint NonTrivialSym = 0xffffffff;
private const ushort InvalidHistogramSymbol = ushort.MaxValue;
public static void GetHistoImageSymbols(int xSize, int ySize, Vp8LBackwardRefs refs, int quality, int histoBits, int cacheBits, List<Vp8LHistogram> imageHisto, Vp8LHistogram tmpHisto, ushort[] histogramSymbols)
{
int histoXSize = histoBits > 0 ? LosslessUtils.SubSampleSize(xSize, histoBits) : 1;
int histoYSize = histoBits > 0 ? LosslessUtils.SubSampleSize(ySize, histoBits) : 1;
int imageHistoRawSize = histoXSize * histoYSize;
int entropyCombineNumBins = BinSize;
ushort[] mapTmp = new ushort[imageHistoRawSize];
ushort[] clusterMappings = new ushort[imageHistoRawSize];
var origHisto = new List<Vp8LHistogram>(imageHistoRawSize);
for (int i = 0; i < imageHistoRawSize; i++)
{
origHisto.Add(new Vp8LHistogram(cacheBits));
}
// Construct the histograms from the backward references.
HistogramBuild(xSize, histoBits, refs, origHisto);
// Copy the histograms and compute their bit costs. histogramSymbols is optimized.
int numUsed = HistogramCopyAndAnalyze(origHisto, imageHisto, histogramSymbols);
bool entropyCombine = numUsed > entropyCombineNumBins * 2 && quality < 100;
if (entropyCombine)
{
ushort[] binMap = mapTmp;
int numClusters = numUsed;
double combineCostFactor = GetCombineCostFactor(imageHistoRawSize, quality);
HistogramAnalyzeEntropyBin(imageHisto, binMap);
// Collapse histograms with similar entropy.
HistogramCombineEntropyBin(imageHisto, histogramSymbols, clusterMappings, tmpHisto, binMap, entropyCombineNumBins, combineCostFactor);
OptimizeHistogramSymbols(clusterMappings, numClusters, mapTmp, histogramSymbols);
}
float x = quality / 100.0f;
// Cubic ramp between 1 and MaxHistoGreedy:
int thresholdSize = (int)(1 + (x * x * x * (MaxHistoGreedy - 1)));
bool doGreedy = HistogramCombineStochastic(imageHisto, thresholdSize);
if (doGreedy)
{
RemoveEmptyHistograms(imageHisto);
HistogramCombineGreedy(imageHisto);
}
// Find the optimal map from original histograms to the final ones.
RemoveEmptyHistograms(imageHisto);
HistogramRemap(origHisto, imageHisto, histogramSymbols);
}
private static void RemoveEmptyHistograms(List<Vp8LHistogram> histograms)
{
int size = 0;
for (int i = 0; i < histograms.Count; i++)
{
if (histograms[i] == null)
{
continue;
}
histograms[size++] = histograms[i];
}
histograms.RemoveRange(size, histograms.Count - size);
}
/// <summary>
/// Construct the histograms from the backward references.
/// </summary>
private static void HistogramBuild(int xSize, int histoBits, Vp8LBackwardRefs backwardRefs, List<Vp8LHistogram> histograms)
{
int x = 0, y = 0;
int histoXSize = LosslessUtils.SubSampleSize(xSize, histoBits);
using List<PixOrCopy>.Enumerator backwardRefsEnumerator = backwardRefs.Refs.GetEnumerator();
while (backwardRefsEnumerator.MoveNext())
{
PixOrCopy v = backwardRefsEnumerator.Current;
int ix = ((y >> histoBits) * histoXSize) + (x >> histoBits);
histograms[ix].AddSinglePixOrCopy(v, false);
x += v.Len;
while (x >= xSize)
{
x -= xSize;
y++;
}
}
}
/// <summary>
/// Partition histograms to different entropy bins for three dominant (literal,
/// red and blue) symbol costs and compute the histogram aggregate bitCost.
/// </summary>
private static void HistogramAnalyzeEntropyBin(List<Vp8LHistogram> histograms, ushort[] binMap)
{
int histoSize = histograms.Count;
var costRange = new DominantCostRange();
// Analyze the dominant (literal, red and blue) entropy costs.
for (int i = 0; i < histoSize; i++)
{
if (histograms[i] == null)
{
continue;
}
costRange.UpdateDominantCostRange(histograms[i]);
}
// Bin-hash histograms on the three dominant (literal, red and blue)
// symbol costs and store the resulting binId for each histogram.
for (int i = 0; i < histoSize; i++)
{
if (histograms[i] == null)
{
continue;
}
binMap[i] = (ushort)costRange.GetHistoBinIndex(histograms[i], NumPartitions);
}
}
private static int HistogramCopyAndAnalyze(List<Vp8LHistogram> origHistograms, List<Vp8LHistogram> histograms, ushort[] histogramSymbols)
{
for (int clusterId = 0, i = 0; i < origHistograms.Count; i++)
{
Vp8LHistogram origHistogram = origHistograms[i];
origHistogram.UpdateHistogramCost();
// Skip the histogram if it is completely empty, which can happen for tiles with no information (when they are skipped because of LZ77).
if (!origHistogram.IsUsed[0] && !origHistogram.IsUsed[1] && !origHistogram.IsUsed[2] && !origHistogram.IsUsed[3] && !origHistogram.IsUsed[4])
{
origHistograms[i] = null;
histograms[i] = null;
histogramSymbols[i] = InvalidHistogramSymbol;
}
else
{
histograms[i] = (Vp8LHistogram)origHistogram.DeepClone();
histogramSymbols[i] = (ushort)clusterId++;
}
}
int numUsed = histogramSymbols.Count(h => h != InvalidHistogramSymbol);
return numUsed;
}
private static void HistogramCombineEntropyBin(List<Vp8LHistogram> histograms, ushort[] clusters, ushort[] clusterMappings, Vp8LHistogram curCombo, ushort[] binMap, int numBins, double combineCostFactor)
{
var binInfo = new HistogramBinInfo[BinSize];
for (int idx = 0; idx < numBins; idx++)
{
binInfo[idx].First = -1;
binInfo[idx].NumCombineFailures = 0;
}
// By default, a cluster matches itself.
for (int idx = 0; idx < histograms.Count; idx++)
{
clusterMappings[idx] = (ushort)idx;
}
var indicesToRemove = new List<int>();
for (int idx = 0; idx < histograms.Count; idx++)
{
if (histograms[idx] == null)
{
continue;
}
int binId = binMap[idx];
int first = binInfo[binId].First;
if (first == -1)
{
binInfo[binId].First = (short)idx;
}
else
{
// Try to merge #idx into #first (both share the same binId)
double bitCost = histograms[idx].BitCost;
double bitCostThresh = -bitCost * combineCostFactor;
double currCostDiff = histograms[first].AddEval(histograms[idx], bitCostThresh, curCombo);
if (currCostDiff < bitCostThresh)
{
// Try to merge two histograms only if the combo is a trivial one or
// the two candidate histograms are already non-trivial.
// For some images, 'tryCombine' turns out to be false for a lot of
// histogram pairs. In that case, we fall back to combining
// histograms as usual to avoid increasing the header size.
bool tryCombine = curCombo.TrivialSymbol != NonTrivialSym || (histograms[idx].TrivialSymbol == NonTrivialSym && histograms[first].TrivialSymbol == NonTrivialSym);
int maxCombineFailures = 32;
if (tryCombine || binInfo[binId].NumCombineFailures >= maxCombineFailures)
{
// Move the (better) merged histogram to its final slot.
Vp8LHistogram tmp = curCombo;
curCombo = histograms[first];
histograms[first] = tmp;
histograms[idx] = null;
indicesToRemove.Add(idx);
clusterMappings[clusters[idx]] = clusters[first];
}
else
{
binInfo[binId].NumCombineFailures++;
}
}
}
}
foreach (int index in indicesToRemove.OrderByDescending(i => i))
{
histograms.RemoveAt(index);
}
}
/// <summary>
/// Given a Histogram set, the mapping of clusters 'clusterMapping' and the
/// current assignment of the cells in 'symbols', merge the clusters and assign the smallest possible cluster values.
/// </summary>
private static void OptimizeHistogramSymbols(ushort[] clusterMappings, int numClusters, ushort[] clusterMappingsTmp, ushort[] symbols)
{
bool doContinue = true;
// First, assign the lowest cluster to each pixel.
while (doContinue)
{
doContinue = false;
for (int i = 0; i < numClusters; i++)
{
int k = clusterMappings[i];
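// Follow the mapping chain and flatten it as we go (union-find style path compression),
// so that each cluster ends up pointing directly at its lowest representative.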
while (k != clusterMappings[k])
{
clusterMappings[k] = clusterMappings[clusterMappings[k]];
k = clusterMappings[k];
}
if (k != clusterMappings[i])
{
doContinue = true;
clusterMappings[i] = (ushort)k;
}
}
}
// Create a mapping from a cluster id to its minimal version.
int clusterMax = 0;
clusterMappingsTmp.AsSpan().Fill(0);
// Re-map the ids.
for (int i = 0; i < symbols.Length; i++)
{
if (symbols[i] == InvalidHistogramSymbol)
{
continue;
}
int cluster = clusterMappings[symbols[i]];
if (cluster > 0 && clusterMappingsTmp[cluster] == 0)
{
clusterMax++;
clusterMappingsTmp[cluster] = (ushort)clusterMax;
}
symbols[i] = clusterMappingsTmp[cluster];
}
}
/// <summary>
/// Perform histogram aggregation using a stochastic approach.
/// </summary>
/// <returns>true if a greedy approach needs to be performed afterwards, false otherwise.</returns>
private static bool HistogramCombineStochastic(List<Vp8LHistogram> histograms, int minClusterSize)
{
uint seed = 1;
int triesWithNoSuccess = 0;
int numUsed = histograms.Count(h => h != null);
int outerIters = numUsed;
int numTriesNoSuccess = outerIters / 2;
if (numUsed < minClusterSize)
{
return true;
}
// Priority list of histogram pairs. Its size impacts the quality of the compression and the speed:
// the smaller it is, the faster, but the worse the compression.
var histoPriorityList = new List<HistogramPair>();
int maxSize = 9;
// Fill the initial mapping.
int[] mappings = new int[histograms.Count];
for (int j = 0, iter = 0; iter < histograms.Count; iter++)
{
if (histograms[iter] == null)
{
continue;
}
mappings[j++] = iter;
}
// Collapse similar histograms.
for (int iter = 0; iter < outerIters && numUsed >= minClusterSize && ++triesWithNoSuccess < numTriesNoSuccess; iter++)
{
double bestCost = histoPriorityList.Count == 0 ? 0.0d : histoPriorityList[0].CostDiff;
int numTries = numUsed / 2;
uint randRange = (uint)((numUsed - 1) * numUsed);
// Pick random samples.
for (int j = 0; numUsed >= 2 && j < numTries; j++)
{
// Choose two different histograms at random and try to combine them.
uint tmp = MyRand(ref seed) % randRange;
int idx1 = (int)(tmp / (numUsed - 1));
int idx2 = (int)(tmp % (numUsed - 1));
if (idx2 >= idx1)
{
idx2++;
}
idx1 = mappings[idx1];
idx2 = mappings[idx2];
// Calculate cost reduction on combination.
double currCost = HistoPriorityListPush(histoPriorityList, maxSize, histograms, idx1, idx2, bestCost);
// Found a better pair?
if (currCost < 0)
{
bestCost = currCost;
if (histoPriorityList.Count == maxSize)
{
break;
}
}
}
if (histoPriorityList.Count == 0)
{
continue;
}
// Get the best histograms.
int bestIdx1 = histoPriorityList[0].Idx1;
int bestIdx2 = histoPriorityList[0].Idx2;
int mappingIndex = Array.IndexOf(mappings, bestIdx2);
Span<int> src = mappings.AsSpan(mappingIndex + 1, numUsed - mappingIndex - 1);
Span<int> dst = mappings.AsSpan(mappingIndex);
src.CopyTo(dst);
// Merge the histograms and remove bestIdx2 from the list.
HistogramAdd(histograms[bestIdx2], histograms[bestIdx1], histograms[bestIdx1]);
histograms.ElementAt(bestIdx1).BitCost = histoPriorityList[0].CostCombo;
histograms[bestIdx2] = null;
numUsed--;
for (int j = 0; j < histoPriorityList.Count;)
{
HistogramPair p = histoPriorityList[j];
bool isIdx1Best = p.Idx1 == bestIdx1 || p.Idx1 == bestIdx2;
bool isIdx2Best = p.Idx2 == bestIdx1 || p.Idx2 == bestIdx2;
bool doEval = false;
// The front pair could have been duplicated by a random pick so
// check for it all the time nevertheless.
if (isIdx1Best && isIdx2Best)
{
histoPriorityList[j] = histoPriorityList[histoPriorityList.Count - 1];
histoPriorityList.RemoveAt(histoPriorityList.Count - 1);
continue;
}
// Any pair containing one of the two best indices should only refer to
// bestIdx1. Its cost should also be updated.
if (isIdx1Best)
{
p.Idx1 = bestIdx1;
doEval = true;
}
else if (isIdx2Best)
{
p.Idx2 = bestIdx1;
doEval = true;
}
// Make sure the index order is respected.
if (p.Idx1 > p.Idx2)
{
int tmp = p.Idx2;
p.Idx2 = p.Idx1;
p.Idx1 = tmp;
}
if (doEval)
{
// Re-evaluate the cost of an updated pair.
HistoListUpdatePair(histograms[p.Idx1], histograms[p.Idx2], 0.0d, p);
if (p.CostDiff >= 0.0d)
{
histoPriorityList[j] = histoPriorityList[histoPriorityList.Count - 1];
histoPriorityList.RemoveAt(histoPriorityList.Count - 1);
continue;
}
}
HistoListUpdateHead(histoPriorityList, p);
j++;
}
triesWithNoSuccess = 0;
}
bool doGreedy = numUsed <= minClusterSize;
return doGreedy;
}
private static void HistogramCombineGreedy(List<Vp8LHistogram> histograms)
{
int histoSize = histograms.Count(h => h != null);
// Priority list of histogram pairs.
var histoPriorityList = new List<HistogramPair>();
int maxSize = histoSize * histoSize;
for (int i = 0; i < histoSize; i++)
{
if (histograms[i] == null)
{
continue;
}
for (int j = i + 1; j < histoSize; j++)
{
if (histograms[j] == null)
{
continue;
}
HistoPriorityListPush(histoPriorityList, maxSize, histograms, i, j, 0.0d);
}
}
while (histoPriorityList.Count > 0)
{
int idx1 = histoPriorityList[0].Idx1;
int idx2 = histoPriorityList[0].Idx2;
HistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]);
histograms[idx1].BitCost = histoPriorityList[0].CostCombo;
// Remove merged histogram.
histograms[idx2] = null;
// Remove pairs intersecting the just combined best pair.
for (int i = 0; i < histoPriorityList.Count;)
{
HistogramPair p = histoPriorityList.ElementAt(i);
if (p.Idx1 == idx1 || p.Idx2 == idx1 || p.Idx1 == idx2 || p.Idx2 == idx2)
{
// Replace the item at position i with the last one and shrink the list.
histoPriorityList[i] = histoPriorityList[histoPriorityList.Count - 1];
histoPriorityList.RemoveAt(histoPriorityList.Count - 1);
}
else
{
HistoListUpdateHead(histoPriorityList, p);
i++;
}
}
// Push new pairs formed with combined histogram to the list.
for (int i = 0; i < histoSize; i++)
{
if (i == idx1 || histograms[i] == null)
{
continue;
}
HistoPriorityListPush(histoPriorityList, maxSize, histograms, idx1, i, 0.0d);
}
}
}
private static void HistogramRemap(List<Vp8LHistogram> input, List<Vp8LHistogram> output, ushort[] symbols)
{
int inSize = input.Count;
int outSize = output.Count;
if (outSize > 1)
{
for (int i = 0; i < inSize; i++)
{
if (input[i] == null)
{
// Arbitrarily set to the previous value if unused to help future LZ77.
symbols[i] = symbols[i - 1];
continue;
}
int bestOut = 0;
double bestBits = double.MaxValue;
for (int k = 0; k < outSize; k++)
{
double curBits = output[k].AddThresh(input[i], bestBits);
if (k == 0 || curBits < bestBits)
{
bestBits = curBits;
bestOut = k;
}
}
symbols[i] = (ushort)bestOut;
}
}
else
{
for (int i = 0; i < inSize; i++)
{
symbols[i] = 0;
}
}
// Recompute each output.
int paletteCodeBits = output.First().PaletteCodeBits;
output.Clear();
for (int i = 0; i < outSize; i++)
{
output.Add(new Vp8LHistogram(paletteCodeBits));
}
for (int i = 0; i < inSize; i++)
{
if (input[i] == null)
{
continue;
}
int idx = symbols[i];
input[i].Add(output[idx], output[idx]);
}
}
/// <summary>
/// Create a pair from indices "idx1" and "idx2", provided its cost is lower than "threshold" (a negative entropy).
/// </summary>
/// <returns>The cost of the pair, or 0 if it is above the threshold.</returns>
private static double HistoPriorityListPush(List<HistogramPair> histoList, int maxSize, List<Vp8LHistogram> histograms, int idx1, int idx2, double threshold)
{
var pair = new HistogramPair();
if (histoList.Count == maxSize)
{
return 0.0d;
}
if (idx1 > idx2)
{
int tmp = idx2;
idx2 = idx1;
idx1 = tmp;
}
pair.Idx1 = idx1;
pair.Idx2 = idx2;
Vp8LHistogram h1 = histograms[idx1];
Vp8LHistogram h2 = histograms[idx2];
HistoListUpdatePair(h1, h2, threshold, pair);
// Do not even consider the pair if it does not improve the entropy.
if (pair.CostDiff >= threshold)
{
return 0.0d;
}
histoList.Add(pair);
HistoListUpdateHead(histoList, pair);
return pair.CostDiff;
}
/// <summary>
/// Update the cost diff and combo of a pair of histograms. This needs to be called when the histograms have been merged with a third one.
/// </summary>
private static void HistoListUpdatePair(Vp8LHistogram h1, Vp8LHistogram h2, double threshold, HistogramPair pair)
{
double sumCost = h1.BitCost + h2.BitCost;
pair.CostCombo = 0.0d;
h1.GetCombinedHistogramEntropy(h2, sumCost + threshold, costInitial: pair.CostCombo, out double cost);
pair.CostCombo = cost;
pair.CostDiff = pair.CostCombo - sumCost;
}
/// <summary>
/// Check whether a pair in the list should be updated as head or not.
/// </summary>
private static void HistoListUpdateHead(List<HistogramPair> histoList, HistogramPair pair)
{
if (pair.CostDiff < histoList[0].CostDiff)
{
// Replace the best pair.
int oldIdx = histoList.IndexOf(pair);
histoList[oldIdx] = histoList[0];
histoList[0] = pair;
}
}
private static void HistogramAdd(Vp8LHistogram a, Vp8LHistogram b, Vp8LHistogram output)
{
a.Add(b, output);
output.TrivialSymbol = a.TrivialSymbol == b.TrivialSymbol ? a.TrivialSymbol : NonTrivialSym;
}
private static double GetCombineCostFactor(int histoSize, int quality)
{
double combineCostFactor = 0.16d;
if (quality < 90)
{
if (histoSize > 256)
{
combineCostFactor /= 2.0d;
}
if (histoSize > 512)
{
combineCostFactor /= 2.0d;
}
if (histoSize > 1024)
{
combineCostFactor /= 2.0d;
}
if (quality <= 50)
{
combineCostFactor /= 2.0d;
}
}
return combineCostFactor;
}
// Implement a Lehmer random number generator with a multiplicative constant of 48271 and a modulo constant of 2^31 - 1.
[MethodImpl(InliningOptions.ShortMethod)]
private static uint MyRand(ref uint seed)
{
seed = (uint)(((ulong)seed * 48271u) % 2147483647u);
return seed;
}
}
}
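HistogramCombineStochastic draws candidate pairs with the Lehmer generator in MyRand and turns a single draw into two distinct histogram indices. The sketch below isolates that index derivation; the class and method names are illustrative and it assumes numUsed >= 2.
internal static class RandomPairSketch
{
    // Lehmer generator: seed_{n+1} = (seed_n * 48271) mod (2^31 - 1).
    private static uint NextRand(ref uint seed)
    {
        seed = (uint)(((ulong)seed * 48271u) % 2147483647u);
        return seed;
    }

    // Split one draw over (numUsed - 1) * numUsed outcomes into two distinct indices in [0, numUsed).
    public static (int Idx1, int Idx2) PickPair(ref uint seed, int numUsed)
    {
        uint randRange = (uint)((numUsed - 1) * numUsed);
        uint tmp = NextRand(ref seed) % randRange;
        int idx1 = (int)(tmp / (numUsed - 1));
        int idx2 = (int)(tmp % (numUsed - 1));
        if (idx2 >= idx1)
        {
            idx2++; // Shift so idx2 can never equal idx1.
        }

        return (idx1, idx2);
    }
}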

22
src/ImageSharp/Formats/Webp/Lossless/HistogramPair.cs

@ -0,0 +1,22 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Diagnostics;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Pair of histograms. Negative Idx1 value means that pair is out-of-date.
/// </summary>
[DebuggerDisplay("Idx1: {Idx1}, Idx2: {Idx2}, CostDiff: {CostDiff}, CostCombo: {CostCombo}")]
internal class HistogramPair
{
public int Idx1 { get; set; }
public int Idx2 { get; set; }
public double CostDiff { get; set; }
public double CostCombo { get; set; }
}
}

36
src/ImageSharp/Formats/Webp/Lossless/HuffIndex.cs

@ -0,0 +1,36 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Five Huffman codes are used at each meta code.
/// </summary>
internal static class HuffIndex
{
/// <summary>
/// Green + length prefix codes + color cache codes.
/// </summary>
public const int Green = 0;
/// <summary>
/// Red.
/// </summary>
public const int Red = 1;
/// <summary>
/// Blue.
/// </summary>
public const int Blue = 2;
/// <summary>
/// Alpha.
/// </summary>
public const int Alpha = 3;
/// <summary>
/// Distance prefix codes.
/// </summary>
public const int Dist = 4;
}
}

24
src/ImageSharp/Formats/Webp/Lossless/HuffmanCode.cs

@ -0,0 +1,24 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Diagnostics;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// A classic way to do entropy coding where a smaller number of bits are used for more frequent codes.
/// </summary>
[DebuggerDisplay("BitsUsed: {BitsUsed}, Value: {Value}")]
internal class HuffmanCode
{
/// <summary>
/// Gets or sets the number of bits used for this symbol.
/// </summary>
public int BitsUsed { get; set; }
/// <summary>
/// Gets or sets the symbol value or table offset.
/// </summary>
public uint Value { get; set; }
}
}

64
src/ImageSharp/Formats/Webp/Lossless/HuffmanTree.cs

@ -0,0 +1,64 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Diagnostics;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Represents the Huffman tree.
/// </summary>
[DebuggerDisplay("TotalCount = {TotalCount}, Value = {Value}, Left = {PoolIndexLeft}, Right = {PoolIndexRight}")]
internal struct HuffmanTree : IDeepCloneable
{
/// <summary>
/// Initializes a new instance of the <see cref="HuffmanTree"/> struct.
/// </summary>
/// <param name="other">The HuffmanTree to create an instance from.</param>
private HuffmanTree(HuffmanTree other)
{
this.TotalCount = other.TotalCount;
this.Value = other.Value;
this.PoolIndexLeft = other.PoolIndexLeft;
this.PoolIndexRight = other.PoolIndexRight;
}
/// <summary>
/// Gets or sets the symbol frequency.
/// </summary>
public int TotalCount { get; set; }
/// <summary>
/// Gets or sets the symbol value.
/// </summary>
public int Value { get; set; }
/// <summary>
/// Gets or sets the index for the left sub-tree.
/// </summary>
public int PoolIndexLeft { get; set; }
/// <summary>
/// Gets or sets the index for the right sub-tree.
/// </summary>
public int PoolIndexRight { get; set; }
public static int Compare(HuffmanTree t1, HuffmanTree t2)
{
if (t1.TotalCount > t2.TotalCount)
{
return -1;
}
else if (t1.TotalCount < t2.TotalCount)
{
return 1;
}
else
{
return t1.Value < t2.Value ? -1 : 1;
}
}
public IDeepCloneable DeepClone() => new HuffmanTree(this);
}
}

26
src/ImageSharp/Formats/Webp/Lossless/HuffmanTreeCode.cs

@ -0,0 +1,26 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Represents the tree codes (depth and bits array).
/// </summary>
internal struct HuffmanTreeCode
{
/// <summary>
/// Gets or sets the number of symbols.
/// </summary>
public int NumSymbols { get; set; }
/// <summary>
/// Gets or sets the code lengths of the symbols.
/// </summary>
public byte[] CodeLengths { get; set; }
/// <summary>
/// Gets or sets the symbol Codes.
/// </summary>
public short[] Codes { get; set; }
}
}

24
src/ImageSharp/Formats/Webp/Lossless/HuffmanTreeToken.cs

@ -0,0 +1,24 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Diagnostics;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Holds the tree header in coded form.
/// </summary>
[DebuggerDisplay("Code = {Code}, ExtraBits = {ExtraBits}")]
internal class HuffmanTreeToken
{
/// <summary>
/// Gets or sets the code. Value (0..15) or escape code (16, 17, 18).
/// </summary>
public byte Code { get; set; }
/// <summary>
/// Gets or sets the extra bits for escape codes.
/// </summary>
public byte ExtraBits { get; set; }
}
}

656
src/ImageSharp/Formats/Webp/Lossless/HuffmanUtils.cs

@ -0,0 +1,656 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Utility functions related to creating the huffman tables.
/// </summary>
internal static class HuffmanUtils
{
public const int HuffmanTableBits = 8;
public const int HuffmanPackedBits = 6;
public const int HuffmanTableMask = (1 << HuffmanTableBits) - 1;
public const uint HuffmanPackedTableSize = 1u << HuffmanPackedBits;
// Pre-reversed 4-bit values.
private static readonly byte[] ReversedBits =
{
0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf
};
public static void CreateHuffmanTree(uint[] histogram, int treeDepthLimit, bool[] bufRle, HuffmanTree[] huffTree, HuffmanTreeCode huffCode)
{
int numSymbols = huffCode.NumSymbols;
bufRle.AsSpan().Fill(false);
OptimizeHuffmanForRle(numSymbols, bufRle, histogram);
GenerateOptimalTree(huffTree, histogram, numSymbols, treeDepthLimit, huffCode.CodeLengths);
// Create the actual bit codes for the bit lengths.
ConvertBitDepthsToSymbols(huffCode);
}
/// <summary>
/// Change the population counts in a way that the consequent
/// Huffman tree compression, especially its RLE-part, gives smaller output.
/// </summary>
public static void OptimizeHuffmanForRle(int length, bool[] goodForRle, uint[] counts)
{
// 1) Let's make the Huffman code more compatible with rle encoding.
for (; length >= 0; --length)
{
if (length == 0)
{
return; // All zeros.
}
if (counts[length - 1] != 0)
{
// Now counts[0..length - 1] does not have trailing zeros.
break;
}
}
// 2) Let's mark all population counts that already can be encoded with an rle code.
// Let's not spoil any of the existing good rle codes.
// Mark any sequence of 0's that is longer than 5 as goodForRle.
// Mark any sequence of non-0's that is longer than 7 as goodForRle.
uint symbol = counts[0];
int stride = 0;
for (int i = 0; i < length + 1; i++)
{
if (i == length || counts[i] != symbol)
{
if ((symbol == 0 && stride >= 5) || (symbol != 0 && stride >= 7))
{
for (int k = 0; k < stride; k++)
{
goodForRle[i - k - 1] = true;
}
}
stride = 1;
if (i != length)
{
symbol = counts[i];
}
}
else
{
++stride;
}
}
// 3) Let's replace those population counts that lead to more rle codes.
stride = 0;
uint limit = counts[0];
uint sum = 0;
for (int i = 0; i < length + 1; i++)
{
if (i == length || goodForRle[i] || (i != 0 && goodForRle[i - 1]) || !ValuesShouldBeCollapsedToStrideAverage((int)counts[i], (int)limit))
{
if (stride >= 4 || (stride >= 3 && sum == 0))
{
uint k;
// The stride must end, collapse what we have, if we have enough (4).
uint count = (uint)((sum + (stride / 2)) / stride);
if (count < 1)
{
count = 1;
}
if (sum == 0)
{
// Don't make an all zeros stride to be upgraded to ones.
count = 0;
}
for (k = 0; k < stride; k++)
{
// We don't want to change the value at counts[i],
// which already belongs to the next stride; hence the - 1.
counts[i - k - 1] = count;
}
}
stride = 0;
sum = 0;
if (i < length - 3)
{
// All interesting strides have a count of at least 4, at least when non-zero.
limit = (counts[i] + counts[i + 1] +
counts[i + 2] + counts[i + 3] + 2) / 4;
}
else if (i < length)
{
limit = counts[i];
}
else
{
limit = 0;
}
}
++stride;
if (i != length)
{
sum += counts[i];
if (stride >= 4)
{
limit = (uint)((sum + (stride / 2)) / stride);
}
}
}
}
/// <summary>
/// Create an optimal Huffman tree.
/// </summary>
/// <see href="http://en.wikipedia.org/wiki/Huffman_coding"/>
/// <param name="tree">The huffman tree.</param>
/// <param name="histogram">The histogram.</param>
/// <param name="histogramSize">The size of the histogram.</param>
/// <param name="treeDepthLimit">The tree depth limit.</param>
/// <param name="bitDepths">How many bits are used for the symbol.</param>
public static void GenerateOptimalTree(HuffmanTree[] tree, uint[] histogram, int histogramSize, int treeDepthLimit, byte[] bitDepths)
{
uint countMin;
int treeSizeOrig = 0;
for (int i = 0; i < histogramSize; i++)
{
if (histogram[i] != 0)
{
++treeSizeOrig;
}
}
if (treeSizeOrig == 0)
{
return;
}
Span<HuffmanTree> treePool = tree.AsSpan(treeSizeOrig);
// For block sizes with less than 64k symbols we never need to do a
// second iteration of this loop.
for (countMin = 1; ; countMin *= 2)
{
int treeSize = treeSizeOrig;
// We need to pack the Huffman tree in treeDepthLimit bits.
// So, we try by faking histogram entries to be at least 'countMin'.
int idx = 0;
for (int j = 0; j < histogramSize; j++)
{
if (histogram[j] != 0)
{
uint count = histogram[j] < countMin ? countMin : histogram[j];
tree[idx].TotalCount = (int)count;
tree[idx].Value = j;
tree[idx].PoolIndexLeft = -1;
tree[idx].PoolIndexRight = -1;
idx++;
}
}
// Build the Huffman tree.
HuffmanTree[] treeCopy = tree.AsSpan().Slice(0, treeSize).ToArray();
Array.Sort(treeCopy, HuffmanTree.Compare);
treeCopy.AsSpan().CopyTo(tree);
if (treeSize > 1)
{
// Normal case.
int treePoolSize = 0;
while (treeSize > 1)
{
// Finish when we have only one root.
treePool[treePoolSize++] = (HuffmanTree)tree[treeSize - 1].DeepClone();
treePool[treePoolSize++] = (HuffmanTree)tree[treeSize - 2].DeepClone();
int count = treePool[treePoolSize - 1].TotalCount + treePool[treePoolSize - 2].TotalCount;
treeSize -= 2;
// Search for the insertion point.
int k;
for (k = 0; k < treeSize; k++)
{
if (tree[k].TotalCount <= count)
{
break;
}
}
int endIdx = k + 1;
int num = treeSize - k;
int startIdx = endIdx + num - 1;
for (int i = startIdx; i >= endIdx; i--)
{
tree[i] = (HuffmanTree)tree[i - 1].DeepClone();
}
tree[k].TotalCount = count;
tree[k].Value = -1;
tree[k].PoolIndexLeft = treePoolSize - 1;
tree[k].PoolIndexRight = treePoolSize - 2;
treeSize++;
}
SetBitDepths(tree, treePool, bitDepths, 0);
}
else if (treeSize == 1)
{
// Trivial case: only one element.
bitDepths[tree[0].Value] = 1;
}
// Test if this Huffman tree satisfies our 'treeDepthLimit' criteria.
int maxDepth = bitDepths[0];
for (int j = 1; j < histogramSize; j++)
{
if (maxDepth < bitDepths[j])
{
maxDepth = bitDepths[j];
}
}
if (maxDepth <= treeDepthLimit)
{
break;
}
}
}
public static int CreateCompressedHuffmanTree(HuffmanTreeCode tree, HuffmanTreeToken[] tokensArray)
{
int depthSize = tree.NumSymbols;
int prevValue = 8; // 8 is the initial value for rle.
int i = 0;
int tokenPos = 0;
while (i < depthSize)
{
int value = tree.CodeLengths[i];
int k = i + 1;
while (k < depthSize && tree.CodeLengths[k] == value)
{
k++;
}
int runs = k - i;
if (value == 0)
{
tokenPos += CodeRepeatedZeros(runs, tokensArray.AsSpan(tokenPos));
}
else
{
tokenPos += CodeRepeatedValues(runs, tokensArray.AsSpan(tokenPos), value, prevValue);
prevValue = value;
}
i += runs;
}
return tokenPos;
}
public static int BuildHuffmanTable(Span<HuffmanCode> table, int rootBits, int[] codeLengths, int codeLengthsSize)
{
Guard.MustBeGreaterThan(rootBits, 0, nameof(rootBits));
Guard.NotNull(codeLengths, nameof(codeLengths));
Guard.MustBeGreaterThan(codeLengthsSize, 0, nameof(codeLengthsSize));
// sorted[codeLengthsSize] is a pre-allocated array for sorting symbols by code length.
int[] sorted = new int[codeLengthsSize];
int totalSize = 1 << rootBits; // total size root table + 2nd level table.
int len; // current code length.
int symbol; // symbol index in original or sorted table.
int[] counts = new int[WebpConstants.MaxAllowedCodeLength + 1]; // number of codes of each length.
int[] offsets = new int[WebpConstants.MaxAllowedCodeLength + 1]; // offsets in sorted table for each length.
// Build histogram of code lengths.
for (symbol = 0; symbol < codeLengthsSize; ++symbol)
{
int codeLengthOfSymbol = codeLengths[symbol];
if (codeLengthOfSymbol > WebpConstants.MaxAllowedCodeLength)
{
return 0;
}
counts[codeLengthOfSymbol]++;
}
// Error, all code lengths are zeros.
if (counts[0] == codeLengthsSize)
{
return 0;
}
// Generate offsets into sorted symbol table by code length.
offsets[1] = 0;
for (len = 1; len < WebpConstants.MaxAllowedCodeLength; ++len)
{
int codesOfLength = counts[len];
if (codesOfLength > 1 << len)
{
return 0;
}
offsets[len + 1] = offsets[len] + codesOfLength;
}
// Sort symbols by length, by symbol order within each length.
for (symbol = 0; symbol < codeLengthsSize; ++symbol)
{
int symbolCodeLength = codeLengths[symbol];
if (symbolCodeLength > 0)
{
sorted[offsets[symbolCodeLength]++] = symbol;
}
}
// Special case code with only one value.
if (offsets[WebpConstants.MaxAllowedCodeLength] == 1)
{
var huffmanCode = new HuffmanCode()
{
BitsUsed = 0,
Value = (uint)sorted[0]
};
ReplicateValue(table, 1, totalSize, huffmanCode);
return totalSize;
}
int step; // step size to replicate values in current table
int low = -1; // low bits for current root entry
int mask = totalSize - 1; // mask for low bits
int key = 0; // reversed prefix code
int numNodes = 1; // number of Huffman tree nodes
int numOpen = 1; // number of open branches in current tree level
int tableBits = rootBits; // key length of current table
int tableSize = 1 << tableBits; // size of current table
symbol = 0;
// Fill in root table.
for (len = 1, step = 2; len <= rootBits; ++len, step <<= 1)
{
int countsLen = counts[len];
numOpen <<= 1;
numNodes += numOpen;
numOpen -= counts[len];
if (numOpen < 0)
{
return 0;
}
for (; countsLen > 0; countsLen--)
{
var huffmanCode = new HuffmanCode()
{
BitsUsed = len,
Value = (uint)sorted[symbol++]
};
ReplicateValue(table.Slice(key), step, tableSize, huffmanCode);
key = GetNextKey(key, len);
}
counts[len] = countsLen;
}
// Fill in 2nd level tables and add pointers to root table.
Span<HuffmanCode> tableSpan = table;
int tablePos = 0;
for (len = rootBits + 1, step = 2; len <= WebpConstants.MaxAllowedCodeLength; ++len, step <<= 1)
{
numOpen <<= 1;
numNodes += numOpen;
numOpen -= counts[len];
if (numOpen < 0)
{
return 0;
}
for (; counts[len] > 0; --counts[len])
{
if ((key & mask) != low)
{
tableSpan = tableSpan.Slice(tableSize);
tablePos += tableSize;
tableBits = NextTableBitSize(counts, len, rootBits);
tableSize = 1 << tableBits;
totalSize += tableSize;
low = key & mask;
table[low] = new HuffmanCode
{
BitsUsed = tableBits + rootBits,
Value = (uint)(tablePos - low)
};
}
var huffmanCode = new HuffmanCode
{
BitsUsed = len - rootBits,
Value = (uint)sorted[symbol++]
};
ReplicateValue(tableSpan.Slice(key >> rootBits), step, tableSize, huffmanCode);
key = GetNextKey(key, len);
}
}
return totalSize;
}
private static int CodeRepeatedZeros(int repetitions, Span<HuffmanTreeToken> tokens)
{
int pos = 0;
while (repetitions >= 1)
{
if (repetitions < 3)
{
for (int i = 0; i < repetitions; i++)
{
tokens[pos].Code = 0; // 0-value
tokens[pos].ExtraBits = 0;
pos++;
}
break;
}
else if (repetitions < 11)
{
tokens[pos].Code = 17;
tokens[pos].ExtraBits = (byte)(repetitions - 3);
pos++;
break;
}
else if (repetitions < 139)
{
tokens[pos].Code = 18;
tokens[pos].ExtraBits = (byte)(repetitions - 11);
pos++;
break;
}
else
{
tokens[pos].Code = 18;
tokens[pos].ExtraBits = 0x7f; // 138 repeated 0s
pos++;
repetitions -= 138;
}
}
return pos;
}
private static int CodeRepeatedValues(int repetitions, Span<HuffmanTreeToken> tokens, int value, int prevValue)
{
int pos = 0;
if (value != prevValue)
{
tokens[pos].Code = (byte)value;
tokens[pos].ExtraBits = 0;
pos++;
repetitions--;
}
while (repetitions >= 1)
{
if (repetitions < 3)
{
int i;
for (i = 0; i < repetitions; i++)
{
tokens[pos].Code = (byte)value;
tokens[pos].ExtraBits = 0;
pos++;
}
break;
}
else if (repetitions < 7)
{
tokens[pos].Code = 16;
tokens[pos].ExtraBits = (byte)(repetitions - 3);
pos++;
break;
}
else
{
tokens[pos].Code = 16;
tokens[pos].ExtraBits = 3;
pos++;
repetitions -= 6;
}
}
return pos;
}
/// <summary>
/// Get the actual bit values for a tree of bit depths.
/// </summary>
/// <param name="tree">The hiffman tree.</param>
private static void ConvertBitDepthsToSymbols(HuffmanTreeCode tree)
{
// 0 bit-depth means that the symbol does not exist.
uint[] nextCode = new uint[WebpConstants.MaxAllowedCodeLength + 1];
int[] depthCount = new int[WebpConstants.MaxAllowedCodeLength + 1];
int len = tree.NumSymbols;
for (int i = 0; i < len; i++)
{
int codeLength = tree.CodeLengths[i];
depthCount[codeLength]++;
}
depthCount[0] = 0; // ignore unused symbol.
nextCode[0] = 0;
uint code = 0;
for (int i = 1; i <= WebpConstants.MaxAllowedCodeLength; i++)
{
code = (uint)((code + depthCount[i - 1]) << 1);
nextCode[i] = code;
}
for (int i = 0; i < len; i++)
{
int codeLength = tree.CodeLengths[i];
tree.Codes[i] = (short)ReverseBits(codeLength, nextCode[codeLength]++);
}
}
private static void SetBitDepths(Span<HuffmanTree> tree, Span<HuffmanTree> pool, byte[] bitDepths, int level)
{
if (tree[0].PoolIndexLeft >= 0)
{
SetBitDepths(pool.Slice(tree[0].PoolIndexLeft), pool, bitDepths, level + 1);
SetBitDepths(pool.Slice(tree[0].PoolIndexRight), pool, bitDepths, level + 1);
}
else
{
bitDepths[tree[0].Value] = (byte)level;
}
}
private static uint ReverseBits(int numBits, uint bits)
{
uint retval = 0;
int i = 0;
while (i < numBits)
{
i += 4;
retval |= (uint)(ReversedBits[bits & 0xf] << (WebpConstants.MaxAllowedCodeLength + 1 - i));
bits >>= 4;
}
retval >>= WebpConstants.MaxAllowedCodeLength + 1 - numBits;
return retval;
}
/// <summary>
/// Returns the table width of the next 2nd level table. count is the histogram of bit lengths for the remaining symbols,
/// len is the code length of the next processed symbol.
/// </summary>
private static int NextTableBitSize(int[] count, int len, int rootBits)
{
int left = 1 << (len - rootBits);
while (len < WebpConstants.MaxAllowedCodeLength)
{
left -= count[len];
if (left <= 0)
{
break;
}
++len;
left <<= 1;
}
return len - rootBits;
}
/// <summary>
/// Stores code in table[0], table[step], table[2*step], ..., table[end-step].
/// Assumes that end is an integer multiple of step.
/// </summary>
private static void ReplicateValue(Span<HuffmanCode> table, int step, int end, HuffmanCode code)
{
Guard.IsTrue(end % step == 0, nameof(end), "end must be a multiple of step");
do
{
end -= step;
table[end] = code;
}
while (end > 0);
}
/// <summary>
/// Returns reverse(reverse(key, len) + 1, len), where reverse(key, len) is the
/// bit-wise reversal of the len least significant bits of key.
/// </summary>
private static int GetNextKey(int key, int len)
{
int step = 1 << (len - 1);
while ((key & step) != 0)
{
step >>= 1;
}
return step != 0 ? (key & (step - 1)) + step : key;
}
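// Illustrative sketch (editor-added, hypothetical helper name): GetNextKey enumerates the
// lookup-table slots in bit-reversed order. Starting from 0 with len = 3, the keys visit
// 0, 4, 2, 6, 1, 5, 3, 7 - the bit-reversed values of 0..7 - which is the order in which
// canonical Huffman codes fill the table in BuildHuffmanTable.
private static int[] NextKeyOrderExample()
{
    int[] visited = new int[8];
    int key = 0;
    for (int i = 0; i < visited.Length; i++)
    {
        visited[i] = key;
        key = GetNextKey(key, 3);
    }

    return visited; // { 0, 4, 2, 6, 1, 5, 3, 7 }
}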
/// <summary>
/// Heuristics for selecting the stride ranges to collapse.
/// </summary>
private static bool ValuesShouldBeCollapsedToStrideAverage(int a, int b) => Math.Abs(a - b) < 4;
}
}

1279
src/ImageSharp/Formats/Webp/Lossless/LosslessUtils.cs

File diff suppressed because it is too large

125
src/ImageSharp/Formats/Webp/Lossless/NearLosslessEnc.cs

@@ -0,0 +1,125 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Near-lossless image preprocessing adjusts pixel values to help compressibility with a guarantee
/// of maximum deviation between original and resulting pixel values.
/// </summary>
internal static class NearLosslessEnc
{
private const int MinDimForNearLossless = 64;
public static void ApplyNearLossless(int xSize, int ySize, int quality, Span<uint> argbSrc, Span<uint> argbDst, int stride)
{
uint[] copyBuffer = new uint[xSize * 3];
int limitBits = LosslessUtils.NearLosslessBits(quality);
// For small icon images, don't attempt to apply near-lossless compression.
if ((xSize < MinDimForNearLossless && ySize < MinDimForNearLossless) || ySize < 3)
{
for (int i = 0; i < ySize; i++)
{
argbSrc.Slice(i * stride, xSize).CopyTo(argbDst.Slice(i * xSize, xSize));
}
return;
}
NearLossless(xSize, ySize, argbSrc, stride, limitBits, copyBuffer, argbDst);
for (int i = limitBits - 1; i != 0; i--)
{
NearLossless(xSize, ySize, argbDst, xSize, i, copyBuffer, argbDst);
}
}
// Adjusts pixel values of image with given maximum error.
private static void NearLossless(int xSize, int ySize, Span<uint> argbSrc, int stride, int limitBits, Span<uint> copyBuffer, Span<uint> argbDst)
{
int y;
int limit = 1 << limitBits;
Span<uint> prevRow = copyBuffer;
Span<uint> currRow = copyBuffer.Slice(xSize, xSize);
Span<uint> nextRow = copyBuffer.Slice(xSize * 2, xSize);
argbSrc.Slice(0, xSize).CopyTo(currRow);
argbSrc.Slice(xSize, xSize).CopyTo(nextRow);
int srcOffset = 0;
int dstOffset = 0;
for (y = 0; y < ySize; y++)
{
if (y == 0 || y == ySize - 1)
{
argbSrc.Slice(srcOffset, xSize).CopyTo(argbDst.Slice(dstOffset, xSize));
}
else
{
argbSrc.Slice(srcOffset + stride, xSize).CopyTo(nextRow);
argbDst[dstOffset] = argbSrc[srcOffset];
argbDst[dstOffset + xSize - 1] = argbSrc[srcOffset + xSize - 1];
for (int x = 1; x < xSize - 1; x++)
{
if (IsSmooth(prevRow, currRow, nextRow, x, limit))
{
argbDst[dstOffset + x] = currRow[x];
}
else
{
argbDst[dstOffset + x] = ClosestDiscretizedArgb(currRow[x], limitBits);
}
}
}
Span<uint> temp = prevRow;
prevRow = currRow;
currRow = nextRow;
nextRow = temp;
srcOffset += stride;
dstOffset += xSize;
}
}
// Applies FindClosestDiscretized to all channels of pixel.
private static uint ClosestDiscretizedArgb(uint a, int bits) =>
(FindClosestDiscretized(a >> 24, bits) << 24) |
(FindClosestDiscretized((a >> 16) & 0xff, bits) << 16) |
(FindClosestDiscretized((a >> 8) & 0xff, bits) << 8) |
FindClosestDiscretized(a & 0xff, bits);
private static uint FindClosestDiscretized(uint a, int bits)
{
uint mask = (1u << bits) - 1;
uint biased = a + (mask >> 1) + ((a >> bits) & 1);
if (biased > 0xff)
{
return 0xff;
}
return biased & ~mask;
}
private static bool IsSmooth(Span<uint> prevRow, Span<uint> currRow, Span<uint> nextRow, int ix, int limit) =>
IsNear(currRow[ix], currRow[ix - 1], limit) && // Check that all pixels in 4-connected neighborhood are smooth.
IsNear(currRow[ix], currRow[ix + 1], limit) &&
IsNear(currRow[ix], prevRow[ix], limit) &&
IsNear(currRow[ix], nextRow[ix], limit);
// Checks if distance between corresponding channel values of pixels a and b is within the given limit.
private static bool IsNear(uint a, uint b, int limit)
{
for (int k = 0; k < 4; ++k)
{
int delta = (int)((a >> (k * 8)) & 0xff) - (int)((b >> (k * 8)) & 0xff);
if (delta >= limit || delta <= -limit)
{
return false;
}
}
return true;
}
}
}
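The heart of the adjustment above is FindClosestDiscretized, which snaps an 8-bit channel to a grid of step 2^bits, rounding to the nearest grid value and saturating at 0xff. The following standalone sketch (an editor-added illustration; the class name and sample values are hypothetical) mirrors that arithmetic so the effect is easy to see:

using System;

internal static class NearLosslessSketch
{
    // Mirrors NearLosslessEnc.FindClosestDiscretized: quantize one 8-bit channel to a grid
    // of step (1 << bits), rounding to the nearest grid value and saturating at 0xff.
    private static uint FindClosestDiscretized(uint a, int bits)
    {
        uint mask = (1u << bits) - 1;
        uint biased = a + (mask >> 1) + ((a >> bits) & 1);
        return biased > 0xff ? 0xff : biased & ~mask;
    }

    internal static void Run()
    {
        // With bits = 2 the channel is snapped to multiples of 4:
        Console.WriteLine(FindClosestDiscretized(101, 2)); // 100
        Console.WriteLine(FindClosestDiscretized(102, 2)); // 104
        Console.WriteLine(FindClosestDiscretized(254, 2)); // 255 (saturated)
    }
}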

54
src/ImageSharp/Formats/Webp/Lossless/PixOrCopy.cs

@@ -0,0 +1,54 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Diagnostics;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
[DebuggerDisplay("Mode: {Mode}, Len: {Len}, BgraOrDistance: {BgraOrDistance}")]
internal class PixOrCopy
{
public PixOrCopyMode Mode { get; set; }
public ushort Len { get; set; }
public uint BgraOrDistance { get; set; }
public static PixOrCopy CreateCacheIdx(int idx) =>
new PixOrCopy()
{
Mode = PixOrCopyMode.CacheIdx,
BgraOrDistance = (uint)idx,
Len = 1
};
public static PixOrCopy CreateLiteral(uint bgra) =>
new PixOrCopy()
{
Mode = PixOrCopyMode.Literal,
BgraOrDistance = bgra,
Len = 1
};
public static PixOrCopy CreateCopy(uint distance, ushort len) => new PixOrCopy()
{
Mode = PixOrCopyMode.Copy,
BgraOrDistance = distance,
Len = len
};
public uint Literal(int component) => (this.BgraOrDistance >> (component * 8)) & 0xff;
public uint CacheIdx() => this.BgraOrDistance;
public ushort Length() => this.Len;
public uint Distance() => this.BgraOrDistance;
public bool IsLiteral() => this.Mode == PixOrCopyMode.Literal;
public bool IsCacheIdx() => this.Mode == PixOrCopyMode.CacheIdx;
public bool IsCopy() => this.Mode == PixOrCopyMode.Copy;
}
}

16
src/ImageSharp/Formats/Webp/Lossless/PixOrCopyMode.cs

@@ -0,0 +1,16 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal enum PixOrCopyMode
{
Literal,
CacheIdx,
Copy,
None
}
}

1181
src/ImageSharp/Formats/Webp/Lossless/PredictorEncoder.cs

File diff suppressed because it is too large

24
src/ImageSharp/Formats/Webp/Lossless/Vp8LBackwardRefs.cs

@@ -0,0 +1,24 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Collections.Generic;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal class Vp8LBackwardRefs
{
public Vp8LBackwardRefs() => this.Refs = new List<PixOrCopy>();
/// <summary>
/// Gets or sets the common block-size.
/// </summary>
public int BlockSize { get; set; }
/// <summary>
/// Gets the backward references.
/// </summary>
public List<PixOrCopy> Refs { get; }
public void Add(PixOrCopy pixOrCopy) => this.Refs.Add(pixOrCopy);
}
}
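As a usage sketch (editor-added; the wrapper class is hypothetical and assumes it lives in the SixLabors.ImageSharp.Formats.Webp.Lossless namespace, since these types are internal), a backward-reference stream is just a list of PixOrCopy tokens. A row of five identical pixels, for example, can be represented as one literal followed by a copy of length four at distance one:

internal static class BackwardRefsUsageSketch
{
    internal static Vp8LBackwardRefs BuildRow()
    {
        var refs = new Vp8LBackwardRefs();
        refs.Add(PixOrCopy.CreateLiteral(0xff336699));        // first pixel as a literal BGRA value
        refs.Add(PixOrCopy.CreateCopy(distance: 1, len: 4));  // repeat the previous pixel four times
        return refs; // can then be accumulated, e.g. by new Vp8LHistogram(refs, paletteCodeBits: 0)
    }
}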

221
src/ImageSharp/Formats/Webp/Lossless/Vp8LBitEntropy.cs

@@ -0,0 +1,221 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Holds bit entropy results and entropy-related functions.
/// </summary>
internal class Vp8LBitEntropy
{
/// <summary>
/// Not a trivial literal symbol.
/// </summary>
private const uint NonTrivialSym = 0xffffffff;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LBitEntropy"/> class.
/// </summary>
public Vp8LBitEntropy()
{
this.Entropy = 0.0d;
this.Sum = 0;
this.NoneZeros = 0;
this.MaxVal = 0;
this.NoneZeroCode = NonTrivialSym;
}
/// <summary>
/// Gets or sets the entropy.
/// </summary>
public double Entropy { get; set; }
/// <summary>
/// Gets or sets the sum of the population.
/// </summary>
public uint Sum { get; set; }
/// <summary>
/// Gets or sets the number of non-zero elements in the population.
/// </summary>
public int NoneZeros { get; set; }
/// <summary>
/// Gets or sets the maximum value in the population.
/// </summary>
public uint MaxVal { get; set; }
/// <summary>
/// Gets or sets the index of the last non-zero element in the population.
/// </summary>
public uint NoneZeroCode { get; set; }
public void Init()
{
this.Entropy = 0.0d;
this.Sum = 0;
this.NoneZeros = 0;
this.MaxVal = 0;
this.NoneZeroCode = NonTrivialSym;
}
public double BitsEntropyRefine()
{
double mix;
if (this.NoneZeros < 5)
{
if (this.NoneZeros <= 1)
{
return 0;
}
// Two symbols: they will be coded as 0 and 1 in a Huffman code.
// Let's mix in a bit of entropy to favor good clustering when
// distributions of these are combined.
if (this.NoneZeros == 2)
{
return (0.99 * this.Sum) + (0.01 * this.Entropy);
}
// No matter what the entropy says, we cannot be better than minLimit
// with Huffman coding. We mix a bit of entropy into minLimit since it
// produces much better (~0.5 %) compression results, perhaps because of
// better entropy clustering.
if (this.NoneZeros == 3)
{
mix = 0.95;
}
else
{
mix = 0.7; // nonzeros == 4.
}
}
else
{
mix = 0.627;
}
double minLimit = (2 * this.Sum) - this.MaxVal;
minLimit = (mix * minLimit) + ((1.0 - mix) * this.Entropy);
return this.Entropy < minLimit ? minLimit : this.Entropy;
}
public void BitsEntropyUnrefined(Span<uint> array, int n)
{
this.Init();
for (int i = 0; i < n; i++)
{
if (array[i] != 0)
{
this.Sum += array[i];
this.NoneZeroCode = (uint)i;
this.NoneZeros++;
this.Entropy -= LosslessUtils.FastSLog2(array[i]);
if (this.MaxVal < array[i])
{
this.MaxVal = array[i];
}
}
}
this.Entropy += LosslessUtils.FastSLog2(this.Sum);
}
/// <summary>
/// Get the entropy for the distribution 'X'.
/// </summary>
public void BitsEntropyUnrefined(uint[] x, int length, Vp8LStreaks stats)
{
int i;
int iPrev = 0;
uint xPrev = x[0];
this.Init();
for (i = 1; i < length; i++)
{
uint xi = x[i];
if (xi != xPrev)
{
this.GetEntropyUnrefined(xi, i, ref xPrev, ref iPrev, stats);
}
}
this.GetEntropyUnrefined(0, i, ref xPrev, ref iPrev, stats);
this.Entropy += LosslessUtils.FastSLog2(this.Sum);
}
public void GetCombinedEntropyUnrefined(uint[] x, uint[] y, int length, Vp8LStreaks stats)
{
int i;
int iPrev = 0;
uint xyPrev = x[0] + y[0];
this.Init();
for (i = 1; i < length; i++)
{
uint xy = x[i] + y[i];
if (xy != xyPrev)
{
this.GetEntropyUnrefined(xy, i, ref xyPrev, ref iPrev, stats);
}
}
this.GetEntropyUnrefined(0, i, ref xyPrev, ref iPrev, stats);
this.Entropy += LosslessUtils.FastSLog2(this.Sum);
}
public void GetEntropyUnrefined(uint[] x, int length, Vp8LStreaks stats)
{
int i;
int iPrev = 0;
uint xPrev = x[0];
this.Init();
for (i = 1; i < length; i++)
{
uint xi = x[i];
if (xi != xPrev)
{
this.GetEntropyUnrefined(xi, i, ref xPrev, ref iPrev, stats);
}
}
this.GetEntropyUnrefined(0, i, ref xPrev, ref iPrev, stats);
this.Entropy += LosslessUtils.FastSLog2(this.Sum);
}
private void GetEntropyUnrefined(uint val, int i, ref uint valPrev, ref int iPrev, Vp8LStreaks stats)
{
int streak = i - iPrev;
// Gather info for the bit entropy.
if (valPrev != 0)
{
this.Sum += (uint)(valPrev * streak);
this.NoneZeros += streak;
this.NoneZeroCode = (uint)iPrev;
this.Entropy -= LosslessUtils.FastSLog2(valPrev) * streak;
if (this.MaxVal < valPrev)
{
this.MaxVal = valPrev;
}
}
// Gather info for the Huffman cost.
stats.Counts[valPrev != 0 ? 1 : 0] += streak > 3 ? 1 : 0;
stats.Streaks[valPrev != 0 ? 1 : 0][streak > 3 ? 1 : 0] += streak;
valPrev = val;
iPrev = i;
}
}
}
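To make the two-symbol branch of BitsEntropyRefine concrete, here is a standalone worked example (editor-added; it assumes LosslessUtils.FastSLog2(v) approximates v * log2(v), so the numbers are approximate and purely illustrative). For a population with counts { 7, 9 } the Shannon-style total is about 15.82 bits, and the refined estimate lands at roughly 16 bits, i.e. one bit per symbol, which is exactly what a two-symbol Huffman code costs:

using System;

internal static class BitsEntropySketch
{
    private static double SLog2(double v) => v <= 0 ? 0 : v * Math.Log(v, 2);

    internal static void Run()
    {
        uint[] counts = { 7, 9 };                       // two non-zero symbols
        double sum = counts[0] + counts[1];             // 16
        double entropy = SLog2(sum) - SLog2(counts[0]) - SLog2(counts[1]); // ~15.82

        // NoneZeros == 2 branch of BitsEntropyRefine:
        double refined = (0.99 * sum) + (0.01 * entropy);                  // ~16.0
        Console.WriteLine($"unrefined {entropy:F2} bits, refined {refined:F2} bits");
    }
}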

70
src/ImageSharp/Formats/Webp/Lossless/Vp8LDecoder.cs

@@ -0,0 +1,70 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Buffers;
using System.Collections.Generic;
using SixLabors.ImageSharp.Memory;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Holds information for decoding a lossless webp image.
/// </summary>
internal class Vp8LDecoder : IDisposable
{
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LDecoder"/> class.
/// </summary>
/// <param name="width">The width of the image.</param>
/// <param name="height">The height of the image.</param>
/// <param name="memoryAllocator">Used for allocating memory for the pixel data output.</param>
public Vp8LDecoder(int width, int height, MemoryAllocator memoryAllocator)
{
this.Width = width;
this.Height = height;
this.Metadata = new Vp8LMetadata();
this.Pixels = memoryAllocator.Allocate<uint>(width * height, AllocationOptions.Clean);
}
/// <summary>
/// Gets or sets the width of the image to decode.
/// </summary>
public int Width { get; set; }
/// <summary>
/// Gets or sets the height of the image to decode.
/// </summary>
public int Height { get; set; }
/// <summary>
/// Gets or sets the necessary VP8L metadata (like huffman tables) to decode the image.
/// </summary>
public Vp8LMetadata Metadata { get; set; }
/// <summary>
/// Gets or sets the transformations which needs to be reversed.
/// </summary>
public List<Vp8LTransform> Transforms { get; set; }
/// <summary>
/// Gets the pixel data.
/// </summary>
public IMemoryOwner<uint> Pixels { get; }
/// <inheritdoc/>
public void Dispose()
{
this.Pixels.Dispose();
this.Metadata?.HuffmanImage?.Dispose();
if (this.Transforms != null)
{
foreach (Vp8LTransform transform in this.Transforms)
{
transform.Data?.Dispose();
}
}
}
}
}

1786
src/ImageSharp/Formats/Webp/Lossless/Vp8LEncoder.cs

File diff suppressed because it is too large

284
src/ImageSharp/Formats/Webp/Lossless/Vp8LHashChain.cs

@@ -0,0 +1,284 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Buffers;
using System.Runtime.CompilerServices;
using SixLabors.ImageSharp.Memory;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal class Vp8LHashChain
{
private const uint HashMultiplierHi = 0xc6a4a793u;
private const uint HashMultiplierLo = 0x5bd1e996u;
private const int HashBits = 18;
private const int HashSize = 1 << HashBits;
/// <summary>
/// The number of bits for the window size.
/// </summary>
private const int WindowSizeBits = 20;
/// <summary>
/// 1M window (4M bytes) minus 120 special codes for short distances.
/// </summary>
private const int WindowSize = (1 << WindowSizeBits) - 120;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LHashChain"/> class.
/// </summary>
/// <param name="size">The size off the chain.</param>
public Vp8LHashChain(int size)
{
this.OffsetLength = new uint[size];
this.OffsetLength.AsSpan().Fill(0xcdcdcdcd);
this.Size = size;
}
/// <summary>
/// Gets the offset length.
/// The 20 most significant bits contain the offset at which the best match is found.
/// These 20 bits are the limit defined by GetWindowSizeForHashChain (through WindowSize = 1 &lt;&lt; 20).
/// The lower 12 bits contain the length of the match.
/// </summary>
public uint[] OffsetLength { get; }
/// <summary>
/// Gets the size of the hash chain.
/// This is the maximum size of the hash_chain that can be constructed.
/// Typically this is the pixel count (width x height) for a given image.
/// </summary>
public int Size { get; }
public void Fill(MemoryAllocator memoryAllocator, ReadOnlySpan<uint> bgra, int quality, int xSize, int ySize, bool lowEffort)
{
int size = xSize * ySize;
int iterMax = GetMaxItersForQuality(quality);
int windowSize = GetWindowSizeForHashChain(quality, xSize);
int pos;
if (size <= 2)
{
this.OffsetLength[0] = 0;
return;
}
using IMemoryOwner<int> hashToFirstIndexBuffer = memoryAllocator.Allocate<int>(HashSize);
Span<int> hashToFirstIndex = hashToFirstIndexBuffer.GetSpan();
// Initialize hashToFirstIndex array to -1.
hashToFirstIndex.Fill(-1);
int[] chain = new int[size];
// Fill the chain linking pixels with the same hash.
bool bgraComp = bgra.Length > 1 && bgra[0] == bgra[1];
for (pos = 0; pos < size - 2;)
{
uint hashCode;
bool bgraCompNext = bgra[pos + 1] == bgra[pos + 2];
if (bgraComp && bgraCompNext)
{
// Consecutive pixels with the same color will share the same hash.
// We therefore use a different hash: the color and its repetition length.
uint[] tmp = new uint[2];
uint len = 1;
tmp[0] = bgra[pos];
// Figure out how many consecutive pixels have the same color. The last such pixel has a
// different 64-bit hash, because its next pixel does not share the color, so we only need
// to reach the last pixel that equals its follower.
while (pos + (int)len + 2 < size && bgra[(int)(pos + len + 2)] == bgra[pos])
{
++len;
}
if (len > BackwardReferenceEncoder.MaxLength)
{
// Skip the pixels that match for distance=1 and length>MaxLength
// because they are linked to their predecessor and we automatically
// check that in the main for loop below. Skipping means setting no
// predecessor in the chain, hence -1.
pos += (int)(len - BackwardReferenceEncoder.MaxLength);
len = BackwardReferenceEncoder.MaxLength;
}
// Process the rest of the hash chain.
while (len > 0)
{
tmp[1] = len--;
hashCode = GetPixPairHash64(tmp);
chain[pos] = hashToFirstIndex[(int)hashCode];
hashToFirstIndex[(int)hashCode] = pos++;
}
bgraComp = false;
}
else
{
// Just move one pixel forward.
hashCode = GetPixPairHash64(bgra.Slice(pos));
chain[pos] = hashToFirstIndex[(int)hashCode];
hashToFirstIndex[(int)hashCode] = pos++;
bgraComp = bgraCompNext;
}
}
// Process the penultimate pixel.
chain[pos] = hashToFirstIndex[(int)GetPixPairHash64(bgra.Slice(pos))];
// Find the best match interval at each pixel, defined by an offset to the
// pixel and a length. The right-most pixel cannot match anything to the right
// (hence a best length of 0) and the left-most pixel nothing to the left (hence an offset of 0).
this.OffsetLength[0] = this.OffsetLength[size - 1] = 0;
for (int basePosition = size - 2; basePosition > 0;)
{
int maxLen = LosslessUtils.MaxFindCopyLength(size - 1 - basePosition);
int bgraStart = basePosition;
int iter = iterMax;
int bestLength = 0;
uint bestDistance = 0;
int minPos = basePosition > windowSize ? basePosition - windowSize : 0;
int lengthMax = maxLen < 256 ? maxLen : 256;
pos = chain[basePosition];
int currLength;
if (!lowEffort)
{
// Heuristic: use the comparison with the above line as an initialization.
if (basePosition >= (uint)xSize)
{
currLength = LosslessUtils.FindMatchLength(bgra.Slice(bgraStart - xSize), bgra.Slice(bgraStart), bestLength, maxLen);
if (currLength > bestLength)
{
bestLength = currLength;
bestDistance = (uint)xSize;
}
iter--;
}
// Heuristic: compare to the previous pixel.
currLength = LosslessUtils.FindMatchLength(bgra.Slice(bgraStart - 1), bgra.Slice(bgraStart), bestLength, maxLen);
if (currLength > bestLength)
{
bestLength = currLength;
bestDistance = 1;
}
iter--;
// Skip the for loop if we already have the maximum.
if (bestLength == BackwardReferenceEncoder.MaxLength)
{
pos = minPos - 1;
}
}
uint bestBgra = bgra.Slice(bgraStart)[bestLength];
for (; pos >= minPos && (--iter > 0); pos = chain[pos])
{
if (bgra[pos + bestLength] != bestBgra)
{
continue;
}
currLength = LosslessUtils.VectorMismatch(bgra.Slice(pos), bgra.Slice(bgraStart), maxLen);
if (bestLength < currLength)
{
bestLength = currLength;
bestDistance = (uint)(basePosition - pos);
bestBgra = bgra.Slice(bgraStart)[bestLength];
// Stop if we have reached a good enough length.
if (bestLength >= lengthMax)
{
break;
}
}
}
// We have the best match but in case the two intervals continue matching
// to the left, we have the best matches for the left-extended pixels.
uint maxBasePosition = (uint)basePosition;
while (true)
{
this.OffsetLength[basePosition] = (bestDistance << BackwardReferenceEncoder.MaxLengthBits) | (uint)bestLength;
--basePosition;
// Stop if we don't have a match or if we are out of bounds.
if (bestDistance == 0 || basePosition == 0)
{
break;
}
// Stop if we cannot extend the matching intervals to the left.
if (basePosition < bestDistance || bgra[(int)(basePosition - bestDistance)] != bgra[basePosition])
{
break;
}
// Stop if we are matching at its limit because there could be a closer
// matching interval with the same maximum length. Then again, if the
// matching interval is as close as possible (best_distance == 1), we will
// never find anything better so let's continue.
if (bestLength == BackwardReferenceEncoder.MaxLength && bestDistance != 1 && basePosition + BackwardReferenceEncoder.MaxLength < maxBasePosition)
{
break;
}
if (bestLength < BackwardReferenceEncoder.MaxLength)
{
bestLength++;
maxBasePosition = (uint)basePosition;
}
}
}
}
[MethodImpl(InliningOptions.ShortMethod)]
public int FindLength(int basePosition) => (int)(this.OffsetLength[basePosition] & ((1U << BackwardReferenceEncoder.MaxLengthBits) - 1));
[MethodImpl(InliningOptions.ShortMethod)]
public int FindOffset(int basePosition) => (int)(this.OffsetLength[basePosition] >> BackwardReferenceEncoder.MaxLengthBits);
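// Illustrative sketch (editor-added, hypothetical helper name): OffsetLength packs the
// match offset into the upper bits and the match length into the lower MaxLengthBits bits,
// so FindOffset and FindLength are just a shift and a mask. An offset of 640 with a length
// of 12 round-trips as follows:
private static void PackingExample(Vp8LHashChain chain)
{
    const uint offset = 640;
    const uint length = 12;
    chain.OffsetLength[0] = (offset << BackwardReferenceEncoder.MaxLengthBits) | length;
    System.Diagnostics.Debug.Assert(chain.FindOffset(0) == 640);
    System.Diagnostics.Debug.Assert(chain.FindLength(0) == 12);
}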
/// <summary>
/// Calculates the hash for a pixel pair.
/// </summary>
/// <param name="bgra">An Span with two pixels.</param>
/// <returns>The hash.</returns>
[MethodImpl(InliningOptions.ShortMethod)]
private static uint GetPixPairHash64(ReadOnlySpan<uint> bgra)
{
uint key = bgra[1] * HashMultiplierHi;
key += bgra[0] * HashMultiplierLo;
key >>= 32 - HashBits;
return key;
}
/// <summary>
/// Returns the maximum number of hash chain lookups to do for a
/// given compression quality. Return value in range [8, 86].
/// </summary>
/// <param name="quality">The quality.</param>
/// <returns>Number of hash chain lookups.</returns>
[MethodImpl(InliningOptions.ShortMethod)]
private static int GetMaxItersForQuality(int quality) => 8 + (quality * quality / 128);
[MethodImpl(InliningOptions.ShortMethod)]
private static int GetWindowSizeForHashChain(int quality, int xSize)
{
int maxWindowSize = quality > 75 ? WindowSize
: quality > 50 ? xSize << 8
: quality > 25 ? xSize << 6
: xSize << 4;
return maxWindowSize > WindowSize ? WindowSize : maxWindowSize;
}
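// Illustrative sketch (editor-added, hypothetical helper name): concrete values of the two
// quality heuristics above. The lookup budget grows quadratically from 8 (quality 0) to 86
// (quality 100), and the search window grows from xSize << 4 up to the full 1M-pixel window.
private static void QualityHeuristicsExample()
{
    System.Diagnostics.Debug.Assert(GetMaxItersForQuality(0) == 8);
    System.Diagnostics.Debug.Assert(GetMaxItersForQuality(50) == 27);
    System.Diagnostics.Debug.Assert(GetMaxItersForQuality(100) == 86);
    System.Diagnostics.Debug.Assert(GetWindowSizeForHashChain(60, 1024) == 1024 << 8);
    System.Diagnostics.Debug.Assert(GetWindowSizeForHashChain(100, 1024) == WindowSize);
}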
}
}

515
src/ImageSharp/Formats/Webp/Lossless/Vp8LHistogram.cs

@@ -0,0 +1,515 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Collections.Generic;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal class Vp8LHistogram : IDeepCloneable
{
private const uint NonTrivialSym = 0xffffffff;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LHistogram"/> class.
/// </summary>
/// <param name="other">The histogram to create an instance from.</param>
private Vp8LHistogram(Vp8LHistogram other)
: this(other.PaletteCodeBits)
{
other.Red.AsSpan().CopyTo(this.Red);
other.Blue.AsSpan().CopyTo(this.Blue);
other.Alpha.AsSpan().CopyTo(this.Alpha);
other.Literal.AsSpan().CopyTo(this.Literal);
other.Distance.AsSpan().CopyTo(this.Distance);
other.IsUsed.AsSpan().CopyTo(this.IsUsed);
this.LiteralCost = other.LiteralCost;
this.RedCost = other.RedCost;
this.BlueCost = other.BlueCost;
this.BitCost = other.BitCost;
this.TrivialSymbol = other.TrivialSymbol;
this.PaletteCodeBits = other.PaletteCodeBits;
}
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LHistogram"/> class.
/// </summary>
/// <param name="refs">The backward references to initialize the histogram with.</param>
/// <param name="paletteCodeBits">The palette code bits.</param>
public Vp8LHistogram(Vp8LBackwardRefs refs, int paletteCodeBits)
: this(paletteCodeBits) => this.StoreRefs(refs);
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LHistogram"/> class.
/// </summary>
/// <param name="paletteCodeBits">The palette code bits.</param>
public Vp8LHistogram(int paletteCodeBits)
{
this.PaletteCodeBits = paletteCodeBits;
this.Red = new uint[WebpConstants.NumLiteralCodes + 1];
this.Blue = new uint[WebpConstants.NumLiteralCodes + 1];
this.Alpha = new uint[WebpConstants.NumLiteralCodes + 1];
this.Distance = new uint[WebpConstants.NumDistanceCodes];
int literalSize = WebpConstants.NumLiteralCodes + WebpConstants.NumLengthCodes + (1 << WebpConstants.MaxColorCacheBits);
this.Literal = new uint[literalSize + 1];
// 5 for literal, red, blue, alpha, distance.
this.IsUsed = new bool[5];
}
/// <summary>
/// Gets or sets the palette code bits.
/// </summary>
public int PaletteCodeBits { get; set; }
/// <summary>
/// Gets or sets the cached value of bit cost.
/// </summary>
public double BitCost { get; set; }
/// <summary>
/// Gets or sets the cached value of literal entropy costs.
/// </summary>
public double LiteralCost { get; set; }
/// <summary>
/// Gets or sets the cached value of red entropy costs.
/// </summary>
public double RedCost { get; set; }
/// <summary>
/// Gets or sets the cached value of blue entropy costs.
/// </summary>
public double BlueCost { get; set; }
public uint[] Red { get; }
public uint[] Blue { get; }
public uint[] Alpha { get; }
public uint[] Literal { get; }
public uint[] Distance { get; }
public uint TrivialSymbol { get; set; }
public bool[] IsUsed { get; }
/// <inheritdoc/>
public IDeepCloneable DeepClone() => new Vp8LHistogram(this);
/// <summary>
/// Collect all the references into a histogram (without reset).
/// </summary>
/// <param name="refs">The backward references.</param>
public void StoreRefs(Vp8LBackwardRefs refs)
{
using List<PixOrCopy>.Enumerator c = refs.Refs.GetEnumerator();
while (c.MoveNext())
{
this.AddSinglePixOrCopy(c.Current, false);
}
}
/// <summary>
/// Accumulate a token 'v' into a histogram.
/// </summary>
/// <param name="v">The token to add.</param>
/// <param name="useDistanceModifier">Indicates whether to use the distance modifier.</param>
/// <param name="xSize">xSize is only used when useDistanceModifier is true.</param>
public void AddSinglePixOrCopy(PixOrCopy v, bool useDistanceModifier, int xSize = 0)
{
if (v.IsLiteral())
{
this.Alpha[v.Literal(3)]++;
this.Red[v.Literal(2)]++;
this.Literal[v.Literal(1)]++;
this.Blue[v.Literal(0)]++;
}
else if (v.IsCacheIdx())
{
int literalIx = (int)(WebpConstants.NumLiteralCodes + WebpConstants.NumLengthCodes + v.CacheIdx());
this.Literal[literalIx]++;
}
else
{
int extraBits = 0;
int code = LosslessUtils.PrefixEncodeBits(v.Length(), ref extraBits);
this.Literal[WebpConstants.NumLiteralCodes + code]++;
if (!useDistanceModifier)
{
code = LosslessUtils.PrefixEncodeBits((int)v.Distance(), ref extraBits);
}
else
{
code = LosslessUtils.PrefixEncodeBits(BackwardReferenceEncoder.DistanceToPlaneCode(xSize, (int)v.Distance()), ref extraBits);
}
this.Distance[code]++;
}
}
public int NumCodes() => WebpConstants.NumLiteralCodes + WebpConstants.NumLengthCodes + (this.PaletteCodeBits > 0 ? 1 << this.PaletteCodeBits : 0);
/// <summary>
/// Estimate how many bits the combined entropy of literals and distance approximately maps to.
/// </summary>
/// <returns>Estimated bits.</returns>
public double EstimateBits()
{
uint notUsed = 0;
return
PopulationCost(this.Literal, this.NumCodes(), ref notUsed, ref this.IsUsed[0])
+ PopulationCost(this.Red, WebpConstants.NumLiteralCodes, ref notUsed, ref this.IsUsed[1])
+ PopulationCost(this.Blue, WebpConstants.NumLiteralCodes, ref notUsed, ref this.IsUsed[2])
+ PopulationCost(this.Alpha, WebpConstants.NumLiteralCodes, ref notUsed, ref this.IsUsed[3])
+ PopulationCost(this.Distance, WebpConstants.NumDistanceCodes, ref notUsed, ref this.IsUsed[4])
+ ExtraCost(this.Literal.AsSpan(WebpConstants.NumLiteralCodes), WebpConstants.NumLengthCodes)
+ ExtraCost(this.Distance, WebpConstants.NumDistanceCodes);
}
public void UpdateHistogramCost()
{
uint alphaSym = 0, redSym = 0, blueSym = 0;
uint notUsed = 0;
double alphaCost = PopulationCost(this.Alpha, WebpConstants.NumLiteralCodes, ref alphaSym, ref this.IsUsed[3]);
double distanceCost = PopulationCost(this.Distance, WebpConstants.NumDistanceCodes, ref notUsed, ref this.IsUsed[4]) + ExtraCost(this.Distance, WebpConstants.NumDistanceCodes);
int numCodes = this.NumCodes();
this.LiteralCost = PopulationCost(this.Literal, numCodes, ref notUsed, ref this.IsUsed[0]) + ExtraCost(this.Literal.AsSpan(WebpConstants.NumLiteralCodes), WebpConstants.NumLengthCodes);
this.RedCost = PopulationCost(this.Red, WebpConstants.NumLiteralCodes, ref redSym, ref this.IsUsed[1]);
this.BlueCost = PopulationCost(this.Blue, WebpConstants.NumLiteralCodes, ref blueSym, ref this.IsUsed[2]);
this.BitCost = this.LiteralCost + this.RedCost + this.BlueCost + alphaCost + distanceCost;
if ((alphaSym | redSym | blueSym) == NonTrivialSym)
{
this.TrivialSymbol = NonTrivialSym;
}
else
{
this.TrivialSymbol = (alphaSym << 24) | (redSym << 16) | (blueSym << 0);
}
}
/// <summary>
/// Performs output = a + b, computing the cost C(a+b) - C(a) - C(b) while comparing
/// to the threshold value 'costThreshold'. The score returned is
/// Score = C(a+b) - C(a) - C(b), where C(a) + C(b) is known and fixed.
/// Since the previous score passed is 'costThreshold', we only need to compare
/// the partial cost against 'costThreshold + C(a) + C(b)' to possibly bail out early.
/// </summary>
public double AddEval(Vp8LHistogram b, double costThreshold, Vp8LHistogram output)
{
double sumCost = this.BitCost + b.BitCost;
costThreshold += sumCost;
if (this.GetCombinedHistogramEntropy(b, costThreshold, costInitial: 0, out double cost))
{
this.Add(b, output);
output.BitCost = cost;
output.PaletteCodeBits = this.PaletteCodeBits;
}
return cost - sumCost;
}
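// Worked example of the bail-out algebra above (editor-added; the numbers are made up):
// suppose C(a) = 120.0, C(b) = 80.0 and the best score so far is costThreshold = -5.0.
// AddEval shifts the threshold to -5.0 + 200.0 = 195.0 and asks GetCombinedHistogramEntropy
// for C(a+b); as soon as the partially accumulated combined cost exceeds 195.0 the merge
// cannot beat the current best (its score C(a+b) - 200.0 would be >= -5.0), so the
// evaluation stops early without touching the remaining channels.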
public double AddThresh(Vp8LHistogram b, double costThreshold)
{
double costInitial = -this.BitCost;
this.GetCombinedHistogramEntropy(b, costThreshold, costInitial, out double cost);
return cost;
}
public void Add(Vp8LHistogram b, Vp8LHistogram output)
{
int literalSize = this.NumCodes();
this.AddLiteral(b, output, literalSize);
this.AddRed(b, output, WebpConstants.NumLiteralCodes);
this.AddBlue(b, output, WebpConstants.NumLiteralCodes);
this.AddAlpha(b, output, WebpConstants.NumLiteralCodes);
this.AddDistance(b, output, WebpConstants.NumDistanceCodes);
for (int i = 0; i < 5; i++)
{
output.IsUsed[i] = this.IsUsed[i] | b.IsUsed[i];
}
output.TrivialSymbol = this.TrivialSymbol == b.TrivialSymbol
? this.TrivialSymbol
: NonTrivialSym;
}
public bool GetCombinedHistogramEntropy(Vp8LHistogram b, double costThreshold, double costInitial, out double cost)
{
bool trivialAtEnd = false;
cost = costInitial;
cost += GetCombinedEntropy(this.Literal, b.Literal, this.NumCodes(), this.IsUsed[0], b.IsUsed[0], false);
cost += ExtraCostCombined(this.Literal.AsSpan(WebpConstants.NumLiteralCodes), b.Literal.AsSpan(WebpConstants.NumLiteralCodes), WebpConstants.NumLengthCodes);
if (cost > costThreshold)
{
return false;
}
if (this.TrivialSymbol != NonTrivialSym && this.TrivialSymbol == b.TrivialSymbol)
{
// A, R and B are all 0 or 0xff.
uint colorA = (this.TrivialSymbol >> 24) & 0xff;
uint colorR = (this.TrivialSymbol >> 16) & 0xff;
uint colorB = (this.TrivialSymbol >> 0) & 0xff;
if ((colorA == 0 || colorA == 0xff) &&
(colorR == 0 || colorR == 0xff) &&
(colorB == 0 || colorB == 0xff))
{
trivialAtEnd = true;
}
}
cost += GetCombinedEntropy(this.Red, b.Red, WebpConstants.NumLiteralCodes, this.IsUsed[1], b.IsUsed[1], trivialAtEnd);
if (cost > costThreshold)
{
return false;
}
cost += GetCombinedEntropy(this.Blue, b.Blue, WebpConstants.NumLiteralCodes, this.IsUsed[2], b.IsUsed[2], trivialAtEnd);
if (cost > costThreshold)
{
return false;
}
cost += GetCombinedEntropy(this.Alpha, b.Alpha, WebpConstants.NumLiteralCodes, this.IsUsed[3], b.IsUsed[3], trivialAtEnd);
if (cost > costThreshold)
{
return false;
}
cost += GetCombinedEntropy(this.Distance, b.Distance, WebpConstants.NumDistanceCodes, this.IsUsed[4], b.IsUsed[4], false);
if (cost > costThreshold)
{
return false;
}
cost += ExtraCostCombined(this.Distance, b.Distance, WebpConstants.NumDistanceCodes);
if (cost > costThreshold)
{
return false;
}
return true;
}
private void AddLiteral(Vp8LHistogram b, Vp8LHistogram output, int literalSize)
{
if (this.IsUsed[0])
{
if (b.IsUsed[0])
{
AddVector(this.Literal, b.Literal, output.Literal, literalSize);
}
else
{
this.Literal.AsSpan(0, literalSize).CopyTo(output.Literal);
}
}
else if (b.IsUsed[0])
{
b.Literal.AsSpan(0, literalSize).CopyTo(output.Literal);
}
else
{
output.Literal.AsSpan(0, literalSize).Fill(0);
}
}
private void AddRed(Vp8LHistogram b, Vp8LHistogram output, int size)
{
if (this.IsUsed[1])
{
if (b.IsUsed[1])
{
AddVector(this.Red, b.Red, output.Red, size);
}
else
{
this.Red.AsSpan(0, size).CopyTo(output.Red);
}
}
else if (b.IsUsed[1])
{
b.Red.AsSpan(0, size).CopyTo(output.Red);
}
else
{
output.Red.AsSpan(0, size).Fill(0);
}
}
private void AddBlue(Vp8LHistogram b, Vp8LHistogram output, int size)
{
if (this.IsUsed[2])
{
if (b.IsUsed[2])
{
AddVector(this.Blue, b.Blue, output.Blue, size);
}
else
{
this.Blue.AsSpan(0, size).CopyTo(output.Blue);
}
}
else if (b.IsUsed[2])
{
b.Blue.AsSpan(0, size).CopyTo(output.Blue);
}
else
{
output.Blue.AsSpan(0, size).Fill(0);
}
}
private void AddAlpha(Vp8LHistogram b, Vp8LHistogram output, int size)
{
if (this.IsUsed[3])
{
if (b.IsUsed[3])
{
AddVector(this.Alpha, b.Alpha, output.Alpha, size);
}
else
{
this.Alpha.AsSpan(0, size).CopyTo(output.Alpha);
}
}
else if (b.IsUsed[3])
{
b.Alpha.AsSpan(0, size).CopyTo(output.Alpha);
}
else
{
output.Alpha.AsSpan(0, size).Fill(0);
}
}
private void AddDistance(Vp8LHistogram b, Vp8LHistogram output, int size)
{
if (this.IsUsed[4])
{
if (b.IsUsed[4])
{
AddVector(this.Distance, b.Distance, output.Distance, size);
}
else
{
this.Distance.AsSpan(0, size).CopyTo(output.Distance);
}
}
else if (b.IsUsed[4])
{
b.Distance.AsSpan(0, size).CopyTo(output.Distance);
}
else
{
output.Distance.AsSpan(0, size).Fill(0);
}
}
private static double GetCombinedEntropy(uint[] x, uint[] y, int length, bool isXUsed, bool isYUsed, bool trivialAtEnd)
{
var stats = new Vp8LStreaks();
if (trivialAtEnd)
{
// This configuration is due to palettization that transforms an indexed
// pixel into 0xff000000 | (pixel << 8) in BundleColorMap.
// BitsEntropyRefine is 0 for histograms with only one non-zero value.
// Only FinalHuffmanCost needs to be evaluated.
// Deal with the non-zero value at index 0 or length-1.
stats.Streaks[1][0] = 1;
// Deal with the following/previous zero streak.
stats.Counts[0] = 1;
stats.Streaks[0][1] = length - 1;
return stats.FinalHuffmanCost();
}
var bitEntropy = new Vp8LBitEntropy();
if (isXUsed)
{
if (isYUsed)
{
bitEntropy.GetCombinedEntropyUnrefined(x, y, length, stats);
}
else
{
bitEntropy.GetEntropyUnrefined(x, length, stats);
}
}
else
{
if (isYUsed)
{
bitEntropy.GetEntropyUnrefined(y, length, stats);
}
else
{
stats.Counts[0] = 1;
stats.Streaks[0][length > 3 ? 1 : 0] = length;
bitEntropy.Init();
}
}
return bitEntropy.BitsEntropyRefine() + stats.FinalHuffmanCost();
}
private static double ExtraCostCombined(Span<uint> x, Span<uint> y, int length)
{
double cost = 0.0d;
for (int i = 2; i < length - 2; i++)
{
int xy = (int)(x[i + 2] + y[i + 2]);
cost += (i >> 1) * xy;
}
return cost;
}
/// <summary>
/// Get the symbol entropy for the distribution 'population'.
/// </summary>
private static double PopulationCost(uint[] population, int length, ref uint trivialSym, ref bool isUsed)
{
var bitEntropy = new Vp8LBitEntropy();
var stats = new Vp8LStreaks();
bitEntropy.BitsEntropyUnrefined(population, length, stats);
trivialSym = (bitEntropy.NoneZeros == 1) ? bitEntropy.NoneZeroCode : NonTrivialSym;
// The histogram is used if there is at least one non-zero streak.
isUsed = stats.Streaks[1][0] != 0 || stats.Streaks[1][1] != 0;
return bitEntropy.BitsEntropyRefine() + stats.FinalHuffmanCost();
}
private static double ExtraCost(Span<uint> population, int length)
{
double cost = 0.0d;
for (int i = 2; i < length - 2; i++)
{
cost += (i >> 1) * population[i + 2];
}
return cost;
}
private static void AddVector(uint[] a, uint[] b, uint[] output, int size)
{
for (int i = 0; i < size; i++)
{
output[i] = a[i] + b[i];
}
}
}
}

14
src/ImageSharp/Formats/Webp/Lossless/Vp8LLz77Type.cs

@@ -0,0 +1,14 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal enum Vp8LLz77Type
{
Lz77Standard = 1,
Lz77Rle = 2,
Lz77Box = 4
}
}

28
src/ImageSharp/Formats/Webp/Lossless/Vp8LMetadata.cs

@@ -0,0 +1,28 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Buffers;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal class Vp8LMetadata
{
public int ColorCacheSize { get; set; }
public ColorCache ColorCache { get; set; }
public int HuffmanMask { get; set; }
public int HuffmanSubSampleBits { get; set; }
public int HuffmanXSize { get; set; }
public IMemoryOwner<uint> HuffmanImage { get; set; }
public int NumHTreeGroups { get; set; }
public HTreeGroup[] HTreeGroups { get; set; }
public HuffmanCode[] HuffmanTables { get; set; }
}
}

14
src/ImageSharp/Formats/Webp/Lossless/Vp8LMultipliers.cs

@@ -0,0 +1,14 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal struct Vp8LMultipliers
{
public byte GreenToRed;
public byte GreenToBlue;
public byte RedToBlue;
}
}

63
src/ImageSharp/Formats/Webp/Lossless/Vp8LStreaks.cs

@@ -0,0 +1,63 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
internal class Vp8LStreaks
{
/// <summary>
/// Initializes a new instance of the <see cref="Vp8LStreaks"/> class.
/// </summary>
public Vp8LStreaks()
{
this.Counts = new int[2];
this.Streaks = new int[2][];
this.Streaks[0] = new int[2];
this.Streaks[1] = new int[2];
}
/// <summary>
/// Gets the streak count.
/// index: 0=zero streak, 1=non-zero streak.
/// </summary>
public int[] Counts { get; }
/// <summary>
/// Gets the streaks.
/// [zero/non-zero][streak &lt; 3 / streak >= 3].
/// </summary>
public int[][] Streaks { get; }
public double FinalHuffmanCost()
{
// The constants in this function are experimental and were rounded from
// their original 1/8 values when switching to 1/1024 precision.
double retval = InitialHuffmanCost();
// Second coefficient: Many zeros in the histogram are covered efficiently
// by a run-length encode. Originally 2/8.
retval += (this.Counts[0] * 1.5625) + (0.234375 * this.Streaks[0][1]);
// Second coefficient: Constant values are encoded less efficiently, but still
// RLE'ed. Originally 6/8.
retval += (this.Counts[1] * 2.578125) + (0.703125 * this.Streaks[1][1]);
// 0s are usually encoded more efficiently than non-0s.
// Originally 15/8.
retval += 1.796875 * this.Streaks[0][0];
// Originally 26/8.
retval += 3.28125 * this.Streaks[1][0];
return retval;
}
private static double InitialHuffmanCost()
{
// Small bias because Huffman code length is typically not stored in full length.
int huffmanCodeOfHuffmanCodeSize = WebpConstants.CodeLengthCodes * 3;
double smallBias = 9.1;
return huffmanCodeOfHuffmanCodeSize - smallBias;
}
}
}

47
src/ImageSharp/Formats/Webp/Lossless/Vp8LTransform.cs

@@ -0,0 +1,47 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Buffers;
using System.Diagnostics;
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Data associated with a VP8L transformation to reduce the entropy.
/// </summary>
[DebuggerDisplay("Transformtype: {" + nameof(TransformType) + "}")]
internal class Vp8LTransform
{
public Vp8LTransform(Vp8LTransformType transformType, int xSize, int ySize)
{
this.TransformType = transformType;
this.XSize = xSize;
this.YSize = ySize;
}
/// <summary>
/// Gets the transform type.
/// </summary>
public Vp8LTransformType TransformType { get; }
/// <summary>
/// Gets or sets the subsampling bits defining the transform window.
/// </summary>
public int Bits { get; set; }
/// <summary>
/// Gets or sets the transform window x size.
/// </summary>
public int XSize { get; set; }
/// <summary>
/// Gets the transform window y size.
/// </summary>
public int YSize { get; }
/// <summary>
/// Gets or sets the transform data.
/// </summary>
public IMemoryOwner<uint> Data { get; set; }
}
}

37
src/ImageSharp/Formats/Webp/Lossless/Vp8LTransformType.cs

@@ -0,0 +1,37 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossless
{
/// <summary>
/// Enum for the different transform types. Transformations are reversible manipulations of the image data
/// that can reduce the remaining symbolic entropy by modeling spatial and color correlations.
/// Transformations can make the final compression more dense.
/// </summary>
internal enum Vp8LTransformType : uint
{
/// <summary>
/// The predictor transform can be used to reduce entropy by exploiting the fact that neighboring pixels are often correlated.
/// </summary>
PredictorTransform = 0,
/// <summary>
/// The goal of the color transform is to de-correlate the R, G and B values of each pixel.
/// Color transform keeps the green (G) value as it is, transforms red (R) based on green and transforms blue (B) based on green and then based on red.
/// </summary>
CrossColorTransform = 1,
/// <summary>
/// The subtract green transform subtracts green values from red and blue values of each pixel.
/// When this transform is present, the decoder needs to add the green value to both red and blue.
/// There is no data associated with this transform.
/// </summary>
SubtractGreen = 2,
/// <summary>
/// If there are not many unique pixel values, it may be more efficient to create a color index array and replace the pixel values by the array's indices.
/// The color indexing transform achieves this.
/// </summary>
ColorIndexingTransform = 3,
}
}
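The subtract-green transform is simple enough to show end to end. This standalone sketch (editor-added; the class name and pixel value are hypothetical, and it assumes the 0xAARRGGBB packing used elsewhere in this encoder) applies the forward transform and the inverse step the decoder performs:

using System;

internal static class SubtractGreenSketch
{
    // Forward: subtract the green channel from red and blue (modulo 256).
    private static uint Forward(uint argb)
    {
        uint green = (argb >> 8) & 0xff;
        uint red = ((argb >> 16) - green) & 0xff;
        uint blue = (argb - green) & 0xff;
        return (argb & 0xff00ff00) | (red << 16) | blue;
    }

    // Inverse (what the decoder does): add the green channel back to red and blue.
    private static uint Inverse(uint argb)
    {
        uint green = (argb >> 8) & 0xff;
        uint red = ((argb >> 16) + green) & 0xff;
        uint blue = (argb + green) & 0xff;
        return (argb & 0xff00ff00) | (red << 16) | blue;
    }

    internal static void Run()
    {
        uint pixel = 0xff80664c;            // A=0xff, R=0x80, G=0x66, B=0x4c
        uint transformed = Forward(pixel);  // 0xff1a66e6: R and B now store differences
        Console.WriteLine(Inverse(transformed) == pixel); // True
    }
}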

1001
src/ImageSharp/Formats/Webp/Lossless/WebpLosslessDecoder.cs

File diff suppressed because it is too large

BIN
src/ImageSharp/Formats/Webp/Lossless/Webp_Lossless_Bitstream_Specification.pdf

Binary file not shown.

28
src/ImageSharp/Formats/Webp/Lossy/IntraPredictionMode.cs

@@ -0,0 +1,28 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal enum IntraPredictionMode
{
/// <summary>
/// Predict DC using row above and column to the left.
/// </summary>
DcPrediction = 0,
/// <summary>
/// Propagate second differences a la "True Motion".
/// </summary>
TrueMotion = 1,
/// <summary>
/// Predict rows using row above.
/// </summary>
VPrediction = 2,
/// <summary>
/// Predict columns using column to the left.
/// </summary>
HPrediction = 3,
}
}

26
src/ImageSharp/Formats/Webp/Lossy/LoopFilter.cs

@@ -0,0 +1,26 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Enum for the different loop filters used. VP8 supports two types of loop filters.
/// </summary>
internal enum LoopFilter
{
/// <summary>
/// No filter is used.
/// </summary>
None = 0,
/// <summary>
/// Simple loop filter.
/// </summary>
Simple = 1,
/// <summary>
/// Complex loop filter.
/// </summary>
Complex = 2,
}
}

1086
src/ImageSharp/Formats/Webp/Lossy/LossyUtils.cs

File diff suppressed because it is too large

76
src/ImageSharp/Formats/Webp/Lossy/PassStats.cs

@@ -0,0 +1,76 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Class for organizing convergence in either size or PSNR.
/// </summary>
internal class PassStats
{
public PassStats(long targetSize, float targetPsnr, int qMin, int qMax, int quality)
{
bool doSizeSearch = targetSize != 0;
this.IsFirst = true;
this.Dq = 10.0f;
this.Qmin = qMin;
this.Qmax = qMax;
this.Q = Numerics.Clamp(quality, qMin, qMax);
this.LastQ = this.Q;
this.Target = doSizeSearch ? targetSize
: targetPsnr > 0.0f ? targetPsnr
: 40.0f; // default, just in case
this.Value = 0.0f;
this.LastValue = 0.0f;
this.DoSizeSearch = doSizeSearch;
}
public bool IsFirst { get; set; }
public float Dq { get; set; }
public float Q { get; set; }
public float LastQ { get; set; }
public float Qmin { get; }
public float Qmax { get; }
public double Value { get; set; } // PSNR or size
public double LastValue { get; set; }
public double Target { get; }
public bool DoSizeSearch { get; }
public float ComputeNextQ()
{
float dq;
if (this.IsFirst)
{
dq = this.Value > this.Target ? -this.Dq : this.Dq;
this.IsFirst = false;
}
else if (this.Value != this.LastValue)
{
double slope = (this.Target - this.Value) / (this.LastValue - this.Value);
dq = (float)(slope * (this.LastQ - this.Q));
}
else
{
dq = 0.0f; // we're done?!
}
// Limit variable to avoid large swings.
this.Dq = Numerics.Clamp(dq, -30.0f, 30.0f);
this.LastQ = this.Q;
this.LastValue = this.Value;
this.Q = Numerics.Clamp(this.Q + this.Dq, this.Qmin, this.Qmax);
return this.Q;
}
}
}
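ComputeNextQ is essentially a secant-method step on q: the slope (target - value) / (lastValue - value) rescales the previous q step. The standalone sketch below (editor-added; the toy Encode model, sizes and q values are all made up) runs two passes of such a size search, and because the toy model is linear the second step lands exactly on the target size:

using System;

internal static class QualitySearchSketch
{
    internal static void Run()
    {
        float q = 75f, lastQ = 75f, dq = 10f;
        const double target = 20_000;        // desired compressed size in bytes
        double value = 0, lastValue = 0;
        bool isFirst = true;

        // Toy stand-in for the encoder: smaller q gives a smaller output.
        double Encode(float quality) => 5_000 + (quality * 300);

        for (int pass = 0; pass < 2; pass++)
        {
            value = Encode(q);
            if (isFirst)
            {
                dq = value > target ? -dq : dq;        // first pass only picks a direction
                isFirst = false;
            }
            else if (value != lastValue)
            {
                double slope = (target - value) / (lastValue - value);
                dq = (float)(slope * (lastQ - q));     // secant step towards the target
            }

            dq = Math.Clamp(dq, -30f, 30f);
            lastQ = q;
            lastValue = value;
            q = Math.Clamp(q + dq, 0f, 100f);
            Console.WriteLine($"pass {pass}: size {value:F0}, next q = {q:F1}");
        }

        // pass 0: size 27500, next q = 65.0
        // pass 1: size 24500, next q = 50.0 (q = 50 yields exactly 20000 in this toy model)
    }
}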

637
src/ImageSharp/Formats/Webp/Lossy/QuantEnc.cs

@@ -0,0 +1,637 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Runtime.CompilerServices;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Quantization methods.
/// </summary>
internal static class QuantEnc
{
private static readonly byte[] Zigzag = { 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 };
private static readonly ushort[] WeightY = { 38, 32, 20, 9, 32, 28, 17, 7, 20, 17, 10, 4, 9, 7, 4, 2 };
private const int MaxLevel = 2047;
// Diffusion weights. We under-correct a bit (15/16th of the error is actually
// diffused) to avoid 'rainbow' chessboard pattern of blocks at q~=0.
private const int C1 = 7; // fraction of error sent to the 4x4 block below
private const int C2 = 8; // fraction of error sent to the 4x4 block on the right
private const int DSHIFT = 4;
private const int DSCALE = 1; // storage descaling, needed to make the error fit byte
public static void PickBestIntra16(Vp8EncIterator it, ref Vp8ModeScore rd, Vp8SegmentInfo[] segmentInfos, Vp8EncProba proba)
{
const int numBlocks = 16;
Vp8SegmentInfo dqm = segmentInfos[it.CurrentMacroBlockInfo.Segment];
int lambda = dqm.LambdaI16;
int tlambda = dqm.TLambda;
Span<byte> src = it.YuvIn.AsSpan(Vp8EncIterator.YOffEnc);
var rdTmp = new Vp8ModeScore();
Vp8ModeScore rdCur = rdTmp;
Vp8ModeScore rdBest = rd;
int mode;
bool isFlat = IsFlatSource16(src);
rd.ModeI16 = -1;
for (mode = 0; mode < WebpConstants.NumPredModes; ++mode)
{
// scratch buffer.
Span<byte> tmpDst = it.YuvOut2.AsSpan(Vp8EncIterator.YOffEnc);
rdCur.ModeI16 = mode;
// Reconstruct.
rdCur.Nz = (uint)ReconstructIntra16(it, dqm, rdCur, tmpDst, mode);
// Measure RD-score.
rdCur.D = LossyUtils.Vp8Sse16X16(src, tmpDst);
rdCur.SD = tlambda != 0 ? Mult8B(tlambda, LossyUtils.Vp8Disto16X16(src, tmpDst, WeightY)) : 0;
rdCur.H = WebpConstants.Vp8FixedCostsI16[mode];
rdCur.R = it.GetCostLuma16(rdCur, proba);
if (isFlat)
{
// Refine the first impression (which was in pixel space).
isFlat = IsFlat(rdCur.YAcLevels, numBlocks, WebpConstants.FlatnessLimitI16);
if (isFlat)
{
// Block is very flat. We put emphasis on the distortion being very low!
rdCur.D *= 2;
rdCur.SD *= 2;
}
}
// Since we always examine Intra16 first, we can overwrite *rd directly.
rdCur.SetRdScore(lambda);
if (mode == 0 || rdCur.Score < rdBest.Score)
{
Vp8ModeScore tmp = rdCur;
rdCur = rdBest;
rdBest = tmp;
it.SwapOut();
}
}
if (rdBest != rd)
{
rd = rdBest;
}
// Finalize score for mode decision.
rd.SetRdScore(dqm.LambdaMode);
it.SetIntra16Mode(rd.ModeI16);
// If we have a blocky macroblock (only DCs are non-zero) with fairly high
// distortion, record the max delta so we can later adjust the minimal filtering
// strength needed to smooth these blocks out.
if ((rd.Nz & 0x100ffff) == 0x1000000 && rd.D > dqm.MinDisto)
{
dqm.StoreMaxDelta(rd.YDcLevels);
}
}
public static bool PickBestIntra4(Vp8EncIterator it, ref Vp8ModeScore rd, Vp8SegmentInfo[] segmentInfos, Vp8EncProba proba, int maxI4HeaderBits)
{
Vp8SegmentInfo dqm = segmentInfos[it.CurrentMacroBlockInfo.Segment];
int lambda = dqm.LambdaI4;
int tlambda = dqm.TLambda;
Span<byte> src0 = it.YuvIn.AsSpan(Vp8EncIterator.YOffEnc);
Span<byte> bestBlocks = it.YuvOut2.AsSpan(Vp8EncIterator.YOffEnc);
int totalHeaderBits = 0;
var rdBest = new Vp8ModeScore();
if (maxI4HeaderBits == 0)
{
return false;
}
rdBest.InitScore();
rdBest.H = 211; // '211' is the value of VP8BitCost(0, 145)
rdBest.SetRdScore(dqm.LambdaMode);
it.StartI4();
do
{
int numBlocks = 1;
var rdi4 = new Vp8ModeScore();
int mode;
int bestMode = -1;
Span<byte> src = src0.Slice(WebpLookupTables.Vp8Scan[it.I4]);
short[] modeCosts = it.GetCostModeI4(rd.ModesI4);
Span<byte> bestBlock = bestBlocks.Slice(WebpLookupTables.Vp8Scan[it.I4]);
Span<byte> tmpDst = it.Scratch.AsSpan();
tmpDst.Fill(0);
rdi4.InitScore();
it.MakeIntra4Preds();
for (mode = 0; mode < WebpConstants.NumBModes; ++mode)
{
var rdTmp = new Vp8ModeScore();
short[] tmpLevels = new short[16];
// Reconstruct.
rdTmp.Nz = (uint)ReconstructIntra4(it, dqm, tmpLevels, src, tmpDst, mode);
// Compute RD-score.
rdTmp.D = LossyUtils.Vp8Sse4X4(src, tmpDst);
rdTmp.SD = tlambda != 0 ? Mult8B(tlambda, LossyUtils.Vp8Disto4X4(src, tmpDst, WeightY)) : 0;
rdTmp.H = modeCosts[mode];
// Add a flatness penalty to avoid a flat area being mispredicted by a complex mode.
if (mode > 0 && IsFlat(tmpLevels, numBlocks, WebpConstants.FlatnessLimitI4))
{
rdTmp.R = WebpConstants.FlatnessPenality * numBlocks;
}
else
{
rdTmp.R = 0;
}
// early-out check.
rdTmp.SetRdScore(lambda);
if (bestMode >= 0 && rdTmp.Score >= rdi4.Score)
{
continue;
}
// finish computing score.
rdTmp.R += it.GetCostLuma4(tmpLevels, proba);
rdTmp.SetRdScore(lambda);
if (bestMode < 0 || rdTmp.Score < rdi4.Score)
{
rdi4.CopyScore(rdTmp);
bestMode = mode;
Span<byte> tmp = tmpDst;
tmpDst = bestBlock;
bestBlock = tmp;
tmpLevels.CopyTo(rdBest.YAcLevels.AsSpan(it.I4 * 16, 16));
}
}
rdi4.SetRdScore(dqm.LambdaMode);
rdBest.AddScore(rdi4);
if (rdBest.Score >= rd.Score)
{
return false;
}
totalHeaderBits += (int)rdi4.H; // <- equal to modeCosts[bestMode];
if (totalHeaderBits > maxI4HeaderBits)
{
return false;
}
// Copy selected samples to the right place.
LossyUtils.Vp8Copy4X4(bestBlock, bestBlocks.Slice(WebpLookupTables.Vp8Scan[it.I4]));
rd.ModesI4[it.I4] = (byte)bestMode;
it.TopNz[it.I4 & 3] = it.LeftNz[it.I4 >> 2] = rdi4.Nz != 0 ? 1 : 0;
}
while (it.RotateI4(bestBlocks));
// Finalize state.
rd.CopyScore(rdBest);
it.SetIntra4Mode(rd.ModesI4);
it.SwapOut();
rdBest.YAcLevels.AsSpan().CopyTo(rd.YAcLevels);
// Select intra4x4 over intra16x16.
return true;
}
public static void PickBestUv(Vp8EncIterator it, ref Vp8ModeScore rd, Vp8SegmentInfo[] segmentInfos, Vp8EncProba proba)
{
const int numBlocks = 8;
Vp8SegmentInfo dqm = segmentInfos[it.CurrentMacroBlockInfo.Segment];
int lambda = dqm.LambdaUv;
Span<byte> src = it.YuvIn.AsSpan(Vp8EncIterator.UOffEnc);
Span<byte> tmpDst = it.YuvOut2.AsSpan(Vp8EncIterator.UOffEnc);
Span<byte> dst0 = it.YuvOut.AsSpan(Vp8EncIterator.UOffEnc);
Span<byte> dst = dst0;
var rdBest = new Vp8ModeScore();
int mode;
rd.ModeUv = -1;
rdBest.InitScore();
for (mode = 0; mode < WebpConstants.NumPredModes; ++mode)
{
var rdUv = new Vp8ModeScore();
// Reconstruct
rdUv.Nz = (uint)ReconstructUv(it, dqm, rdUv, tmpDst, mode);
// Compute RD-score
rdUv.D = LossyUtils.Vp8Sse16X8(src, tmpDst);
rdUv.SD = 0; // not calling TDisto here: it tends to flatten areas.
rdUv.H = WebpConstants.Vp8FixedCostsUv[mode];
rdUv.R = it.GetCostUv(rdUv, proba);
if (mode > 0 && IsFlat(rdUv.UvLevels, numBlocks, WebpConstants.FlatnessLimitIUv))
{
rdUv.R += WebpConstants.FlatnessPenality * numBlocks;
}
rdUv.SetRdScore(lambda);
if (mode == 0 || rdUv.Score < rdBest.Score)
{
rdBest.CopyScore(rdUv);
rd.ModeUv = mode;
rdUv.UvLevels.CopyTo(rd.UvLevels.AsSpan());
for (int i = 0; i < 2; i++)
{
rd.Derr[i, 0] = rdUv.Derr[i, 0];
rd.Derr[i, 1] = rdUv.Derr[i, 1];
rd.Derr[i, 2] = rdUv.Derr[i, 2];
}
Span<byte> tmp = dst;
dst = tmpDst;
tmpDst = tmp;
}
}
it.SetIntraUvMode(rd.ModeUv);
rd.AddScore(rdBest);
if (dst != dst0)
{
// copy 16x8 block if needed.
LossyUtils.Vp8Copy16X8(dst, dst0);
}
// Store diffusion errors for next block.
it.StoreDiffusionErrors(rd);
}
public static int ReconstructIntra16(Vp8EncIterator it, Vp8SegmentInfo dqm, Vp8ModeScore rd, Span<byte> yuvOut, int mode)
{
Span<byte> reference = it.YuvP.AsSpan(Vp8Encoding.Vp8I16ModeOffsets[mode]);
Span<byte> src = it.YuvIn.AsSpan(Vp8EncIterator.YOffEnc);
int nz = 0;
int n;
short[] dcTmp = new short[16];
short[] tmp = new short[16 * 16];
Span<short> tmpSpan = tmp.AsSpan();
for (n = 0; n < 16; n += 2)
{
Vp8Encoding.FTransform2(src.Slice(WebpLookupTables.Vp8Scan[n]), reference.Slice(WebpLookupTables.Vp8Scan[n]), tmpSpan.Slice(n * 16, 16), tmpSpan.Slice((n + 1) * 16, 16));
}
Vp8Encoding.FTransformWht(tmp, dcTmp);
nz |= QuantizeBlock(dcTmp, rd.YDcLevels, dqm.Y2) << 24;
for (n = 0; n < 16; n += 2)
{
// Zero-out the first coeff, so that: a) nz is correct below, and
// b) finding 'last' non-zero coeffs in SetResidualCoeffs() is simplified.
tmp[n * 16] = tmp[(n + 1) * 16] = 0;
nz |= Quantize2Blocks(tmpSpan.Slice(n * 16, 32), rd.YAcLevels.AsSpan(n * 16, 32), dqm.Y1) << n;
}
// Transform back.
LossyUtils.TransformWht(dcTmp, tmpSpan);
for (n = 0; n < 16; n += 2)
{
Vp8Encoding.ITransform(reference.Slice(WebpLookupTables.Vp8Scan[n]), tmpSpan.Slice(n * 16, 32), yuvOut.Slice(WebpLookupTables.Vp8Scan[n]), true);
}
return nz;
}
public static int ReconstructIntra4(Vp8EncIterator it, Vp8SegmentInfo dqm, Span<short> levels, Span<byte> src, Span<byte> yuvOut, int mode)
{
Span<byte> reference = it.YuvP.AsSpan(Vp8Encoding.Vp8I4ModeOffsets[mode]);
short[] tmp = new short[16];
Vp8Encoding.FTransform(src, reference, tmp);
int nz = QuantizeBlock(tmp, levels, dqm.Y1);
Vp8Encoding.ITransform(reference, tmp, yuvOut, false);
return nz;
}
public static int ReconstructUv(Vp8EncIterator it, Vp8SegmentInfo dqm, Vp8ModeScore rd, Span<byte> yuvOut, int mode)
{
Span<byte> reference = it.YuvP.AsSpan(Vp8Encoding.Vp8UvModeOffsets[mode]);
Span<byte> src = it.YuvIn.AsSpan(Vp8EncIterator.UOffEnc);
int nz = 0;
int n;
short[] tmp = new short[8 * 16];
for (n = 0; n < 8; n += 2)
{
Vp8Encoding.FTransform2(
src.Slice(WebpLookupTables.Vp8ScanUv[n]),
reference.Slice(WebpLookupTables.Vp8ScanUv[n]),
tmp.AsSpan(n * 16, 16),
tmp.AsSpan((n + 1) * 16, 16));
}
CorrectDcValues(it, dqm.Uv, tmp, rd);
for (n = 0; n < 8; n += 2)
{
nz |= Quantize2Blocks(tmp.AsSpan(n * 16, 32), rd.UvLevels.AsSpan(n * 16, 32), dqm.Uv) << n;
}
for (n = 0; n < 8; n += 2)
{
Vp8Encoding.ITransform(reference.Slice(WebpLookupTables.Vp8ScanUv[n]), tmp.AsSpan(n * 16, 32), yuvOut.Slice(WebpLookupTables.Vp8ScanUv[n]), true);
}
return nz << 16;
}
// Refine intra16/intra4 sub-modes based on distortion only (not rate).
public static void RefineUsingDistortion(Vp8EncIterator it, Vp8SegmentInfo[] segmentInfos, Vp8ModeScore rd, bool tryBothModes, bool refineUvMode, int mbHeaderLimit)
{
long bestScore = Vp8ModeScore.MaxCost;
int nz = 0;
int mode;
bool isI16 = tryBothModes || it.CurrentMacroBlockInfo.MacroBlockType == Vp8MacroBlockType.I16X16;
Vp8SegmentInfo dqm = segmentInfos[it.CurrentMacroBlockInfo.Segment];
// Some empirical constants of approximate order of magnitude.
const int lambdaDi16 = 106;
const int lambdaDi4 = 11;
const int lambdaDuv = 120;
long scoreI4 = dqm.I4Penalty;
long i4BitSum = 0;
long bitLimit = tryBothModes
? mbHeaderLimit
: Vp8ModeScore.MaxCost; // no early-out allowed.
if (isI16)
{
int bestMode = -1;
Span<byte> src = it.YuvIn.AsSpan(Vp8EncIterator.YOffEnc);
for (mode = 0; mode < WebpConstants.NumPredModes; ++mode)
{
Span<byte> reference = it.YuvP.AsSpan(Vp8Encoding.Vp8I16ModeOffsets[mode]);
long score = (LossyUtils.Vp8Sse16X16(src, reference) * WebpConstants.RdDistoMult) + (WebpConstants.Vp8FixedCostsI16[mode] * lambdaDi16);
if (mode > 0 && WebpConstants.Vp8FixedCostsI16[mode] > bitLimit)
{
continue;
}
if (score < bestScore)
{
bestMode = mode;
bestScore = score;
}
}
if (it.X == 0 || it.Y == 0)
{
// Avoid starting a checkerboard resonance from the border. See bug #432 of libwebp.
if (IsFlatSource16(src))
{
bestMode = it.X == 0 ? 0 : 2;
tryBothModes = false; // Stick to i16.
}
}
it.SetIntra16Mode(bestMode);
// We'll reconstruct later, if i16 mode actually gets selected.
}
// Next, evaluate Intra4.
if (tryBothModes || !isI16)
{
// We don't evaluate the rate here, but just account for it through a
// constant penalty (i4 mode usually needs more bits compared to i16).
isI16 = false;
it.StartI4();
do
{
int bestI4Mode = -1;
long bestI4Score = Vp8ModeScore.MaxCost;
Span<byte> src = it.YuvIn.AsSpan(Vp8EncIterator.YOffEnc + WebpLookupTables.Vp8Scan[it.I4]);
short[] modeCosts = it.GetCostModeI4(rd.ModesI4);
it.MakeIntra4Preds();
for (mode = 0; mode < WebpConstants.NumBModes; ++mode)
{
Span<byte> reference = it.YuvP.AsSpan(Vp8Encoding.Vp8I4ModeOffsets[mode]);
long score = (LossyUtils.Vp8Sse4X4(src, reference) * WebpConstants.RdDistoMult) + (modeCosts[mode] * lambdaDi4);
if (score < bestI4Score)
{
bestI4Mode = mode;
bestI4Score = score;
}
}
i4BitSum += modeCosts[bestI4Mode];
rd.ModesI4[it.I4] = (byte)bestI4Mode;
scoreI4 += bestI4Score;
if (scoreI4 >= bestScore || i4BitSum > bitLimit)
{
// Intra4 won't be better than Intra16. Bail out and pick Intra16.
isI16 = true;
break;
}
else
{
// Reconstruct partial block inside YuvOut2 buffer
Span<byte> tmpDst = it.YuvOut2.AsSpan(Vp8EncIterator.YOffEnc + WebpLookupTables.Vp8Scan[it.I4]);
nz |= ReconstructIntra4(it, dqm, rd.YAcLevels.AsSpan(it.I4 * 16, 16), src, tmpDst, bestI4Mode) << it.I4;
}
}
while (it.RotateI4(it.YuvOut2.AsSpan(Vp8EncIterator.YOffEnc)));
}
// Final reconstruction, depending on which mode is selected.
if (!isI16)
{
it.SetIntra4Mode(rd.ModesI4);
it.SwapOut();
bestScore = scoreI4;
}
else
{
int intra16Mode = it.Preds[it.PredIdx];
nz = ReconstructIntra16(it, dqm, rd, it.YuvOut.AsSpan(Vp8EncIterator.YOffEnc), intra16Mode);
}
// ... and UV!
if (refineUvMode)
{
int bestMode = -1;
long bestUvScore = Vp8ModeScore.MaxCost;
Span<byte> src = it.YuvIn.AsSpan(Vp8EncIterator.UOffEnc);
for (mode = 0; mode < WebpConstants.NumPredModes; ++mode)
{
Span<byte> reference = it.YuvP.AsSpan(Vp8Encoding.Vp8UvModeOffsets[mode]);
long score = (LossyUtils.Vp8Sse16X8(src, reference) * WebpConstants.RdDistoMult) + (WebpConstants.Vp8FixedCostsUv[mode] * lambdaDuv);
if (score < bestUvScore)
{
bestMode = mode;
bestUvScore = score;
}
}
it.SetIntraUvMode(bestMode);
}
nz |= ReconstructUv(it, dqm, rd, it.YuvOut.AsSpan(Vp8EncIterator.UOffEnc), it.CurrentMacroBlockInfo.UvMode);
rd.Nz = (uint)nz;
rd.Score = bestScore;
}
[MethodImpl(InliningOptions.ShortMethod)]
public static int Quantize2Blocks(Span<short> input, Span<short> output, Vp8Matrix mtx)
{
int nz = QuantizeBlock(input, output, mtx) << 0;
nz |= QuantizeBlock(input.Slice(1 * 16), output.Slice(1 * 16), mtx) << 1;
return nz;
}
public static int QuantizeBlock(Span<short> input, Span<short> output, Vp8Matrix mtx)
{
int last = -1;
int n;
for (n = 0; n < 16; ++n)
{
int j = Zigzag[n];
bool sign = input[j] < 0;
uint coeff = (uint)((sign ? -input[j] : input[j]) + mtx.Sharpen[j]);
if (coeff > mtx.ZThresh[j])
{
uint q = mtx.Q[j];
uint iQ = mtx.IQ[j];
uint b = mtx.Bias[j];
int level = QuantDiv(coeff, iQ, b);
if (level > MaxLevel)
{
level = MaxLevel;
}
if (sign)
{
level = -level;
}
input[j] = (short)(level * (int)q);
output[n] = (short)level;
if (level != 0)
{
last = n;
}
}
else
{
output[n] = 0;
input[j] = 0;
}
}
return last >= 0 ? 1 : 0;
}
// Quantize as usual, but also compute and return the quantization error.
// Error is already divided by DSHIFT.
public static int QuantizeSingle(Span<short> v, Vp8Matrix mtx)
{
int v0 = v[0];
bool sign = v0 < 0;
if (sign)
{
v0 = -v0;
}
if (v0 > (int)mtx.ZThresh[0])
{
int qV = QuantDiv((uint)v0, mtx.IQ[0], mtx.Bias[0]) * mtx.Q[0];
int err = v0 - qV;
v[0] = (short)(sign ? -qV : qV);
return (sign ? -err : err) >> DSCALE;
}
v[0] = 0;
return (sign ? -v0 : v0) >> DSCALE;
}
public static void CorrectDcValues(Vp8EncIterator it, Vp8Matrix mtx, short[] tmp, Vp8ModeScore rd)
{
#pragma warning disable SA1005 // Single line comments should begin with single space
// | top[0] | top[1]
// --------+--------+---------
// left[0] | tmp[0] tmp[1] <-> err0 err1
// left[1] | tmp[2] tmp[3] err2 err3
//
// Final errors {err1,err2,err3} are preserved and later restored
// as top[]/left[] on the next block.
#pragma warning restore SA1005 // Single line comments should begin with single space
for (int ch = 0; ch <= 1; ++ch)
{
Span<sbyte> top = it.TopDerr.AsSpan((it.X * 4) + ch, 2);
Span<sbyte> left = it.LeftDerr.AsSpan(ch, 2);
Span<short> c = tmp.AsSpan(ch * 4 * 16, 4 * 16);
c[0] += (short)(((C1 * top[0]) + (C2 * left[0])) >> (DSHIFT - DSCALE));
int err0 = QuantizeSingle(c, mtx);
c[1 * 16] += (short)(((C1 * top[1]) + (C2 * err0)) >> (DSHIFT - DSCALE));
int err1 = QuantizeSingle(c.Slice(1 * 16), mtx);
c[2 * 16] += (short)(((C1 * err0) + (C2 * left[1])) >> (DSHIFT - DSCALE));
int err2 = QuantizeSingle(c.Slice(2 * 16), mtx);
c[3 * 16] += (short)(((C1 * err1) + (C2 * err2)) >> (DSHIFT - DSCALE));
int err3 = QuantizeSingle(c.Slice(3 * 16), mtx);
rd.Derr[ch, 0] = err1;
rd.Derr[ch, 1] = err2;
rd.Derr[ch, 2] = err3;
}
}
[MethodImpl(InliningOptions.ShortMethod)]
private static bool IsFlatSource16(Span<byte> src)
{
uint v = src[0] * 0x01010101u;
Span<byte> vSpan = BitConverter.GetBytes(v).AsSpan();
for (int i = 0; i < 16; i++)
{
if (!src.Slice(0, 4).SequenceEqual(vSpan) || !src.Slice(4, 4).SequenceEqual(vSpan) ||
!src.Slice(8, 4).SequenceEqual(vSpan) || !src.Slice(12, 4).SequenceEqual(vSpan))
{
return false;
}
src = src.Slice(WebpConstants.Bps);
}
return true;
}
[MethodImpl(InliningOptions.ShortMethod)]
private static bool IsFlat(Span<short> levels, int numBlocks, int thresh)
{
int score = 0;
while (numBlocks-- > 0)
{
for (int i = 1; i < 16; i++)
{
// omit DC, we're only interested in AC
score += levels[i] != 0 ? 1 : 0;
if (score > thresh)
{
return false;
}
}
levels = levels.Slice(16);
}
return true;
}
[MethodImpl(InliningOptions.ShortMethod)]
private static int Mult8B(int a, int b) => ((a * b) + 128) >> 8;
[MethodImpl(InliningOptions.ShortMethod)]
private static int QuantDiv(uint n, uint iQ, uint b) => (int)(((n * iQ) + b) >> WebpConstants.QFix);
}
}
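The quantizer above works in fixed point: each sign-stripped, sharpened coefficient is multiplied by the reciprocal step iQ, biased, and shifted down by QFix, then clamped to MaxLevel. Below is a minimal standalone sketch of that division, assuming QFix = 17 (libwebp's value) and made-up step/bias values rather than the library's Vp8Matrix; it is illustrative only, not ImageSharp code.

// Illustrative only: QFix and the step/bias values are assumptions, not the library's Vp8Matrix.
using System;

internal static class QuantSketch
{
    private const int QFix = 17; // assumption: libwebp's fixed-point precision

    private static int QuantDiv(uint n, uint iQ, uint b) => (int)(((n * iQ) + b) >> QFix);

    private static void Main()
    {
        uint q = 20;                          // quantizer step size
        uint iQ = (uint)((1 << QFix) / q);    // reciprocal of the step in Q17 fixed point
        uint bias = 1u << (QFix - 1);         // round-to-nearest style bias, for illustration
        int level = QuantDiv(250, iQ, bias);  // -> 12 (roughly 250 / 20)
        Console.WriteLine($"level = {level}, reconstructed = {level * q}"); // 12, 240
    }
}

The gap between the input (250) and the reconstruction (level * q = 240) is the same kind of rounding error that QuantizeSingle returns so CorrectDcValues can diffuse it into the neighbouring DC coefficients.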

28
src/ImageSharp/Formats/Webp/Lossy/Vp8BandProbas.cs

@@ -0,0 +1,28 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// All the probabilities associated to one band.
/// </summary>
internal class Vp8BandProbas
{
/// <summary>
/// Initializes a new instance of the <see cref="Vp8BandProbas"/> class.
/// </summary>
public Vp8BandProbas()
{
this.Probabilities = new Vp8ProbaArray[WebpConstants.NumCtx];
for (int i = 0; i < WebpConstants.NumCtx; i++)
{
this.Probabilities[i] = new Vp8ProbaArray();
}
}
/// <summary>
/// Gets the Probabilities.
/// </summary>
public Vp8ProbaArray[] Probabilities { get; }
}
}

15
src/ImageSharp/Formats/Webp/Lossy/Vp8CostArray.cs

@@ -0,0 +1,15 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8CostArray
{
/// <summary>
/// Initializes a new instance of the <see cref="Vp8CostArray"/> class.
/// </summary>
public Vp8CostArray() => this.Costs = new ushort[67 + 1];
public ushort[] Costs { get; }
}
}

25
src/ImageSharp/Formats/Webp/Lossy/Vp8Costs.cs

@@ -0,0 +1,25 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8Costs
{
/// <summary>
/// Initializes a new instance of the <see cref="Vp8Costs"/> class.
/// </summary>
public Vp8Costs()
{
this.Costs = new Vp8CostArray[WebpConstants.NumCtx];
for (int i = 0; i < WebpConstants.NumCtx; i++)
{
this.Costs[i] = new Vp8CostArray();
}
}
/// <summary>
/// Gets the Costs.
/// </summary>
public Vp8CostArray[] Costs { get; }
}
}

341
src/ImageSharp/Formats/Webp/Lossy/Vp8Decoder.cs

@@ -0,0 +1,341 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Buffers;
using SixLabors.ImageSharp.Formats.Webp.BitReader;
using SixLabors.ImageSharp.Memory;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Holds information for decoding a lossy webp image.
/// </summary>
internal class Vp8Decoder : IDisposable
{
private Vp8MacroBlock leftMacroBlock;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8Decoder"/> class.
/// </summary>
/// <param name="frameHeader">The frame header.</param>
/// <param name="pictureHeader">The picture header.</param>
/// <param name="segmentHeader">The segment header.</param>
/// <param name="probabilities">The probabilities.</param>
/// <param name="memoryAllocator">Used for allocating memory for the pixel data output and the temporary buffers.</param>
public Vp8Decoder(Vp8FrameHeader frameHeader, Vp8PictureHeader pictureHeader, Vp8SegmentHeader segmentHeader, Vp8Proba probabilities, MemoryAllocator memoryAllocator)
{
this.FilterHeader = new Vp8FilterHeader();
this.FrameHeader = frameHeader;
this.PictureHeader = pictureHeader;
this.SegmentHeader = segmentHeader;
this.Probabilities = probabilities;
this.IntraL = new byte[4];
this.MbWidth = (int)((this.PictureHeader.Width + 15) >> 4);
this.MbHeight = (int)((this.PictureHeader.Height + 15) >> 4);
this.CacheYStride = 16 * this.MbWidth;
this.CacheUvStride = 8 * this.MbWidth;
this.MacroBlockInfo = new Vp8MacroBlock[this.MbWidth + 1];
this.MacroBlockData = new Vp8MacroBlockData[this.MbWidth];
this.YuvTopSamples = new Vp8TopSamples[this.MbWidth];
this.FilterInfo = new Vp8FilterInfo[this.MbWidth];
for (int i = 0; i < this.MbWidth; i++)
{
this.MacroBlockInfo[i] = new Vp8MacroBlock();
this.MacroBlockData[i] = new Vp8MacroBlockData();
this.YuvTopSamples[i] = new Vp8TopSamples();
this.FilterInfo[i] = new Vp8FilterInfo();
}
this.MacroBlockInfo[this.MbWidth] = new Vp8MacroBlock();
this.DeQuantMatrices = new Vp8QuantMatrix[WebpConstants.NumMbSegments];
this.FilterStrength = new Vp8FilterInfo[WebpConstants.NumMbSegments, 2];
for (int i = 0; i < WebpConstants.NumMbSegments; i++)
{
this.DeQuantMatrices[i] = new Vp8QuantMatrix();
for (int j = 0; j < 2; j++)
{
this.FilterStrength[i, j] = new Vp8FilterInfo();
}
}
uint width = pictureHeader.Width;
uint height = pictureHeader.Height;
int extraRows = WebpConstants.FilterExtraRows[(int)LoopFilter.Complex]; // assuming worst case: complex filter
int extraY = extraRows * this.CacheYStride;
int extraUv = extraRows / 2 * this.CacheUvStride;
this.YuvBuffer = memoryAllocator.Allocate<byte>((WebpConstants.Bps * 17) + (WebpConstants.Bps * 9) + extraY);
this.CacheY = memoryAllocator.Allocate<byte>((16 * this.CacheYStride) + extraY);
int cacheUvSize = (16 * this.CacheUvStride) + extraUv;
this.CacheU = memoryAllocator.Allocate<byte>(cacheUvSize);
this.CacheV = memoryAllocator.Allocate<byte>(cacheUvSize);
this.TmpYBuffer = memoryAllocator.Allocate<byte>((int)width);
this.TmpUBuffer = memoryAllocator.Allocate<byte>((int)width);
this.TmpVBuffer = memoryAllocator.Allocate<byte>((int)width);
this.Pixels = memoryAllocator.Allocate<byte>((int)(width * height * 4));
this.YuvBuffer.Memory.Span.Fill(205);
this.CacheY.Memory.Span.Fill(205);
this.CacheU.Memory.Span.Fill(205);
this.CacheV.Memory.Span.Fill(205);
this.Vp8BitReaders = new Vp8BitReader[WebpConstants.MaxNumPartitions];
}
/// <summary>
/// Gets the frame header.
/// </summary>
public Vp8FrameHeader FrameHeader { get; }
/// <summary>
/// Gets the picture header.
/// </summary>
public Vp8PictureHeader PictureHeader { get; }
/// <summary>
/// Gets the filter header.
/// </summary>
public Vp8FilterHeader FilterHeader { get; }
/// <summary>
/// Gets the segment header.
/// </summary>
public Vp8SegmentHeader SegmentHeader { get; }
/// <summary>
/// Gets or sets the number of partitions minus one.
/// </summary>
public int NumPartsMinusOne { get; set; }
/// <summary>
/// Gets the per-partition boolean decoders.
/// </summary>
public Vp8BitReader[] Vp8BitReaders { get; }
/// <summary>
/// Gets the dequantization matrices (one set of DC/AC dequant factor per segment).
/// </summary>
public Vp8QuantMatrix[] DeQuantMatrices { get; }
/// <summary>
/// Gets or sets a value indicating whether to use the skip probabilities.
/// </summary>
public bool UseSkipProbability { get; set; }
/// <summary>
/// Gets or sets the skip probability.
/// </summary>
public byte SkipProbability { get; set; }
/// <summary>
/// Gets or sets the Probabilities.
/// </summary>
public Vp8Proba Probabilities { get; set; }
/// <summary>
/// Gets or sets the top intra modes values: 4 * MbWidth.
/// </summary>
public byte[] IntraT { get; set; }
/// <summary>
/// Gets the left intra modes values.
/// </summary>
public byte[] IntraL { get; }
/// <summary>
/// Gets the width in macroblock units.
/// </summary>
public int MbWidth { get; }
/// <summary>
/// Gets the height in macroblock units.
/// </summary>
public int MbHeight { get; }
/// <summary>
/// Gets or sets the top-left x index of the macroblock that must be in-loop filtered.
/// </summary>
public int TopLeftMbX { get; set; }
/// <summary>
/// Gets or sets the top-left y index of the macroblock that must be in-loop filtered.
/// </summary>
public int TopLeftMbY { get; set; }
/// <summary>
/// Gets or sets the last bottom-right x index of the macroblock that must be decoded.
/// </summary>
public int BottomRightMbX { get; set; }
/// <summary>
/// Gets or sets the last bottom-right y index of the macroblock that must be decoded.
/// </summary>
public int BottomRightMbY { get; set; }
/// <summary>
/// Gets or sets the current x position in macroblock units.
/// </summary>
public int MbX { get; set; }
/// <summary>
/// Gets or sets the current y position in macroblock units.
/// </summary>
public int MbY { get; set; }
/// <summary>
/// Gets the parsed reconstruction data.
/// </summary>
public Vp8MacroBlockData[] MacroBlockData { get; }
/// <summary>
/// Gets the contextual macroblock info.
/// </summary>
public Vp8MacroBlock[] MacroBlockInfo { get; }
/// <summary>
/// Gets or sets the loop filter used. The purpose of the loop filter is to eliminate (or at least reduce)
/// visually objectionable artifacts.
/// </summary>
public LoopFilter Filter { get; set; }
/// <summary>
/// Gets the pre-calculated per-segment filter strengths.
/// </summary>
public Vp8FilterInfo[,] FilterStrength { get; }
public IMemoryOwner<byte> YuvBuffer { get; }
public Vp8TopSamples[] YuvTopSamples { get; }
public IMemoryOwner<byte> CacheY { get; }
public IMemoryOwner<byte> CacheU { get; }
public IMemoryOwner<byte> CacheV { get; }
public int CacheYOffset { get; set; }
public int CacheUvOffset { get; set; }
public int CacheYStride { get; }
public int CacheUvStride { get; }
public IMemoryOwner<byte> TmpYBuffer { get; }
public IMemoryOwner<byte> TmpUBuffer { get; }
public IMemoryOwner<byte> TmpVBuffer { get; }
/// <summary>
/// Gets the pixel buffer where the decoded pixel data will be stored.
/// </summary>
public IMemoryOwner<byte> Pixels { get; }
/// <summary>
/// Gets or sets filter info.
/// </summary>
public Vp8FilterInfo[] FilterInfo { get; set; }
public Vp8MacroBlock CurrentMacroBlock => this.MacroBlockInfo[this.MbX];
public Vp8MacroBlock LeftMacroBlock => this.leftMacroBlock ??= new Vp8MacroBlock();
public Vp8MacroBlockData CurrentBlockData => this.MacroBlockData[this.MbX];
public void PrecomputeFilterStrengths()
{
if (this.Filter == LoopFilter.None)
{
return;
}
Vp8FilterHeader hdr = this.FilterHeader;
for (int s = 0; s < WebpConstants.NumMbSegments; ++s)
{
int baseLevel;
// First, compute the initial level.
if (this.SegmentHeader.UseSegment)
{
baseLevel = this.SegmentHeader.FilterStrength[s];
if (!this.SegmentHeader.Delta)
{
baseLevel += hdr.FilterLevel;
}
}
else
{
baseLevel = hdr.FilterLevel;
}
for (int i4x4 = 0; i4x4 <= 1; i4x4++)
{
Vp8FilterInfo info = this.FilterStrength[s, i4x4];
int level = baseLevel;
if (hdr.UseLfDelta)
{
level += hdr.RefLfDelta[0];
if (i4x4 > 0)
{
level += hdr.ModeLfDelta[0];
}
}
level = level < 0 ? 0 : level > 63 ? 63 : level;
if (level > 0)
{
int iLevel = level;
if (hdr.Sharpness > 0)
{
if (hdr.Sharpness > 4)
{
iLevel >>= 2;
}
else
{
iLevel >>= 1;
}
int iLevelCap = 9 - hdr.Sharpness;
if (iLevel > iLevelCap)
{
iLevel = iLevelCap;
}
}
if (iLevel < 1)
{
iLevel = 1;
}
info.InnerLevel = (byte)iLevel;
info.Limit = (byte)((2 * level) + iLevel);
info.HighEdgeVarianceThreshold = (byte)(level >= 40 ? 2 : level >= 15 ? 1 : 0);
}
else
{
info.Limit = 0; // no filtering.
}
info.UseInnerFiltering = i4x4 == 1;
}
}
}
/// <inheritdoc/>
public void Dispose()
{
this.YuvBuffer.Dispose();
this.CacheY.Dispose();
this.CacheU.Dispose();
this.CacheV.Dispose();
this.TmpYBuffer.Dispose();
this.TmpUBuffer.Dispose();
this.TmpVBuffer.Dispose();
this.Pixels.Dispose();
}
}
}
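Ignoring the optional LF-delta adjustments, PrecomputeFilterStrengths boils down to: clamp the level to [0, 63]; if sharpness is set, halve or quarter the inner level and cap it at 9 - sharpness; keep it at least 1; then derive limit = (2 * level) + innerLevel and a high-edge-variance threshold of 0, 1 or 2. The standalone sketch below (a hypothetical helper, not part of the decoder) mirrors that derivation for a single level/sharpness pair.

// A standalone sketch with assumed inputs; the LF-delta handling is intentionally omitted.
using System;

internal static class FilterStrengthSketch
{
    private static (byte InnerLevel, byte Limit, byte Hev) Derive(int level, int sharpness)
    {
        level = level < 0 ? 0 : level > 63 ? 63 : level;
        if (level == 0)
        {
            return (0, 0, 0); // no filtering for this segment
        }

        int iLevel = level;
        if (sharpness > 0)
        {
            iLevel >>= sharpness > 4 ? 2 : 1; // soften the inner edge filter
            int cap = 9 - sharpness;
            if (iLevel > cap)
            {
                iLevel = cap;
            }
        }

        if (iLevel < 1)
        {
            iLevel = 1;
        }

        byte hev = (byte)(level >= 40 ? 2 : level >= 15 ? 1 : 0);
        return ((byte)iLevel, (byte)((2 * level) + iLevel), hev);
    }

    private static void Main() => Console.WriteLine(Derive(36, 3)); // -> (6, 78, 1)
}

For level 36 and sharpness 3 this yields inner level 6, limit 78 and HEV threshold 1, which is what the loop above would store in the corresponding Vp8FilterInfo.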

948
src/ImageSharp/Formats/Webp/Lossy/Vp8EncIterator.cs

@@ -0,0 +1,948 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Iterator structure to iterate through macroblocks, pointing to the
/// right neighbouring data (samples, predictions, contexts, ...)
/// </summary>
internal class Vp8EncIterator
{
public const int YOffEnc = 0;
public const int UOffEnc = 16;
public const int VOffEnc = 16 + 8;
private const int MaxIntra16Mode = 2;
private const int MaxIntra4Mode = 2;
private const int MaxUvMode = 2;
private const int DefaultAlpha = -1;
private readonly int mbw;
private readonly int mbh;
/// <summary>
/// Stride of the prediction plane(=4*mbw + 1).
/// </summary>
private readonly int predsWidth;
// Array to record the position of the top sample to pass to the prediction functions.
private readonly byte[] vp8TopLeftI4 =
{
17, 21, 25, 29,
13, 17, 21, 25,
9, 13, 17, 21,
5, 9, 13, 17
};
private int currentMbIdx;
private int nzIdx;
private int predIdx;
private int yTopIdx;
private int uvTopIdx;
public Vp8EncIterator(byte[] yTop, byte[] uvTop, uint[] nz, Vp8MacroBlockInfo[] mb, byte[] preds, sbyte[] topDerr, int mbw, int mbh)
{
this.YTop = yTop;
this.UvTop = uvTop;
this.Nz = nz;
this.Mb = mb;
this.Preds = preds;
this.TopDerr = topDerr;
this.LeftDerr = new sbyte[2 * 2];
this.mbw = mbw;
this.mbh = mbh;
this.currentMbIdx = 0;
this.nzIdx = 1;
this.yTopIdx = 0;
this.uvTopIdx = 0;
this.predsWidth = (4 * mbw) + 1;
this.predIdx = this.predsWidth;
this.YuvIn = new byte[WebpConstants.Bps * 16];
this.YuvOut = new byte[WebpConstants.Bps * 16];
this.YuvOut2 = new byte[WebpConstants.Bps * 16];
this.YuvP = new byte[(32 * WebpConstants.Bps) + (16 * WebpConstants.Bps) + (8 * WebpConstants.Bps)]; // I16+Chroma+I4 preds
this.YLeft = new byte[32];
this.UvLeft = new byte[32];
this.TopNz = new int[9];
this.LeftNz = new int[9];
this.I4Boundary = new byte[37];
this.BitCount = new long[4, 3];
this.Scratch = new byte[WebpConstants.Bps * 16];
// To match the C initial values of the reference implementation, initialize all with 204.
byte defaultInitVal = 204;
this.YuvIn.AsSpan().Fill(defaultInitVal);
this.YuvOut.AsSpan().Fill(defaultInitVal);
this.YuvOut2.AsSpan().Fill(defaultInitVal);
this.YuvP.AsSpan().Fill(defaultInitVal);
this.YLeft.AsSpan().Fill(defaultInitVal);
this.UvLeft.AsSpan().Fill(defaultInitVal);
this.Scratch.AsSpan().Fill(defaultInitVal);
this.Reset();
}
/// <summary>
/// Gets or sets the current macroblock X value.
/// </summary>
public int X { get; set; }
/// <summary>
/// Gets or sets the current macroblock Y.
/// </summary>
public int Y { get; set; }
/// <summary>
/// Gets the input samples.
/// </summary>
public byte[] YuvIn { get; }
/// <summary>
/// Gets or sets the output samples.
/// </summary>
public byte[] YuvOut { get; set; }
/// <summary>
/// Gets or sets the secondary buffer swapped with YuvOut.
/// </summary>
public byte[] YuvOut2 { get; set; }
/// <summary>
/// Gets the scratch buffer for prediction.
/// </summary>
public byte[] YuvP { get; }
/// <summary>
/// Gets the left luma samples.
/// </summary>
public byte[] YLeft { get; }
/// <summary>
/// Gets the left uv samples.
/// </summary>
public byte[] UvLeft { get; }
/// <summary>
/// Gets the left error diffusion (u/v).
/// </summary>
public sbyte[] LeftDerr { get; }
/// <summary>
/// Gets the top luma samples at position 'X'.
/// </summary>
public byte[] YTop { get; }
/// <summary>
/// Gets the top u/v samples at position 'X', packed as 16 bytes.
/// </summary>
public byte[] UvTop { get; }
/// <summary>
/// Gets the intra mode predictors (4x4 blocks).
/// </summary>
public byte[] Preds { get; }
/// <summary>
/// Gets the current start index of the intra mode predictors.
/// </summary>
public int PredIdx => this.predIdx;
/// <summary>
/// Gets the non-zero pattern.
/// </summary>
public uint[] Nz { get; }
/// <summary>
/// Gets the top diffusion error.
/// </summary>
public sbyte[] TopDerr { get; }
/// <summary>
/// Gets 32+5 boundary samples needed by intra4x4.
/// </summary>
public byte[] I4Boundary { get; }
/// <summary>
/// Gets or sets the index to the current top boundary sample.
/// </summary>
public int I4BoundaryIdx { get; set; }
/// <summary>
/// Gets or sets the current intra4x4 mode being tested.
/// </summary>
public int I4 { get; set; }
/// <summary>
/// Gets the top-non-zero context.
/// </summary>
public int[] TopNz { get; }
/// <summary>
/// Gets the left-non-zero. leftNz[8] is independent.
/// </summary>
public int[] LeftNz { get; }
/// <summary>
/// Gets or sets the macroblock bit-cost for luma.
/// </summary>
public long LumaBits { get; set; }
/// <summary>
/// Gets the bit counters for coded levels.
/// </summary>
public long[,] BitCount { get; }
/// <summary>
/// Gets or sets the macroblock bit-cost for chroma.
/// </summary>
public long UvBits { get; set; }
/// <summary>
/// Gets or sets the number of mb still to be processed.
/// </summary>
public int CountDown { get; set; }
/// <summary>
/// Gets the scratch buffer.
/// </summary>
public byte[] Scratch { get; }
public Vp8MacroBlockInfo CurrentMacroBlockInfo => this.Mb[this.currentMbIdx];
private Vp8MacroBlockInfo[] Mb { get; }
public void Init() => this.Reset();
public void InitFilter()
{
// TODO: add support for autofilter
}
public void StartI4()
{
int i;
this.I4 = 0; // first 4x4 sub-block.
this.I4BoundaryIdx = this.vp8TopLeftI4[0];
// Import the boundary samples.
for (i = 0; i < 17; i++)
{
// left
this.I4Boundary[i] = this.YLeft[15 - i + 1];
}
Span<byte> yTop = this.YTop.AsSpan(this.yTopIdx);
for (i = 0; i < 16; i++)
{
// top
this.I4Boundary[17 + i] = yTop[i];
}
// top-right samples have a special case on the far right of the picture.
if (this.X < this.mbw - 1)
{
for (i = 16; i < 16 + 4; i++)
{
this.I4Boundary[17 + i] = yTop[i];
}
}
else
{
// else, replicate the last valid pixel four times
for (i = 16; i < 16 + 4; i++)
{
this.I4Boundary[17 + i] = this.I4Boundary[17 + 15];
}
}
this.NzToBytes(); // import the non-zero context.
}
// Import uncompressed samples from source.
public void Import(Span<byte> y, Span<byte> u, Span<byte> v, int yStride, int uvStride, int width, int height, bool importBoundarySamples)
{
int yStartIdx = ((this.Y * yStride) + this.X) * 16;
int uvStartIdx = ((this.Y * uvStride) + this.X) * 8;
Span<byte> ySrc = y.Slice(yStartIdx);
Span<byte> uSrc = u.Slice(uvStartIdx);
Span<byte> vSrc = v.Slice(uvStartIdx);
int w = Math.Min(width - (this.X * 16), 16);
int h = Math.Min(height - (this.Y * 16), 16);
int uvw = (w + 1) >> 1;
int uvh = (h + 1) >> 1;
Span<byte> yuvIn = this.YuvIn.AsSpan(YOffEnc);
Span<byte> uIn = this.YuvIn.AsSpan(UOffEnc);
Span<byte> vIn = this.YuvIn.AsSpan(VOffEnc);
this.ImportBlock(ySrc, yStride, yuvIn, w, h, 16);
this.ImportBlock(uSrc, uvStride, uIn, uvw, uvh, 8);
this.ImportBlock(vSrc, uvStride, vIn, uvw, uvh, 8);
if (!importBoundarySamples)
{
return;
}
// Import source (uncompressed) samples into boundary.
if (this.X == 0)
{
this.InitLeft();
}
else
{
Span<byte> yLeft = this.YLeft.AsSpan();
Span<byte> uLeft = this.UvLeft.AsSpan(0, 16);
Span<byte> vLeft = this.UvLeft.AsSpan(16, 16);
if (this.Y == 0)
{
yLeft[0] = 127;
uLeft[0] = 127;
vLeft[0] = 127;
}
else
{
yLeft[0] = y[yStartIdx - 1 - yStride];
uLeft[0] = u[uvStartIdx - 1 - uvStride];
vLeft[0] = v[uvStartIdx - 1 - uvStride];
}
this.ImportLine(y.Slice(yStartIdx - 1), yStride, yLeft.Slice(1), h, 16);
this.ImportLine(u.Slice(uvStartIdx - 1), uvStride, uLeft.Slice(1), uvh, 8);
this.ImportLine(v.Slice(uvStartIdx - 1), uvStride, vLeft.Slice(1), uvh, 8);
}
Span<byte> yTop = this.YTop.AsSpan(this.yTopIdx, 16);
if (this.Y == 0)
{
yTop.Fill(127);
this.UvTop.AsSpan(this.uvTopIdx, 16).Fill(127);
}
else
{
this.ImportLine(y.Slice(yStartIdx - yStride), 1, yTop, w, 16);
this.ImportLine(u.Slice(uvStartIdx - uvStride), 1, this.UvTop.AsSpan(this.uvTopIdx, 8), uvw, 8);
this.ImportLine(v.Slice(uvStartIdx - uvStride), 1, this.UvTop.AsSpan(this.uvTopIdx + 8, 8), uvw, 8);
}
}
public int FastMbAnalyze(int quality)
{
// Empirical cut-off value, should be around 16 (~=block size). We use the
// [8-17] range and favor intra4 at high quality, intra16 for low quality.
int q = quality;
int kThreshold = 8 + ((17 - 8) * q / 100);
int k;
uint[] dc = new uint[16];
uint m;
uint m2;
for (k = 0; k < 16; k += 4)
{
this.Mean16x4(this.YuvIn.AsSpan(YOffEnc + (k * WebpConstants.Bps)), dc.AsSpan(k));
}
for (m = 0, m2 = 0, k = 0; k < 16; ++k)
{
m += dc[k];
m2 += dc[k] * dc[k];
}
if (kThreshold * m2 < m * m)
{
this.SetIntra16Mode(0); // DC16
}
else
{
byte[] modes = new byte[16]; // DC4
this.SetIntra4Mode(modes);
}
return 0;
}
public int MbAnalyzeBestIntra16Mode()
{
int maxMode = MaxIntra16Mode;
int mode;
int bestAlpha = DefaultAlpha;
int bestMode = 0;
this.MakeLuma16Preds();
for (mode = 0; mode < maxMode; ++mode)
{
var histo = new Vp8Histogram();
histo.CollectHistogram(this.YuvIn.AsSpan(YOffEnc), this.YuvP.AsSpan(Vp8Encoding.Vp8I16ModeOffsets[mode]), 0, 16);
int alpha = histo.GetAlpha();
if (alpha > bestAlpha)
{
bestAlpha = alpha;
bestMode = mode;
}
}
this.SetIntra16Mode(bestMode);
return bestAlpha;
}
public int MbAnalyzeBestIntra4Mode(int bestAlpha)
{
byte[] modes = new byte[16];
int maxMode = MaxIntra4Mode;
var totalHisto = new Vp8Histogram();
int curHisto = 0;
this.StartI4();
do
{
int mode;
int bestModeAlpha = DefaultAlpha;
var histos = new Vp8Histogram[2];
Span<byte> src = this.YuvIn.AsSpan(YOffEnc + WebpLookupTables.Vp8Scan[this.I4]);
this.MakeIntra4Preds();
for (mode = 0; mode < maxMode; ++mode)
{
histos[curHisto] = new Vp8Histogram();
histos[curHisto].CollectHistogram(src, this.YuvP.AsSpan(Vp8Encoding.Vp8I4ModeOffsets[mode]), 0, 1);
int alpha = histos[curHisto].GetAlpha();
if (alpha > bestModeAlpha)
{
bestModeAlpha = alpha;
modes[this.I4] = (byte)mode;
// Keep track of best histo so far.
curHisto ^= 1;
}
}
// Accumulate best histogram.
histos[curHisto ^ 1].Merge(totalHisto);
}
while (this.RotateI4(this.YuvIn.AsSpan(YOffEnc))); // Note: we reuse the original samples for predictors.
int i4Alpha = totalHisto.GetAlpha();
if (i4Alpha > bestAlpha)
{
this.SetIntra4Mode(modes);
bestAlpha = i4Alpha;
}
return bestAlpha;
}
public int MbAnalyzeBestUvMode()
{
int bestAlpha = DefaultAlpha;
int smallestAlpha = 0;
int bestMode = 0;
int maxMode = MaxUvMode;
int mode;
this.MakeChroma8Preds();
for (mode = 0; mode < maxMode; ++mode)
{
var histo = new Vp8Histogram();
histo.CollectHistogram(this.YuvIn.AsSpan(UOffEnc), this.YuvP.AsSpan(Vp8Encoding.Vp8UvModeOffsets[mode]), 16, 16 + 4 + 4);
int alpha = histo.GetAlpha();
if (alpha > bestAlpha)
{
bestAlpha = alpha;
}
// The best prediction mode tends to be the one with the smallest alpha.
if (mode == 0 || alpha < smallestAlpha)
{
smallestAlpha = alpha;
bestMode = mode;
}
}
this.SetIntraUvMode(bestMode);
return bestAlpha;
}
public void SetIntra16Mode(int mode)
{
Span<byte> preds = this.Preds.AsSpan(this.predIdx);
for (int y = 0; y < 4; y++)
{
preds.Slice(0, 4).Fill((byte)mode);
preds = preds.Slice(this.predsWidth);
}
this.CurrentMacroBlockInfo.MacroBlockType = Vp8MacroBlockType.I16X16;
}
public void SetIntra4Mode(byte[] modes)
{
int modesIdx = 0;
int predIdx = this.predIdx;
for (int y = 4; y > 0; y--)
{
modes.AsSpan(modesIdx, 4).CopyTo(this.Preds.AsSpan(predIdx));
predIdx += this.predsWidth;
modesIdx += 4;
}
this.CurrentMacroBlockInfo.MacroBlockType = Vp8MacroBlockType.I4X4;
}
public int GetCostLuma16(Vp8ModeScore rd, Vp8EncProba proba)
{
var res = new Vp8Residual();
int r = 0;
// re-import the non-zero context.
this.NzToBytes();
// DC
res.Init(0, 1, proba);
res.SetCoeffs(rd.YDcLevels);
r += res.GetResidualCost(this.TopNz[8] + this.LeftNz[8]);
// AC
res.Init(1, 0, proba);
for (int y = 0; y < 4; y++)
{
for (int x = 0; x < 4; x++)
{
int ctx = this.TopNz[x] + this.LeftNz[y];
res.SetCoeffs(rd.YAcLevels.AsSpan((x + (y * 4)) * 16, 16));
r += res.GetResidualCost(ctx);
this.TopNz[x] = this.LeftNz[y] = res.Last >= 0 ? 1 : 0;
}
}
return r;
}
public short[] GetCostModeI4(byte[] modes)
{
int predsWidth = this.predsWidth;
int predIdx = this.predIdx;
int x = this.I4 & 3;
int y = this.I4 >> 2;
int left = x == 0 ? this.Preds[predIdx + (y * predsWidth) - 1] : modes[this.I4 - 1];
int top = y == 0 ? this.Preds[predIdx - predsWidth + x] : modes[this.I4 - 4];
return WebpLookupTables.Vp8FixedCostsI4[top, left];
}
public int GetCostLuma4(short[] levels, Vp8EncProba proba)
{
int x = this.I4 & 3;
int y = this.I4 >> 2;
var res = new Vp8Residual();
int r = 0;
res.Init(0, 3, proba);
int ctx = this.TopNz[x] + this.LeftNz[y];
res.SetCoeffs(levels);
r += res.GetResidualCost(ctx);
return r;
}
public int GetCostUv(Vp8ModeScore rd, Vp8EncProba proba)
{
var res = new Vp8Residual();
int r = 0;
// re-import the non-zero context.
this.NzToBytes();
res.Init(0, 2, proba);
for (int ch = 0; ch <= 2; ch += 2)
{
for (int y = 0; y < 2; y++)
{
for (int x = 0; x < 2; x++)
{
int ctx = this.TopNz[4 + ch + x] + this.LeftNz[4 + ch + y];
res.SetCoeffs(rd.UvLevels.AsSpan(((ch * 2) + x + (y * 2)) * 16, 16));
r += res.GetResidualCost(ctx);
this.TopNz[4 + ch + x] = this.LeftNz[4 + ch + y] = res.Last >= 0 ? 1 : 0;
}
}
}
return r;
}
public void SetIntraUvMode(int mode) => this.CurrentMacroBlockInfo.UvMode = mode;
public void SetSkip(bool skip) => this.CurrentMacroBlockInfo.Skip = skip;
public void SetSegment(int segment) => this.CurrentMacroBlockInfo.Segment = segment;
public void StoreDiffusionErrors(Vp8ModeScore rd)
{
for (int ch = 0; ch <= 1; ++ch)
{
Span<sbyte> top = this.TopDerr.AsSpan((this.X * 4) + ch, 2);
Span<sbyte> left = this.LeftDerr.AsSpan(ch, 2);
// restore err1
left[0] = (sbyte)rd.Derr[ch, 0];
// 3/4th of err3
left[1] = (sbyte)((3 * rd.Derr[ch, 2]) >> 2);
// err2
top[0] = (sbyte)rd.Derr[ch, 1];
// 1/4th of err3.
top[1] = (sbyte)(rd.Derr[ch, 2] - left[1]);
}
}
/// <summary>
/// Returns true if iteration is finished.
/// </summary>
/// <returns>True if iterator is finished.</returns>
public bool IsDone() => this.CountDown <= 0;
/// <summary>
/// Go to next macroblock.
/// </summary>
/// <returns>Returns false if not finished.</returns>
public bool Next()
{
if (++this.X == this.mbw)
{
this.SetRow(++this.Y);
}
else
{
this.currentMbIdx++;
this.nzIdx++;
this.predIdx += 4;
this.yTopIdx += 16;
this.uvTopIdx += 16;
}
return --this.CountDown > 0;
}
public void SaveBoundary()
{
int x = this.X;
int y = this.Y;
Span<byte> ySrc = this.YuvOut.AsSpan(YOffEnc);
Span<byte> uvSrc = this.YuvOut.AsSpan(UOffEnc);
if (x < this.mbw - 1)
{
// left
for (int i = 0; i < 16; i++)
{
this.YLeft[i + 1] = ySrc[15 + (i * WebpConstants.Bps)];
}
for (int i = 0; i < 8; i++)
{
this.UvLeft[i + 1] = uvSrc[7 + (i * WebpConstants.Bps)];
this.UvLeft[i + 16 + 1] = uvSrc[15 + (i * WebpConstants.Bps)];
}
// top-left (before 'top'!)
this.YLeft[0] = this.YTop[this.yTopIdx + 15];
this.UvLeft[0] = this.UvTop[this.uvTopIdx + 0 + 7];
this.UvLeft[16] = this.UvTop[this.uvTopIdx + 8 + 7];
}
if (y < this.mbh - 1)
{
// top
ySrc.Slice(15 * WebpConstants.Bps, 16).CopyTo(this.YTop.AsSpan(this.yTopIdx));
uvSrc.Slice(7 * WebpConstants.Bps, 8 + 8).CopyTo(this.UvTop.AsSpan(this.uvTopIdx));
}
}
public bool RotateI4(Span<byte> yuvOut)
{
Span<byte> blk = yuvOut.Slice(WebpLookupTables.Vp8Scan[this.I4]);
Span<byte> top = this.I4Boundary.AsSpan();
int topOffset = this.I4BoundaryIdx;
int i;
// Update the cache with 7 fresh samples.
for (i = 0; i <= 3; i++)
{
top[topOffset - 4 + i] = blk[i + (3 * WebpConstants.Bps)]; // Store future top samples.
}
if ((this.I4 & 3) != 3)
{
// if not on the right sub-blocks #3, #7, #11, #15
for (i = 0; i <= 2; i++)
{
// store future left samples
top[topOffset + i] = blk[3 + ((2 - i) * WebpConstants.Bps)];
}
}
else
{
// else replicate the top-right samples, as the spec says.
for (i = 0; i <= 3; i++)
{
top[topOffset + i] = top[topOffset + i + 4];
}
}
// move pointers to next sub-block
++this.I4;
if (this.I4 == 16)
{
// we're done
return false;
}
this.I4BoundaryIdx = this.vp8TopLeftI4[this.I4];
return true;
}
public void ResetAfterSkip()
{
if (this.CurrentMacroBlockInfo.MacroBlockType == Vp8MacroBlockType.I16X16)
{
// Reset all predictors.
this.Nz[this.nzIdx] = 0;
this.LeftNz[8] = 0;
}
else
{
// Preserve the dc_nz bit.
this.Nz[this.nzIdx] &= 1 << 24;
}
}
public void MakeLuma16Preds()
{
Span<byte> left = this.X != 0 ? this.YLeft.AsSpan() : null;
Span<byte> top = this.Y != 0 ? this.YTop.AsSpan(this.yTopIdx) : null;
Vp8Encoding.EncPredLuma16(this.YuvP, left, top);
}
public void MakeChroma8Preds()
{
Span<byte> left = this.X != 0 ? this.UvLeft.AsSpan() : null;
Span<byte> top = this.Y != 0 ? this.UvTop.AsSpan(this.uvTopIdx) : null;
Vp8Encoding.EncPredChroma8(this.YuvP, left, top);
}
public void MakeIntra4Preds() => Vp8Encoding.EncPredLuma4(this.YuvP, this.I4Boundary, this.I4BoundaryIdx);
public void SwapOut()
{
byte[] tmp = this.YuvOut;
this.YuvOut = this.YuvOut2;
this.YuvOut2 = tmp;
}
public void NzToBytes()
{
Span<uint> nz = this.Nz.AsSpan();
uint lnz = nz[this.nzIdx - 1];
uint tnz = nz[this.nzIdx];
Span<int> topNz = this.TopNz;
Span<int> leftNz = this.LeftNz;
// Top-Y
topNz[0] = this.Bit(tnz, 12);
topNz[1] = this.Bit(tnz, 13);
topNz[2] = this.Bit(tnz, 14);
topNz[3] = this.Bit(tnz, 15);
// Top-U
topNz[4] = this.Bit(tnz, 18);
topNz[5] = this.Bit(tnz, 19);
// Top-V
topNz[6] = this.Bit(tnz, 22);
topNz[7] = this.Bit(tnz, 23);
// DC
topNz[8] = this.Bit(tnz, 24);
// left-Y
leftNz[0] = this.Bit(lnz, 3);
leftNz[1] = this.Bit(lnz, 7);
leftNz[2] = this.Bit(lnz, 11);
leftNz[3] = this.Bit(lnz, 15);
// left-U
leftNz[4] = this.Bit(lnz, 17);
leftNz[5] = this.Bit(lnz, 19);
// left-V
leftNz[6] = this.Bit(lnz, 21);
leftNz[7] = this.Bit(lnz, 23);
// left-DC is special, iterated separately.
}
public void BytesToNz()
{
uint nz = 0;
int[] topNz = this.TopNz;
int[] leftNz = this.LeftNz;
// top
nz |= (uint)((topNz[0] << 12) | (topNz[1] << 13));
nz |= (uint)((topNz[2] << 14) | (topNz[3] << 15));
nz |= (uint)((topNz[4] << 18) | (topNz[5] << 19));
nz |= (uint)((topNz[6] << 22) | (topNz[7] << 23));
nz |= (uint)(topNz[8] << 24); // we propagate the top bit, esp. for intra4
// left
nz |= (uint)((leftNz[0] << 3) | (leftNz[1] << 7));
nz |= (uint)(leftNz[2] << 11);
nz |= (uint)((leftNz[4] << 17) | (leftNz[6] << 21));
this.Nz[this.nzIdx] = nz;
}
private void Mean16x4(Span<byte> input, Span<uint> dc)
{
for (int k = 0; k < 4; k++)
{
uint avg = 0;
for (int y = 0; y < 4; y++)
{
for (int x = 0; x < 4; x++)
{
avg += input[x + (y * WebpConstants.Bps)];
}
}
dc[k] = avg;
input = input.Slice(4); // go to next 4x4 block.
}
}
private void ImportBlock(Span<byte> src, int srcStride, Span<byte> dst, int w, int h, int size)
{
int dstIdx = 0;
int srcIdx = 0;
for (int i = 0; i < h; i++)
{
// memcpy(dst, src, w);
src.Slice(srcIdx, w).CopyTo(dst.Slice(dstIdx));
if (w < size)
{
// memset(dst + w, dst[w - 1], size - w);
dst.Slice(dstIdx + w, size - w).Fill(dst[dstIdx + w - 1]);
}
dstIdx += WebpConstants.Bps;
srcIdx += srcStride;
}
for (int i = h; i < size; i++)
{
// memcpy(dst, dst - BPS, size);
dst.Slice(dstIdx - WebpConstants.Bps, size).CopyTo(dst.Slice(dstIdx));
dstIdx += WebpConstants.Bps;
}
}
private void ImportLine(Span<byte> src, int srcStride, Span<byte> dst, int len, int totalLen)
{
int i;
int srcIdx = 0;
for (i = 0; i < len; i++)
{
dst[i] = src[srcIdx];
srcIdx += srcStride;
}
for (; i < totalLen; i++)
{
dst[i] = dst[len - 1];
}
}
/// <summary>
/// Restart a scan.
/// </summary>
private void Reset()
{
this.SetRow(0);
this.SetCountDown(this.mbw * this.mbh);
this.InitTop();
Array.Clear(this.BitCount, 0, this.BitCount.Length);
}
/// <summary>
/// Reset iterator position to row 'y'.
/// </summary>
/// <param name="y">The y position.</param>
private void SetRow(int y)
{
this.X = 0;
this.Y = y;
this.currentMbIdx = y * this.mbw;
this.nzIdx = 1; // note: in the reference source, nz starts at -1.
this.yTopIdx = 0;
this.uvTopIdx = 0;
this.predIdx = this.predsWidth + (y * 4 * this.predsWidth);
this.InitLeft();
}
private void InitLeft()
{
Span<byte> yLeft = this.YLeft.AsSpan();
Span<byte> uLeft = this.UvLeft.AsSpan(0, 16);
Span<byte> vLeft = this.UvLeft.AsSpan(16, 16);
byte val = (byte)(this.Y > 0 ? 129 : 127);
yLeft[0] = val;
uLeft[0] = val;
vLeft[0] = val;
yLeft.Slice(1, 16).Fill(129);
uLeft.Slice(1, 8).Fill(129);
vLeft.Slice(1, 8).Fill(129);
this.LeftNz[8] = 0;
this.LeftDerr.AsSpan().Fill(0);
}
private void InitTop()
{
int topSize = this.mbw * 16;
this.YTop.AsSpan(0, topSize).Fill(127);
this.UvTop.AsSpan().Fill(127);
this.Nz.AsSpan().Fill(0);
int predsW = (4 * this.mbw) + 1;
int predsH = (4 * this.mbh) + 1;
int predsSize = predsW * predsH;
this.Preds.AsSpan(predsSize + this.predsWidth, this.mbw).Fill(0);
this.TopDerr.AsSpan().Fill(0);
}
private int Bit(uint nz, int n) => (nz & (1 << n)) != 0 ? 1 : 0;
/// <summary>
/// Set count down.
/// </summary>
/// <param name="countDown">Number of iterations to go.</param>
private void SetCountDown(int countDown) => this.CountDown = countDown;
}
}
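NzToBytes and BytesToNz above pack a macroblock's coded-block flags into a single uint: bits 12-15 hold the top luma flags, 18-19 top U, 22-23 top V and bit 24 the propagated DC flag, while the left context is read back from the previous entry at its own bit positions. A tiny standalone illustration of that packing, using hypothetical flag values but the same bit positions:

// Hypothetical flag values; the bit positions match NzToBytes/BytesToNz above.
using System;

internal static class NzPackingSketch
{
    private static int Bit(uint nz, int n) => (nz & (1u << n)) != 0 ? 1 : 0;

    private static void Main()
    {
        uint nz = 0;
        nz |= 1u << 12; // top-Y block #0 has non-zero coefficients
        nz |= 1u << 15; // top-Y block #3
        nz |= 1u << 24; // DC flag, propagated for intra4

        Console.WriteLine(
            $"topY: {Bit(nz, 12)} {Bit(nz, 13)} {Bit(nz, 14)} {Bit(nz, 15)}, dc: {Bit(nz, 24)}");
        // -> topY: 1 0 0 1, dc: 1
    }
}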

265
src/ImageSharp/Formats/Webp/Lossy/Vp8EncProba.cs

@@ -0,0 +1,265 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8EncProba
{
/// <summary>
/// Last (inclusive) level with variable cost.
/// </summary>
private const int MaxVariableLevel = 67;
/// <summary>
/// Value below which using skipProba is OK.
/// </summary>
private const int SkipProbaThreshold = 250;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8EncProba"/> class.
/// </summary>
public Vp8EncProba()
{
this.Dirty = true;
this.UseSkipProba = false;
this.Segments = new byte[3];
this.Coeffs = new Vp8BandProbas[WebpConstants.NumTypes][];
for (int i = 0; i < this.Coeffs.Length; i++)
{
this.Coeffs[i] = new Vp8BandProbas[WebpConstants.NumBands];
for (int j = 0; j < this.Coeffs[i].Length; j++)
{
this.Coeffs[i][j] = new Vp8BandProbas();
}
}
this.Stats = new Vp8Stats[WebpConstants.NumTypes][];
for (int i = 0; i < this.Coeffs.Length; i++)
{
this.Stats[i] = new Vp8Stats[WebpConstants.NumBands];
for (int j = 0; j < this.Stats[i].Length; j++)
{
this.Stats[i][j] = new Vp8Stats();
}
}
this.LevelCost = new Vp8Costs[WebpConstants.NumTypes][];
for (int i = 0; i < this.LevelCost.Length; i++)
{
this.LevelCost[i] = new Vp8Costs[WebpConstants.NumBands];
for (int j = 0; j < this.LevelCost[i].Length; j++)
{
this.LevelCost[i][j] = new Vp8Costs();
}
}
this.RemappedCosts = new Vp8Costs[WebpConstants.NumTypes][];
for (int i = 0; i < this.RemappedCosts.Length; i++)
{
this.RemappedCosts[i] = new Vp8Costs[16];
for (int j = 0; j < this.RemappedCosts[i].Length; j++)
{
this.RemappedCosts[i][j] = new Vp8Costs();
}
}
// Initialize with default probabilities.
this.Segments.AsSpan().Fill(255);
for (int t = 0; t < WebpConstants.NumTypes; ++t)
{
for (int b = 0; b < WebpConstants.NumBands; ++b)
{
for (int c = 0; c < WebpConstants.NumCtx; ++c)
{
Vp8ProbaArray dst = this.Coeffs[t][b].Probabilities[c];
for (int p = 0; p < WebpConstants.NumProbas; ++p)
{
dst.Probabilities[p] = WebpLookupTables.DefaultCoeffsProba[t, b, c, p];
}
}
}
}
}
/// <summary>
/// Gets the probabilities for segment tree.
/// </summary>
public byte[] Segments { get; }
/// <summary>
/// Gets or sets the final probability of being skipped.
/// </summary>
public byte SkipProba { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to use the skip probability.
/// </summary>
public bool UseSkipProba { get; set; }
public Vp8BandProbas[][] Coeffs { get; }
public Vp8Stats[][] Stats { get; }
public Vp8Costs[][] LevelCost { get; }
public Vp8Costs[][] RemappedCosts { get; }
/// <summary>
/// Gets or sets the number of skipped blocks.
/// </summary>
public int NbSkip { get; set; }
/// <summary>
/// Gets or sets a value indicating whether CalculateLevelCosts() needs to be called.
/// </summary>
public bool Dirty { get; set; }
public void CalculateLevelCosts()
{
if (!this.Dirty)
{
return; // Nothing to do.
}
for (int ctype = 0; ctype < WebpConstants.NumTypes; ++ctype)
{
for (int band = 0; band < WebpConstants.NumBands; ++band)
{
for (int ctx = 0; ctx < WebpConstants.NumCtx; ++ctx)
{
Vp8ProbaArray p = this.Coeffs[ctype][band].Probabilities[ctx];
Vp8CostArray table = this.LevelCost[ctype][band].Costs[ctx];
int cost0 = ctx > 0 ? LossyUtils.Vp8BitCost(1, p.Probabilities[0]) : 0;
int costBase = LossyUtils.Vp8BitCost(1, p.Probabilities[1]) + cost0;
int v;
table.Costs[0] = (ushort)(LossyUtils.Vp8BitCost(0, p.Probabilities[1]) + cost0);
for (v = 1; v <= MaxVariableLevel; ++v)
{
table.Costs[v] = (ushort)(costBase + VariableLevelCost(v, p.Probabilities));
}
// Starting at level 67 and up, the variable part of the cost is actually constant
}
}
for (int n = 0; n < 16; ++n)
{
for (int ctx = 0; ctx < WebpConstants.NumCtx; ++ctx)
{
Vp8CostArray dst = this.RemappedCosts[ctype][n].Costs[ctx];
Vp8CostArray src = this.LevelCost[ctype][WebpConstants.Vp8EncBands[n]].Costs[ctx];
src.Costs.CopyTo(dst.Costs.AsSpan());
}
}
}
this.Dirty = false;
}
public int FinalizeTokenProbas()
{
bool hasChanged = false;
int size = 0;
for (int t = 0; t < WebpConstants.NumTypes; ++t)
{
for (int b = 0; b < WebpConstants.NumBands; ++b)
{
for (int c = 0; c < WebpConstants.NumCtx; ++c)
{
for (int p = 0; p < WebpConstants.NumProbas; ++p)
{
uint stats = this.Stats[t][b].Stats[c].Stats[p];
int nb = (int)((stats >> 0) & 0xffff);
int total = (int)((stats >> 16) & 0xffff);
int updateProba = WebpLookupTables.CoeffsUpdateProba[t, b, c, p];
int oldP = WebpLookupTables.DefaultCoeffsProba[t, b, c, p];
int newP = CalcTokenProba(nb, total);
int oldCost = BranchCost(nb, total, oldP) + LossyUtils.Vp8BitCost(0, (byte)updateProba);
int newCost = BranchCost(nb, total, newP) + LossyUtils.Vp8BitCost(1, (byte)updateProba) + (8 * 256);
bool useNewP = oldCost > newCost;
size += LossyUtils.Vp8BitCost(useNewP ? 1 : 0, (byte)updateProba);
if (useNewP)
{
// Only use proba that seem meaningful enough.
this.Coeffs[t][b].Probabilities[c].Probabilities[p] = (byte)newP;
hasChanged |= newP != oldP;
size += 8 * 256;
}
else
{
this.Coeffs[t][b].Probabilities[c].Probabilities[p] = (byte)oldP;
}
}
}
}
}
this.Dirty = hasChanged;
return size;
}
public int FinalizeSkipProba(int mbw, int mbh)
{
int nbMbs = mbw * mbh;
int nbEvents = this.NbSkip;
this.SkipProba = (byte)CalcSkipProba(nbEvents, nbMbs);
this.UseSkipProba = this.SkipProba < SkipProbaThreshold;
int size = 256;
if (this.UseSkipProba)
{
size += (nbEvents * LossyUtils.Vp8BitCost(1, this.SkipProba)) + ((nbMbs - nbEvents) * LossyUtils.Vp8BitCost(0, this.SkipProba));
size += 8 * 256; // cost of signaling the skipProba itself.
}
return size;
}
public void ResetTokenStats()
{
for (int t = 0; t < WebpConstants.NumTypes; ++t)
{
for (int b = 0; b < WebpConstants.NumBands; ++b)
{
for (int c = 0; c < WebpConstants.NumCtx; ++c)
{
for (int p = 0; p < WebpConstants.NumProbas; ++p)
{
this.Stats[t][b].Stats[c].Stats[p] = 0;
}
}
}
}
}
private static int CalcSkipProba(long nb, long total) => (int)(total != 0 ? (total - nb) * 255 / total : 255);
private static int VariableLevelCost(int level, Span<byte> probas)
{
int pattern = WebpLookupTables.Vp8LevelCodes[level - 1][0];
int bits = WebpLookupTables.Vp8LevelCodes[level - 1][1];
int cost = 0;
for (int i = 2; pattern != 0; i++)
{
if ((pattern & 1) != 0)
{
cost += LossyUtils.Vp8BitCost(bits & 1, probas[i]);
}
bits >>= 1;
pattern >>= 1;
}
return cost;
}
// Collect statistics and deduce probabilities for next coding pass.
// Return the total bit-cost for coding the probability updates.
private static int CalcTokenProba(int nb, int total) => nb != 0 ? (255 - (nb * 255 / total)) : 255;
// Cost of coding 'nb' 1's and 'total-nb' 0's using 'proba' probability.
private static int BranchCost(int nb, int total, int proba) => (nb * LossyUtils.Vp8BitCost(1, (byte)proba)) + ((total - nb) * LossyUtils.Vp8BitCost(0, (byte)proba));
}
}
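FinalizeTokenProbas above only adopts a re-estimated probability when the bits it saves on the coefficient branches outweigh the fixed 8 * 256 units (one byte, in 1/256-bit units) needed to transmit the new value. The sketch below reproduces that trade-off with assumed counts and a crude -log2 cost approximation standing in for LossyUtils.Vp8BitCost's lookup table; it is a sketch, not the encoder's code path.

// Assumed counts and an approximate bit-cost function; not the encoder's code path.
using System;

internal static class TokenProbaSketch
{
    // Probability estimate (0..255) that the branch bit is 0, from nb ones out of total events.
    private static int CalcTokenProba(int nb, int total) => nb != 0 ? 255 - (nb * 255 / total) : 255;

    // Crude -log2 cost in 1/256-bit units; the real code uses LossyUtils.Vp8BitCost's table.
    private static int Cost(byte p) => (int)(-256.0 * Math.Log((p + 0.5) / 256.0, 2.0));

    private static int BitCost(int bit, byte p) => bit == 0 ? Cost(p) : Cost((byte)(255 - p));

    private static int BranchCost(int nb, int total, int proba)
        => (nb * BitCost(1, (byte)proba)) + ((total - nb) * BitCost(0, (byte)proba));

    private static void Main()
    {
        int nb = 40, total = 1000, oldP = 224;
        int newP = CalcTokenProba(nb, total); // 255 - (40 * 255 / 1000) = 245
        int saving = BranchCost(nb, total, oldP) - BranchCost(nb, total, newP);
        Console.WriteLine($"newP = {newP}, bits saved (x256) = {saving}, signal cost = {8 * 256}");
    }
}

With these counts the estimated saving comfortably exceeds the 2048-unit signalling cost, so the updated probability would be kept.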

34
src/ImageSharp/Formats/Webp/Lossy/Vp8EncSegmentHeader.cs

@@ -0,0 +1,34 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8EncSegmentHeader
{
/// <summary>
/// Initializes a new instance of the <see cref="Vp8EncSegmentHeader"/> class.
/// </summary>
/// <param name="numSegments">Number of segments.</param>
public Vp8EncSegmentHeader(int numSegments)
{
this.NumSegments = numSegments;
this.UpdateMap = this.NumSegments > 1;
this.Size = 0;
}
/// <summary>
/// Gets the actual number of segments. 1 segment only = unused.
/// </summary>
public int NumSegments { get; }
/// <summary>
/// Gets or sets a value indicating whether to update the segment map or not. Must be false if there's only 1 segment.
/// </summary>
public bool UpdateMap { get; set; }
/// <summary>
/// Gets or sets the bit-cost for transmitting the segment map.
/// </summary>
public int Size { get; set; }
}
}

1102
src/ImageSharp/Formats/Webp/Lossy/Vp8Encoder.cs

File diff suppressed because it is too large

655
src/ImageSharp/Formats/Webp/Lossy/Vp8Encoding.cs

@@ -0,0 +1,655 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Buffers.Binary;
using System.Runtime.CompilerServices;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Methods for encoding a VP8 frame.
/// </summary>
internal static class Vp8Encoding
{
private const int KC1 = 20091 + (1 << 16);
private const int KC2 = 35468;
private static readonly byte[] Clip1 = new byte[255 + 510 + 1]; // clips [-255,510] to [0,255]
private const int I16DC16 = 0 * 16 * WebpConstants.Bps;
private const int I16TM16 = I16DC16 + 16;
private const int I16VE16 = 1 * 16 * WebpConstants.Bps;
private const int I16HE16 = I16VE16 + 16;
private const int C8DC8 = 2 * 16 * WebpConstants.Bps;
private const int C8TM8 = C8DC8 + (1 * 16);
private const int C8VE8 = (2 * 16 * WebpConstants.Bps) + (8 * WebpConstants.Bps);
private const int C8HE8 = C8VE8 + (1 * 16);
public static readonly int[] Vp8I16ModeOffsets = { I16DC16, I16TM16, I16VE16, I16HE16 };
public static readonly int[] Vp8UvModeOffsets = { C8DC8, C8TM8, C8VE8, C8HE8 };
private const int I4DC4 = (3 * 16 * WebpConstants.Bps) + 0;
private const int I4TM4 = I4DC4 + 4;
private const int I4VE4 = I4DC4 + 8;
private const int I4HE4 = I4DC4 + 12;
private const int I4RD4 = I4DC4 + 16;
private const int I4VR4 = I4DC4 + 20;
private const int I4LD4 = I4DC4 + 24;
private const int I4VL4 = I4DC4 + 28;
private const int I4HD4 = (3 * 16 * WebpConstants.Bps) + (4 * WebpConstants.Bps);
private const int I4HU4 = I4HD4 + 4;
public static readonly int[] Vp8I4ModeOffsets = { I4DC4, I4TM4, I4VE4, I4HE4, I4RD4, I4VR4, I4LD4, I4VL4, I4HD4, I4HU4 };
static Vp8Encoding()
{
for (int i = -255; i <= 255 + 255; i++)
{
Clip1[255 + i] = Clip8b(i);
}
}
public static void ITransform(Span<byte> reference, Span<short> input, Span<byte> dst, bool doTwo)
{
ITransformOne(reference, input, dst);
if (doTwo)
{
ITransformOne(reference.Slice(4), input.Slice(16), dst.Slice(4));
}
}
public static void ITransformOne(Span<byte> reference, Span<short> input, Span<byte> dst)
{
int i;
#pragma warning disable SA1312 // Variable names should begin with lower-case letter
int[] C = new int[4 * 4];
#pragma warning restore SA1312 // Variable names should begin with lower-case letter
Span<int> tmp = C.AsSpan();
for (i = 0; i < 4; i++)
{
// vertical pass.
int a = input[0] + input[8];
int b = input[0] - input[8];
int c = Mul(input[4], KC2) - Mul(input[12], KC1);
int d = Mul(input[4], KC1) + Mul(input[12], KC2);
tmp[0] = a + d;
tmp[1] = b + c;
tmp[2] = b - c;
tmp[3] = a - d;
tmp = tmp.Slice(4);
input = input.Slice(1);
}
tmp = C.AsSpan();
for (i = 0; i < 4; i++)
{
// horizontal pass.
int dc = tmp[0] + 4;
int a = dc + tmp[8];
int b = dc - tmp[8];
int c = Mul(tmp[4], KC2) - Mul(tmp[12], KC1);
int d = Mul(tmp[4], KC1) + Mul(tmp[12], KC2);
Store(dst, reference, 0, i, a + d);
Store(dst, reference, 1, i, b + c);
Store(dst, reference, 2, i, b - c);
Store(dst, reference, 3, i, a - d);
tmp = tmp.Slice(1);
}
}
public static void FTransform2(Span<byte> src, Span<byte> reference, Span<short> output, Span<short> output2)
{
FTransform(src, reference, output);
FTransform(src.Slice(4), reference.Slice(4), output2);
}
public static void FTransform(Span<byte> src, Span<byte> reference, Span<short> output)
{
int i;
int[] tmp = new int[16];
int srcIdx = 0;
int refIdx = 0;
for (i = 0; i < 4; i++)
{
int d0 = src[srcIdx] - reference[refIdx]; // 9bit dynamic range ([-255,255])
int d1 = src[srcIdx + 1] - reference[refIdx + 1];
int d2 = src[srcIdx + 2] - reference[refIdx + 2];
int d3 = src[srcIdx + 3] - reference[refIdx + 3];
int a0 = d0 + d3; // 10b [-510,510]
int a1 = d1 + d2;
int a2 = d1 - d2;
int a3 = d0 - d3;
tmp[0 + (i * 4)] = (a0 + a1) * 8; // 14b [-8160,8160]
tmp[1 + (i * 4)] = ((a2 * 2217) + (a3 * 5352) + 1812) >> 9; // [-7536,7542]
tmp[2 + (i * 4)] = (a0 - a1) * 8;
tmp[3 + (i * 4)] = ((a3 * 2217) - (a2 * 5352) + 937) >> 9;
srcIdx += WebpConstants.Bps;
refIdx += WebpConstants.Bps;
}
for (i = 0; i < 4; i++)
{
int a0 = tmp[0 + i] + tmp[12 + i]; // 15b
int a1 = tmp[4 + i] + tmp[8 + i];
int a2 = tmp[4 + i] - tmp[8 + i];
int a3 = tmp[0 + i] - tmp[12 + i];
output[0 + i] = (short)((a0 + a1 + 7) >> 4); // 12b
output[4 + i] = (short)((((a2 * 2217) + (a3 * 5352) + 12000) >> 16) + (a3 != 0 ? 1 : 0));
output[8 + i] = (short)((a0 - a1 + 7) >> 4);
output[12 + i] = (short)(((a3 * 2217) - (a2 * 5352) + 51000) >> 16);
}
}
public static void FTransformWht(Span<short> input, Span<short> output)
{
int[] tmp = new int[16];
int i;
int inputIdx = 0;
for (i = 0; i < 4; i++)
{
int a0 = input[inputIdx + (0 * 16)] + input[inputIdx + (2 * 16)]; // 13b
int a1 = input[inputIdx + (1 * 16)] + input[inputIdx + (3 * 16)];
int a2 = input[inputIdx + (1 * 16)] - input[inputIdx + (3 * 16)];
int a3 = input[inputIdx + (0 * 16)] - input[inputIdx + (2 * 16)];
tmp[0 + (i * 4)] = a0 + a1; // 14b
tmp[1 + (i * 4)] = a3 + a2;
tmp[2 + (i * 4)] = a3 - a2;
tmp[3 + (i * 4)] = a0 - a1;
inputIdx += 64;
}
for (i = 0; i < 4; i++)
{
int a0 = tmp[0 + i] + tmp[8 + i]; // 15b
int a1 = tmp[4 + i] + tmp[12 + i];
int a2 = tmp[4 + i] - tmp[12 + i];
int a3 = tmp[0 + i] - tmp[8 + i];
int b0 = a0 + a1; // 16b
int b1 = a3 + a2;
int b2 = a3 - a2;
int b3 = a0 - a1;
output[0 + i] = (short)(b0 >> 1); // 15b
output[4 + i] = (short)(b1 >> 1);
output[8 + i] = (short)(b2 >> 1);
output[12 + i] = (short)(b3 >> 1);
}
}
// luma 16x16 prediction (paragraph 12.3).
public static void EncPredLuma16(Span<byte> dst, Span<byte> left, Span<byte> top)
{
DcMode(dst.Slice(I16DC16), left, top, 16, 16, 5);
VerticalPred(dst.Slice(I16VE16), top, 16);
HorizontalPred(dst.Slice(I16HE16), left, 16);
TrueMotion(dst.Slice(I16TM16), left, top, 16);
}
// Chroma 8x8 prediction (paragraph 12.2).
public static void EncPredChroma8(Span<byte> dst, Span<byte> left, Span<byte> top)
{
// U block.
DcMode(dst.Slice(C8DC8), left, top, 8, 8, 4);
VerticalPred(dst.Slice(C8VE8), top, 8);
HorizontalPred(dst.Slice(C8HE8), left, 8);
TrueMotion(dst.Slice(C8TM8), left, top, 8);
// V block.
dst = dst.Slice(8);
if (top != null)
{
top = top.Slice(8);
}
if (left != null)
{
left = left.Slice(16);
}
DcMode(dst.Slice(C8DC8), left, top, 8, 8, 4);
VerticalPred(dst.Slice(C8VE8), top, 8);
HorizontalPred(dst.Slice(C8HE8), left, 8);
TrueMotion(dst.Slice(C8TM8), left, top, 8);
}
// Left samples are top[-5 .. -2], top_left is top[-1], top are
// located at top[0..3], and top right is top[4..7]
public static void EncPredLuma4(Span<byte> dst, Span<byte> top, int topOffset)
{
Dc4(dst.Slice(I4DC4), top, topOffset);
Tm4(dst.Slice(I4TM4), top, topOffset);
Ve4(dst.Slice(I4VE4), top, topOffset);
He4(dst.Slice(I4HE4), top, topOffset);
Rd4(dst.Slice(I4RD4), top, topOffset);
Vr4(dst.Slice(I4VR4), top, topOffset);
Ld4(dst.Slice(I4LD4), top, topOffset);
Vl4(dst.Slice(I4VL4), top, topOffset);
Hd4(dst.Slice(I4HD4), top, topOffset);
Hu4(dst.Slice(I4HU4), top, topOffset);
}
private static void VerticalPred(Span<byte> dst, Span<byte> top, int size)
{
if (top != null)
{
for (int j = 0; j < size; j++)
{
top.Slice(0, size).CopyTo(dst.Slice(j * WebpConstants.Bps));
}
}
else
{
Fill(dst, 127, size);
}
}
public static void HorizontalPred(Span<byte> dst, Span<byte> left, int size)
{
if (left != null)
{
left = left.Slice(1); // in the reference implementation, left starts at -1.
for (int j = 0; j < size; j++)
{
dst.Slice(j * WebpConstants.Bps, size).Fill(left[j]);
}
}
else
{
Fill(dst, 129, size);
}
}
public static void TrueMotion(Span<byte> dst, Span<byte> left, Span<byte> top, int size)
{
if (left != null)
{
if (top != null)
{
Span<byte> clip = Clip1.AsSpan(255 - left[0]); // left[0] instead of left[-1]; the original left starts at -1.
for (int y = 0; y < size; y++)
{
Span<byte> clipTable = clip.Slice(left[y + 1]); // left[y]
for (int x = 0; x < size; x++)
{
dst[x] = clipTable[top[x]];
}
dst = dst.Slice(WebpConstants.Bps);
}
}
else
{
HorizontalPred(dst, left, size);
}
}
else
{
// true motion without left samples (hence: with default 129 value)
// is equivalent to VE prediction where you just copy the top samples.
// Note that if top samples are not available, the default value is
// then 129, and not 127 as in the VerticalPred case.
if (top != null)
{
VerticalPred(dst, top, size);
}
else
{
Fill(dst, 129, size);
}
}
}
private static void DcMode(Span<byte> dst, Span<byte> left, Span<byte> top, int size, int round, int shift)
{
int dc = 0;
int j;
if (top != null)
{
for (j = 0; j < size; j++)
{
dc += top[j];
}
if (left != null)
{
// top and left present.
left = left.Slice(1); // in the reference implementation, left starts at -1.
for (j = 0; j < size; j++)
{
dc += left[j];
}
}
else
{
// top, but no left.
dc += dc;
}
dc = (dc + round) >> shift;
}
else if (left != null)
{
// left but no top.
left = left.Slice(1); // in the reference implementation, left starts at -1.
for (j = 0; j < size; j++)
{
dc += left[j];
}
dc += dc;
dc = (dc + round) >> shift;
}
else
{
// no top, no left, nothing.
dc = 0x80;
}
Fill(dst, dc, size);
}
private static void Dc4(Span<byte> dst, Span<byte> top, int topOffset)
{
uint dc = 4;
int i;
for (i = 0; i < 4; i++)
{
dc += (uint)(top[topOffset + i] + top[topOffset - 5 + i]);
}
Fill(dst, (int)(dc >> 3), 4);
}
private static void Tm4(Span<byte> dst, Span<byte> top, int topOffset)
{
Span<byte> clip = Clip1.AsSpan(255 - top[topOffset - 1]);
for (int y = 0; y < 4; y++)
{
Span<byte> clipTable = clip.Slice(top[topOffset - 2 - y]);
for (int x = 0; x < 4; x++)
{
dst[x] = clipTable[top[topOffset + x]];
}
dst = dst.Slice(WebpConstants.Bps);
}
}
private static void Ve4(Span<byte> dst, Span<byte> top, int topOffset)
{
// vertical
byte[] vals =
{
LossyUtils.Avg3(top[topOffset - 1], top[topOffset], top[topOffset + 1]),
LossyUtils.Avg3(top[topOffset], top[topOffset + 1], top[topOffset + 2]),
LossyUtils.Avg3(top[topOffset + 1], top[topOffset + 2], top[topOffset + 3]),
LossyUtils.Avg3(top[topOffset + 2], top[topOffset + 3], top[topOffset + 4])
};
for (int i = 0; i < 4; i++)
{
vals.AsSpan().CopyTo(dst.Slice(i * WebpConstants.Bps));
}
}
private static void He4(Span<byte> dst, Span<byte> top, int topOffset)
{
// horizontal
byte x = top[topOffset - 1];
byte i = top[topOffset - 2];
byte j = top[topOffset - 3];
byte k = top[topOffset - 4];
byte l = top[topOffset - 5];
uint val = 0x01010101U * LossyUtils.Avg3(x, i, j);
BinaryPrimitives.WriteUInt32BigEndian(dst, val);
val = 0x01010101U * LossyUtils.Avg3(i, j, k);
BinaryPrimitives.WriteUInt32BigEndian(dst.Slice(1 * WebpConstants.Bps), val);
val = 0x01010101U * LossyUtils.Avg3(j, k, l);
BinaryPrimitives.WriteUInt32BigEndian(dst.Slice(2 * WebpConstants.Bps), val);
val = 0x01010101U * LossyUtils.Avg3(k, l, l);
BinaryPrimitives.WriteUInt32BigEndian(dst.Slice(3 * WebpConstants.Bps), val);
}
private static void Rd4(Span<byte> dst, Span<byte> top, int topOffset)
{
byte x = top[topOffset - 1];
byte i = top[topOffset - 2];
byte j = top[topOffset - 3];
byte k = top[topOffset - 4];
byte l = top[topOffset - 5];
byte a = top[topOffset];
byte b = top[topOffset + 1];
byte c = top[topOffset + 2];
byte d = top[topOffset + 3];
LossyUtils.Dst(dst, 0, 3, LossyUtils.Avg3(j, k, l));
byte ijk = LossyUtils.Avg3(i, j, k);
LossyUtils.Dst(dst, 0, 2, ijk);
LossyUtils.Dst(dst, 1, 3, ijk);
byte xij = LossyUtils.Avg3(x, i, j);
LossyUtils.Dst(dst, 0, 1, xij);
LossyUtils.Dst(dst, 1, 2, xij);
LossyUtils.Dst(dst, 2, 3, xij);
byte axi = LossyUtils.Avg3(a, x, i);
LossyUtils.Dst(dst, 0, 0, axi);
LossyUtils.Dst(dst, 1, 1, axi);
LossyUtils.Dst(dst, 2, 2, axi);
LossyUtils.Dst(dst, 3, 3, axi);
byte bax = LossyUtils.Avg3(b, a, x);
LossyUtils.Dst(dst, 1, 0, bax);
LossyUtils.Dst(dst, 2, 1, bax);
LossyUtils.Dst(dst, 3, 2, bax);
byte cba = LossyUtils.Avg3(c, b, a);
LossyUtils.Dst(dst, 2, 0, cba);
LossyUtils.Dst(dst, 3, 1, cba);
LossyUtils.Dst(dst, 3, 0, LossyUtils.Avg3(d, c, b));
}
private static void Vr4(Span<byte> dst, Span<byte> top, int topOffset)
{
byte x = top[topOffset - 1];
byte i = top[topOffset - 2];
byte j = top[topOffset - 3];
byte k = top[topOffset - 4];
byte a = top[topOffset];
byte b = top[topOffset + 1];
byte c = top[topOffset + 2];
byte d = top[topOffset + 3];
byte xa = LossyUtils.Avg2(x, a);
LossyUtils.Dst(dst, 0, 0, xa);
LossyUtils.Dst(dst, 1, 2, xa);
byte ab = LossyUtils.Avg2(a, b);
LossyUtils.Dst(dst, 1, 0, ab);
LossyUtils.Dst(dst, 2, 2, ab);
byte bc = LossyUtils.Avg2(b, c);
LossyUtils.Dst(dst, 2, 0, bc);
LossyUtils.Dst(dst, 3, 2, bc);
LossyUtils.Dst(dst, 3, 0, LossyUtils.Avg2(c, d));
LossyUtils.Dst(dst, 0, 3, LossyUtils.Avg3(k, j, i));
LossyUtils.Dst(dst, 0, 2, LossyUtils.Avg3(j, i, x));
byte ixa = LossyUtils.Avg3(i, x, a);
LossyUtils.Dst(dst, 0, 1, ixa);
LossyUtils.Dst(dst, 1, 3, ixa);
byte xab = LossyUtils.Avg3(x, a, b);
LossyUtils.Dst(dst, 1, 1, xab);
LossyUtils.Dst(dst, 2, 3, xab);
byte abc = LossyUtils.Avg3(a, b, c);
LossyUtils.Dst(dst, 2, 1, abc);
LossyUtils.Dst(dst, 3, 3, abc);
LossyUtils.Dst(dst, 3, 1, LossyUtils.Avg3(b, c, d));
}
private static void Ld4(Span<byte> dst, Span<byte> top, int topOffset)
{
byte a = top[topOffset + 0];
byte b = top[topOffset + 1];
byte c = top[topOffset + 2];
byte d = top[topOffset + 3];
byte e = top[topOffset + 4];
byte f = top[topOffset + 5];
byte g = top[topOffset + 6];
byte h = top[topOffset + 7];
LossyUtils.Dst(dst, 0, 0, LossyUtils.Avg3(a, b, c));
byte bcd = LossyUtils.Avg3(b, c, d);
LossyUtils.Dst(dst, 1, 0, bcd);
LossyUtils.Dst(dst, 0, 1, bcd);
byte cde = LossyUtils.Avg3(c, d, e);
LossyUtils.Dst(dst, 2, 0, cde);
LossyUtils.Dst(dst, 1, 1, cde);
LossyUtils.Dst(dst, 0, 2, cde);
byte def = LossyUtils.Avg3(d, e, f);
LossyUtils.Dst(dst, 3, 0, def);
LossyUtils.Dst(dst, 2, 1, def);
LossyUtils.Dst(dst, 1, 2, def);
LossyUtils.Dst(dst, 0, 3, def);
byte efg = LossyUtils.Avg3(e, f, g);
LossyUtils.Dst(dst, 3, 1, efg);
LossyUtils.Dst(dst, 2, 2, efg);
LossyUtils.Dst(dst, 1, 3, efg);
byte fgh = LossyUtils.Avg3(f, g, h);
LossyUtils.Dst(dst, 3, 2, fgh);
LossyUtils.Dst(dst, 2, 3, fgh);
LossyUtils.Dst(dst, 3, 3, LossyUtils.Avg3(g, h, h));
}
private static void Vl4(Span<byte> dst, Span<byte> top, int topOffset)
{
byte a = top[topOffset + 0];
byte b = top[topOffset + 1];
byte c = top[topOffset + 2];
byte d = top[topOffset + 3];
byte e = top[topOffset + 4];
byte f = top[topOffset + 5];
byte g = top[topOffset + 6];
byte h = top[topOffset + 7];
LossyUtils.Dst(dst, 0, 0, LossyUtils.Avg2(a, b));
byte bc = LossyUtils.Avg2(b, c);
LossyUtils.Dst(dst, 1, 0, bc);
LossyUtils.Dst(dst, 0, 2, bc);
byte cd = LossyUtils.Avg2(c, d);
LossyUtils.Dst(dst, 2, 0, cd);
LossyUtils.Dst(dst, 1, 2, cd);
byte de = LossyUtils.Avg2(d, e);
LossyUtils.Dst(dst, 3, 0, de);
LossyUtils.Dst(dst, 2, 2, de);
LossyUtils.Dst(dst, 0, 1, LossyUtils.Avg3(a, b, c));
byte bcd = LossyUtils.Avg3(b, c, d);
LossyUtils.Dst(dst, 1, 1, bcd);
LossyUtils.Dst(dst, 0, 3, bcd);
byte cde = LossyUtils.Avg3(c, d, e);
LossyUtils.Dst(dst, 2, 1, cde);
LossyUtils.Dst(dst, 1, 3, cde);
byte def = LossyUtils.Avg3(d, e, f);
LossyUtils.Dst(dst, 3, 1, def);
LossyUtils.Dst(dst, 2, 3, def);
LossyUtils.Dst(dst, 3, 2, LossyUtils.Avg3(e, f, g));
LossyUtils.Dst(dst, 3, 3, LossyUtils.Avg3(f, g, h));
}
private static void Hd4(Span<byte> dst, Span<byte> top, int topOffset)
{
byte x = top[topOffset - 1];
byte i = top[topOffset - 2];
byte j = top[topOffset - 3];
byte k = top[topOffset - 4];
byte l = top[topOffset - 5];
byte a = top[topOffset];
byte b = top[topOffset + 1];
byte c = top[topOffset + 2];
byte ix = LossyUtils.Avg2(i, x);
LossyUtils.Dst(dst, 0, 0, ix);
LossyUtils.Dst(dst, 2, 1, ix);
byte ji = LossyUtils.Avg2(j, i);
LossyUtils.Dst(dst, 0, 1, ji);
LossyUtils.Dst(dst, 2, 2, ji);
byte kj = LossyUtils.Avg2(k, j);
LossyUtils.Dst(dst, 0, 2, kj);
LossyUtils.Dst(dst, 2, 3, kj);
LossyUtils.Dst(dst, 0, 3, LossyUtils.Avg2(l, k));
LossyUtils.Dst(dst, 3, 0, LossyUtils.Avg3(a, b, c));
LossyUtils.Dst(dst, 2, 0, LossyUtils.Avg3(x, a, b));
byte ixa = LossyUtils.Avg3(i, x, a);
LossyUtils.Dst(dst, 1, 0, ixa);
LossyUtils.Dst(dst, 3, 1, ixa);
byte jix = LossyUtils.Avg3(j, i, x);
LossyUtils.Dst(dst, 1, 1, jix);
LossyUtils.Dst(dst, 3, 2, jix);
byte kji = LossyUtils.Avg3(k, j, i);
LossyUtils.Dst(dst, 1, 2, kji);
LossyUtils.Dst(dst, 3, 3, kji);
LossyUtils.Dst(dst, 1, 3, LossyUtils.Avg3(l, k, j));
}
private static void Hu4(Span<byte> dst, Span<byte> top, int topOffset)
{
byte i = top[topOffset - 2];
byte j = top[topOffset - 3];
byte k = top[topOffset - 4];
byte l = top[topOffset - 5];
LossyUtils.Dst(dst, 0, 0, LossyUtils.Avg2(i, j));
byte jk = LossyUtils.Avg2(j, k);
LossyUtils.Dst(dst, 2, 0, jk);
LossyUtils.Dst(dst, 0, 1, jk);
byte kl = LossyUtils.Avg2(k, l);
LossyUtils.Dst(dst, 2, 1, kl);
LossyUtils.Dst(dst, 0, 2, kl);
LossyUtils.Dst(dst, 1, 0, LossyUtils.Avg3(i, j, k));
byte jkl = LossyUtils.Avg3(j, k, l);
LossyUtils.Dst(dst, 3, 0, jkl);
LossyUtils.Dst(dst, 1, 1, jkl);
byte kll = LossyUtils.Avg3(k, l, l);
LossyUtils.Dst(dst, 3, 1, kll);
LossyUtils.Dst(dst, 1, 2, kll);
LossyUtils.Dst(dst, 3, 2, l);
LossyUtils.Dst(dst, 2, 2, l);
LossyUtils.Dst(dst, 0, 3, l);
LossyUtils.Dst(dst, 1, 3, l);
LossyUtils.Dst(dst, 2, 3, l);
LossyUtils.Dst(dst, 3, 3, l);
}
[MethodImpl(InliningOptions.ShortMethod)]
private static void Fill(Span<byte> dst, int value, int size)
{
for (int j = 0; j < size; j++)
{
dst.Slice(j * WebpConstants.Bps, size).Fill((byte)value);
}
}
[MethodImpl(InliningOptions.ShortMethod)]
private static byte Clip8b(int v) => (v & ~0xff) == 0 ? (byte)v : v < 0 ? (byte)0 : (byte)255;
[MethodImpl(InliningOptions.ShortMethod)]
private static void Store(Span<byte> dst, Span<byte> reference, int x, int y, int v) => dst[x + (y * WebpConstants.Bps)] = LossyUtils.Clip8B(reference[x + (y * WebpConstants.Bps)] + (v >> 3));
[MethodImpl(InliningOptions.ShortMethod)]
private static int Mul(int a, int b) => (a * b) >> 16;
}
}
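The 4x4 predictors above all address a scratch buffer whose rows are WebpConstants.Bps bytes apart, so pixel (x, y) of the current block lives at dst[x + y * Bps]; this is also what LossyUtils.Dst appears to encode. A minimal sketch of that addressing, assuming a stride of 32 (libwebp's BPS) and using a hypothetical helper that performs plain vertical prediction (Ve4 additionally smooths the top row with Avg3):

using System;
internal static class PredictorSketch
{
    private const int Bps = 32; // assumed scratch-buffer stride, matching libwebp's BPS.
    public static void FillVertical4x4(Span<byte> dst, ReadOnlySpan<byte> topRow)
    {
        for (int y = 0; y < 4; y++)
        {
            for (int x = 0; x < 4; x++)
            {
                dst[x + (y * Bps)] = topRow[x]; // row y of the 4x4 block starts at offset y * Bps.
            }
        }
    }
}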

72
src/ImageSharp/Formats/Webp/Lossy/Vp8FilterHeader.cs

@@ -0,0 +1,72 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8FilterHeader
{
private const int NumRefLfDeltas = 4;
private const int NumModeLfDeltas = 4;
private int filterLevel;
private int sharpness;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8FilterHeader"/> class.
/// </summary>
public Vp8FilterHeader()
{
this.RefLfDelta = new int[NumRefLfDeltas];
this.ModeLfDelta = new int[NumModeLfDeltas];
}
/// <summary>
/// Gets or sets the loop filter.
/// </summary>
public LoopFilter LoopFilter { get; set; }
/// <summary>
/// Gets or sets the filter level. Valid values are [0..63].
/// </summary>
public int FilterLevel
{
get => this.filterLevel;
set
{
Guard.MustBeBetweenOrEqualTo(value, 0, 63, nameof(this.FilterLevel));
this.filterLevel = value;
}
}
/// <summary>
/// Gets or sets the filter sharpness. Valid values are [0..7].
/// </summary>
public int Sharpness
{
get => this.sharpness;
set
{
Guard.MustBeBetweenOrEqualTo(value, 0, 7, nameof(this.Sharpness));
this.sharpness = value;
}
}
/// <summary>
/// Gets or sets a value indicating whether the simple filter type is used instead of the complex (normal) filter.
/// </summary>
public bool Simple { get; set; }
/// <summary>
/// Gets or sets delta filter level for i4x4 relative to i16x16.
/// </summary>
public int I4x4LfDelta { get; set; }
/// <summary>
/// Gets or sets a value indicating whether loop filter level deltas are used.
/// </summary>
public bool UseLfDelta { get; set; }
/// <summary>
/// Gets the loop filter level deltas, one per reference frame type.
/// </summary>
public int[] RefLfDelta { get; }
/// <summary>
/// Gets the loop filter level deltas, one per coding mode.
/// </summary>
public int[] ModeLfDelta { get; }
}
}
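As a usage sketch (not part of the diff), the guards above turn out-of-range assignments into argument exceptions, so callers are expected to stay inside the documented ranges:

var filterHeader = new Vp8FilterHeader
{
    FilterLevel = 40, // valid: [0..63]
    Sharpness = 3     // valid: [0..7]
};
// filterHeader.FilterLevel = 64; // would throw via Guard.MustBeBetweenOrEqualTo.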

83
src/ImageSharp/Formats/Webp/Lossy/Vp8FilterInfo.cs

@@ -0,0 +1,83 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Filter information.
/// </summary>
internal class Vp8FilterInfo : IDeepCloneable
{
private byte limit;
private byte innerLevel;
private byte highEdgeVarianceThreshold;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8FilterInfo"/> class.
/// </summary>
public Vp8FilterInfo()
{
}
/// <summary>
/// Initializes a new instance of the <see cref="Vp8FilterInfo"/> class.
/// </summary>
/// <param name="other">The filter info to create a copy from.</param>
public Vp8FilterInfo(Vp8FilterInfo other)
{
this.Limit = other.Limit;
this.HighEdgeVarianceThreshold = other.HighEdgeVarianceThreshold;
this.InnerLevel = other.InnerLevel;
this.UseInnerFiltering = other.UseInnerFiltering;
}
/// <summary>
/// Gets or sets the filter limit in [3..189], or 0 if no filtering.
/// </summary>
public byte Limit
{
get => this.limit;
set
{
Guard.MustBeBetweenOrEqualTo(value, (byte)0, (byte)189, nameof(this.Limit));
this.limit = value;
}
}
/// <summary>
/// Gets or sets the inner limit in [1..63], or 0 if no filtering.
/// </summary>
public byte InnerLevel
{
get => this.innerLevel;
set
{
Guard.MustBeBetweenOrEqualTo(value, (byte)0, (byte)63, nameof(this.InnerLevel));
this.innerLevel = value;
}
}
/// <summary>
/// Gets or sets a value indicating whether to do inner filtering.
/// </summary>
public bool UseInnerFiltering { get; set; }
/// <summary>
/// Gets or sets the high edge variance threshold in [0..2].
/// </summary>
public byte HighEdgeVarianceThreshold
{
get => this.highEdgeVarianceThreshold;
set
{
Guard.MustBeBetweenOrEqualTo(value, (byte)0, (byte)2, nameof(this.HighEdgeVarianceThreshold));
this.highEdgeVarianceThreshold = value;
}
}
/// <inheritdoc/>
public IDeepCloneable DeepClone() => new Vp8FilterInfo(this);
}
}

26
src/ImageSharp/Formats/Webp/Lossy/Vp8FrameHeader.cs

@@ -0,0 +1,26 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Vp8 frame header information.
/// </summary>
internal class Vp8FrameHeader
{
/// <summary>
/// Gets or sets a value indicating whether this is a key frame.
/// </summary>
public bool KeyFrame { get; set; }
/// <summary>
/// Gets or sets Vp8 profile [0..3].
/// </summary>
public sbyte Profile { get; set; }
/// <summary>
/// Gets or sets the partition length.
/// </summary>
public uint PartitionLength { get; set; }
}
}

140
src/ImageSharp/Formats/Webp/Lossy/Vp8Histogram.cs

@@ -0,0 +1,140 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Runtime.CompilerServices;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8Histogram
{
/// <summary>
/// Size of histogram used by CollectHistogram.
/// </summary>
private const int MaxCoeffThresh = 31;
private int maxValue;
private int lastNonZero;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8Histogram" /> class.
/// </summary>
public Vp8Histogram()
{
this.maxValue = 0;
this.lastNonZero = 1;
}
public int GetAlpha()
{
// 'alpha' will later be clipped to [0..MAX_ALPHA] range, clamping outer
// values which happen to be mostly noise. This leaves the maximum precision
// for handling the useful small values which contribute most.
int maxValue = this.maxValue;
int lastNonZero = this.lastNonZero;
int alpha = maxValue > 1 ? WebpConstants.AlphaScale * lastNonZero / maxValue : 0;
return alpha;
}
public void CollectHistogram(Span<byte> reference, Span<byte> pred, int startBlock, int endBlock)
{
int j;
int[] distribution = new int[MaxCoeffThresh + 1];
for (j = startBlock; j < endBlock; j++)
{
short[] output = new short[16];
this.Vp8FTransform(reference.Slice(WebpLookupTables.Vp8DspScan[j]), pred.Slice(WebpLookupTables.Vp8DspScan[j]), output);
// Convert coefficients to bin.
for (int k = 0; k < 16; ++k)
{
int v = Math.Abs(output[k]) >> 3;
int clippedValue = ClipMax(v, MaxCoeffThresh);
++distribution[clippedValue];
}
}
this.SetHistogramData(distribution);
}
public void Merge(Vp8Histogram other)
{
if (this.maxValue > other.maxValue)
{
other.maxValue = this.maxValue;
}
if (this.lastNonZero > other.lastNonZero)
{
other.lastNonZero = this.lastNonZero;
}
}
private void SetHistogramData(int[] distribution)
{
int maxValue = 0;
int lastNonZero = 1;
for (int k = 0; k <= MaxCoeffThresh; ++k)
{
int value = distribution[k];
if (value > 0)
{
if (value > maxValue)
{
maxValue = value;
}
lastNonZero = k;
}
}
this.maxValue = maxValue;
this.lastNonZero = lastNonZero;
}
private void Vp8FTransform(Span<byte> src, Span<byte> reference, Span<short> output)
{
int i;
int[] tmp = new int[16];
for (i = 0; i < 4; i++)
{
int d0 = src[0] - reference[0]; // 9bit dynamic range ([-255,255])
int d1 = src[1] - reference[1];
int d2 = src[2] - reference[2];
int d3 = src[3] - reference[3];
int a0 = d0 + d3; // 10b [-510,510]
int a1 = d1 + d2;
int a2 = d1 - d2;
int a3 = d0 - d3;
tmp[0 + (i * 4)] = (a0 + a1) * 8; // 14b [-8160,8160]
tmp[1 + (i * 4)] = ((a2 * 2217) + (a3 * 5352) + 1812) >> 9; // [-7536,7542]
tmp[2 + (i * 4)] = (a0 - a1) * 8;
tmp[3 + (i * 4)] = ((a3 * 2217) - (a2 * 5352) + 937) >> 9;
// Do not change the span in the last iteration.
if (i < 3)
{
src = src.Slice(WebpConstants.Bps);
reference = reference.Slice(WebpConstants.Bps);
}
}
for (i = 0; i < 4; i++)
{
int a0 = tmp[0 + i] + tmp[12 + i]; // 15b
int a1 = tmp[4 + i] + tmp[8 + i];
int a2 = tmp[4 + i] - tmp[8 + i];
int a3 = tmp[0 + i] - tmp[12 + i];
output[0 + i] = (short)((a0 + a1 + 7) >> 4); // 12b
output[4 + i] = (short)((((a2 * 2217) + (a3 * 5352) + 12000) >> 16) + (a3 != 0 ? 1 : 0));
output[8 + i] = (short)((a0 - a1 + 7) >> 4);
output[12 + i] = (short)(((a3 * 2217) - (a2 * 5352) + 51000) >> 16);
}
}
[MethodImpl(InliningOptions.ShortMethod)]
private static int ClipMax(int v, int max) => v > max ? max : v;
}
}
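A hedged usage sketch of the class above: CollectHistogram bins the forward-transform coefficients of the blocks in [startBlock, endBlock) and GetAlpha reduces the distribution to a susceptibility value. The helper below is hypothetical; reference and pred are assumed to be Bps-strided sample buffers laid out as WebpLookupTables.Vp8DspScan expects.

using System;
internal static class HistogramSketch
{
    // Texture susceptibility of one macroblock's sixteen luma 4x4 blocks.
    public static int MacroBlockAlpha(Span<byte> reference, Span<byte> pred)
    {
        var histogram = new Vp8Histogram();
        histogram.CollectHistogram(reference, pred, startBlock: 0, endBlock: 16);
        return histogram.GetAlpha(); // AlphaScale * lastNonZero / maxValue, or 0 when maxValue <= 1.
    }
}

Note that Merge writes the per-field maxima into its argument, not into the instance it is called on.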

68
src/ImageSharp/Formats/Webp/Lossy/Vp8Io.cs

@@ -0,0 +1,68 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal ref struct Vp8Io
{
/// <summary>
/// Gets or sets the picture width in pixels (invariable).
/// The actual area passed to put() is stored in the <see cref="MbW"/> field.
/// </summary>
public int Width { get; set; }
/// <summary>
/// Gets or sets the picture height in pixels (invariable).
/// The actual area passed to put() is stored in the <see cref="MbH"/> field.
/// </summary>
public int Height { get; set; }
/// <summary>
/// Gets or sets the y-position of the current macroblock.
/// </summary>
public int MbY { get; set; }
/// <summary>
/// Gets or sets the number of columns in the sample.
/// </summary>
public int MbW { get; set; }
/// <summary>
/// Gets or sets the number of rows in the sample.
/// </summary>
public int MbH { get; set; }
/// <summary>
/// Gets or sets the luma component.
/// </summary>
public Span<byte> Y { get; set; }
/// <summary>
/// Gets or sets the U chroma component.
/// </summary>
public Span<byte> U { get; set; }
/// <summary>
/// Gets or sets the V chroma component.
/// </summary>
public Span<byte> V { get; set; }
/// <summary>
/// Gets or sets the row stride for luma.
/// </summary>
public int YStride { get; set; }
/// <summary>
/// Gets or sets the row stride for chroma.
/// </summary>
public int UvStride { get; set; }
public bool UseScaling { get; set; }
public int ScaledWidth { get; set; }
public int ScaledHeight { get; set; }
}
}
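For orientation, a minimal sketch of how a sample at (x, y) inside the current row window would be addressed, assuming VP8's 4:2:0 layout where the U/V planes carry half the luma resolution (the helpers are hypothetical):

using System;
internal static class Vp8IoSketch
{
    public static byte Luma(Vp8Io io, int x, int y) => io.Y[x + (y * io.YStride)];
    public static byte ChromaU(Vp8Io io, int x, int y) => io.U[(x >> 1) + ((y >> 1) * io.UvStride)];
    public static byte ChromaV(Vp8Io io, int x, int y) => io.V[(x >> 1) + ((y >> 1) * io.UvStride)];
}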

21
src/ImageSharp/Formats/Webp/Lossy/Vp8MacroBlock.cs

@@ -0,0 +1,21 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Contextual macroblock information.
/// </summary>
internal class Vp8MacroBlock
{
/// <summary>
/// Gets or sets non-zero AC/DC coeffs (4bit for luma + 4bit for chroma).
/// </summary>
public uint NoneZeroAcDcCoeffs { get; set; }
/// <summary>
/// Gets or sets non-zero DC coeff (1bit).
/// </summary>
public uint NoneZeroDcCoeffs { get; set; }
}
}

66
src/ImageSharp/Formats/Webp/Lossy/Vp8MacroBlockData.cs

@@ -0,0 +1,66 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Data needed to reconstruct a macroblock.
/// </summary>
internal class Vp8MacroBlockData
{
/// <summary>
/// Initializes a new instance of the <see cref="Vp8MacroBlockData"/> class.
/// </summary>
public Vp8MacroBlockData()
{
this.Modes = new byte[16];
this.Coeffs = new short[384];
}
/// <summary>
/// Gets or sets the coefficients. 384 coeffs = (16+4+4) * 4*4.
/// </summary>
public short[] Coeffs { get; set; }
/// <summary>
/// Gets or sets a value indicating whether the macroblock is intra 4x4 (i4x4) coded.
/// </summary>
public bool IsI4x4 { get; set; }
/// <summary>
/// Gets the modes. One 16x16 mode (#0) or sixteen 4x4 modes.
/// </summary>
public byte[] Modes { get; }
/// <summary>
/// Gets or sets the chroma prediction mode.
/// </summary>
public byte UvMode { get; set; }
/// <summary>
/// Gets or sets bit-wise info about the content of each sub-4x4 blocks (in decoding order).
/// Each of the 4x4 blocks for y/u/v is associated with a 2b code according to:
/// code=0 -> no coefficient
/// code=1 -> only DC
/// code=2 -> first three coefficients are non-zero
/// code=3 -> more than three coefficients are non-zero
/// This allows calling specialized transform functions.
/// </summary>
public uint NonZeroY { get; set; }
/// <summary>
/// Gets or sets bit-wise info about the content of each sub-4x4 blocks (in decoding order).
/// Each of the 4x4 blocks for y/u/v is associated with a 2b code according to:
/// code=0 -> no coefficient
/// code=1 -> only DC
/// code=2 -> first three coefficients are non-zero
/// code=3 -> more than three coefficients are non-zero
/// This allows calling specialized transform functions.
/// </summary>
public uint NonZeroUv { get; set; }
public bool Skip { get; set; }
public byte Segment { get; set; }
}
}
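A hedged sketch of reading the 2-bit codes described above; the two-bits-per-sub-block packing in decoding order is an assumption, not something this diff confirms:

internal static class NonZeroCodeSketch
{
    // 0 = no coefficients, 1 = DC only, 2 = first three non-zero, 3 = more than three non-zero.
    public static int SubBlockCode(uint nonZero, int i) => (int)((nonZero >> (2 * i)) & 0x3);
}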

21
src/ImageSharp/Formats/Webp/Lossy/Vp8MacroBlockInfo.cs

@@ -0,0 +1,21 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System.Diagnostics;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
[DebuggerDisplay("Type: {MacroBlockType}, Alpha: {Alpha}, UvMode: {UvMode}")]
internal class Vp8MacroBlockInfo
{
public Vp8MacroBlockType MacroBlockType { get; set; }
public int UvMode { get; set; }
public bool Skip { get; set; }
public int Segment { get; set; }
public int Alpha { get; set; }
}
}

12
src/ImageSharp/Formats/Webp/Lossy/Vp8MacroBlockType.cs

@@ -0,0 +1,12 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal enum Vp8MacroBlockType
{
I4X4 = 0,
I16X16 = 1
}
}

111
src/ImageSharp/Formats/Webp/Lossy/Vp8Matrix.cs

@@ -0,0 +1,111 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8Matrix
{
private static readonly int[][] BiasMatrices =
{
// [luma-ac,luma-dc,chroma][dc,ac]
new[] { 96, 110 },
new[] { 96, 108 },
new[] { 110, 115 }
};
// Sharpening by (slightly) raising the hi-frequency coeffs.
// Hack-ish but helpful for mid-bitrate range. Use with care.
private static readonly byte[] FreqSharpening = { 0, 30, 60, 90, 30, 60, 90, 90, 60, 90, 90, 90, 90, 90, 90, 90 };
/// <summary>
/// Number of descaling bits for sharpening bias.
/// </summary>
private const int SharpenBits = 11;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8Matrix"/> class.
/// </summary>
public Vp8Matrix()
{
this.Q = new ushort[16];
this.IQ = new ushort[16];
this.Bias = new uint[16];
this.ZThresh = new uint[16];
this.Sharpen = new short[16];
}
/// <summary>
/// Gets the quantizer steps.
/// </summary>
public ushort[] Q { get; }
/// <summary>
/// Gets the reciprocals, fixed point.
/// </summary>
public ushort[] IQ { get; }
/// <summary>
/// Gets the rounding bias.
/// </summary>
public uint[] Bias { get; }
/// <summary>
/// Gets the value below which a coefficient is zeroed.
/// </summary>
public uint[] ZThresh { get; }
/// <summary>
/// Gets the frequency boosters for slight sharpening.
/// </summary>
public short[] Sharpen { get; }
/// <summary>
/// Expands the quantization matrix (reciprocals, bias, zero-threshold and sharpening) and returns the average quantizer.
/// </summary>
/// <param name="type">The matrix type: 0 = luma-ac, 1 = luma-dc, 2 = chroma.</param>
/// <returns>The average quantizer.</returns>
public int Expand(int type)
{
int sum;
int i;
for (i = 0; i < 2; i++)
{
int isAcCoeff = i > 0 ? 1 : 0;
int bias = BiasMatrices[type][isAcCoeff];
this.IQ[i] = (ushort)((1 << WebpConstants.QFix) / this.Q[i]);
this.Bias[i] = (uint)this.BIAS(bias);
// zthresh is the exact value such that QUANTDIV(coeff, iQ, B) is:
// * zero if coeff <= zthresh
// * non-zero if coeff > zthresh
this.ZThresh[i] = ((1 << WebpConstants.QFix) - 1 - this.Bias[i]) / this.IQ[i];
}
for (i = 2; i < 16; i++)
{
this.Q[i] = this.Q[1];
this.IQ[i] = this.IQ[1];
this.Bias[i] = this.Bias[1];
this.ZThresh[i] = this.ZThresh[1];
}
for (sum = 0, i = 0; i < 16; i++)
{
if (type == 0)
{
// We only use sharpening for AC luma coeffs.
this.Sharpen[i] = (short)((FreqSharpening[i] * this.Q[i]) >> SharpenBits);
}
else
{
this.Sharpen[i] = 0;
}
sum += this.Q[i];
}
return (sum + 8) >> 4;
}
private int BIAS(int b) => b << (WebpConstants.QFix - 8);
}
}
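To make the zthresh reasoning in Expand concrete, a worked example assuming WebpConstants.QFix is 17 (libwebp's QFIX) and the luma-DC entry Q[0] = 16 with the bias value 96 listed above:

// IQ      = (1 << 17) / 16                 = 8192
// Bias    = 96 << (17 - 8)                 = 49152
// ZThresh = ((1 << 17) - 1 - 49152) / 8192 = 9
// With the usual quantization step (coeff * IQ + Bias) >> 17:
//   coeff = 9  -> (73728 + 49152) >> 17 = 0   (zeroed, coeff <= zthresh)
//   coeff = 10 -> (81920 + 49152) >> 17 = 1   (survives, coeff > zthresh)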

128
src/ImageSharp/Formats/Webp/Lossy/Vp8ModeScore.cs

@@ -0,0 +1,128 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Class to accumulate score and info during RD-optimization and mode evaluation.
/// </summary>
internal class Vp8ModeScore
{
public const long MaxCost = 0x7fffffffffffffL;
/// <summary>
/// Distortion multiplier (equivalent of lambda).
/// </summary>
private const int RdDistoMult = 256;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8ModeScore"/> class.
/// </summary>
public Vp8ModeScore()
{
this.YDcLevels = new short[16];
this.YAcLevels = new short[16 * 16];
this.UvLevels = new short[(4 + 4) * 16];
this.ModesI4 = new byte[16];
this.Derr = new int[2, 3];
}
/// <summary>
/// Gets or sets the distortion.
/// </summary>
public long D { get; set; }
/// <summary>
/// Gets or sets the spectral distortion.
/// </summary>
public long SD { get; set; }
/// <summary>
/// Gets or sets the header bits.
/// </summary>
public long H { get; set; }
/// <summary>
/// Gets or sets the rate.
/// </summary>
public long R { get; set; }
/// <summary>
/// Gets or sets the score.
/// </summary>
public long Score { get; set; }
/// <summary>
/// Gets the quantized levels for luma-DC.
/// </summary>
public short[] YDcLevels { get; }
/// <summary>
/// Gets the quantized levels for luma-AC.
/// </summary>
public short[] YAcLevels { get; }
/// <summary>
/// Gets the quantized levels for chroma.
/// </summary>
public short[] UvLevels { get; }
/// <summary>
/// Gets or sets the mode number for intra16 prediction.
/// </summary>
public int ModeI16 { get; set; }
/// <summary>
/// Gets the mode numbers for intra4 predictions.
/// </summary>
public byte[] ModesI4 { get; }
/// <summary>
/// Gets or sets the mode number of chroma prediction.
/// </summary>
public int ModeUv { get; set; }
/// <summary>
/// Gets or sets the non-zero blocks.
/// </summary>
public uint Nz { get; set; }
/// <summary>
/// Gets the diffusion errors.
/// </summary>
public int[,] Derr { get; }
public void InitScore()
{
this.D = 0;
this.SD = 0;
this.R = 0;
this.H = 0;
this.Nz = 0;
this.Score = MaxCost;
}
public void CopyScore(Vp8ModeScore other)
{
this.D = other.D;
this.SD = other.SD;
this.R = other.R;
this.H = other.H;
this.Nz = other.Nz; // note that nz is not accumulated, but just copied.
this.Score = other.Score;
}
public void AddScore(Vp8ModeScore other)
{
this.D += other.D;
this.SD += other.SD;
this.R += other.R;
this.H += other.H;
this.Nz |= other.Nz; // here, new nz bits are accumulated.
this.Score += other.Score;
}
public void SetRdScore(int lambda) => this.Score = ((this.R + this.H) * lambda) + (RdDistoMult * (this.D + this.SD));
}
}
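A worked example of the SetRdScore formula above, Score = (R + H) * lambda + 256 * (D + SD), with hypothetical values:

// R = 120, H = 30, lambda = 50, D = 400, SD = 100
// Score = (120 + 30) * 50 + 256 * (400 + 100) = 7,500 + 128,000 = 135,500
// InitScore starts Score at MaxCost, so callers presumably keep the candidate with the lowest score.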

42
src/ImageSharp/Formats/Webp/Lossy/Vp8PictureHeader.cs

@@ -0,0 +1,42 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8PictureHeader
{
/// <summary>
/// Gets or sets the width of the image.
/// </summary>
public uint Width { get; set; }
/// <summary>
/// Gets or sets the height of the image.
/// </summary>
public uint Height { get; set; }
/// <summary>
/// Gets or sets the horizontal scale.
/// </summary>
public sbyte XScale { get; set; }
/// <summary>
/// Gets or sets the vertical scale.
/// </summary>
public sbyte YScale { get; set; }
/// <summary>
/// Gets or sets the colorspace.
/// 0 - YUV color space similar to the YCrCb color space defined in ITU-R BT.601.
/// 1 - Reserved for future use.
/// </summary>
public sbyte ColorSpace { get; set; }
/// <summary>
/// Gets or sets the clamp type.
/// 0 - Decoders are required to clamp the reconstructed pixel values to between 0 and 255 (inclusive).
/// 1 - Reconstructed pixel values are guaranteed to be between 0 and 255; no clamping is necessary.
/// </summary>
public sbyte ClampType { get; set; }
}
}

42
src/ImageSharp/Formats/Webp/Lossy/Vp8Proba.cs

@@ -0,0 +1,42 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Data for all frame-persistent probabilities.
/// </summary>
internal class Vp8Proba
{
private const int MbFeatureTreeProbs = 3;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8Proba"/> class.
/// </summary>
public Vp8Proba()
{
this.Segments = new uint[MbFeatureTreeProbs];
this.Bands = new Vp8BandProbas[WebpConstants.NumTypes, WebpConstants.NumBands];
this.BandsPtr = new Vp8BandProbas[WebpConstants.NumTypes][];
for (int i = 0; i < WebpConstants.NumTypes; i++)
{
for (int j = 0; j < WebpConstants.NumBands; j++)
{
this.Bands[i, j] = new Vp8BandProbas();
}
}
for (int i = 0; i < WebpConstants.NumTypes; i++)
{
this.BandsPtr[i] = new Vp8BandProbas[16 + 1];
}
}
public uint[] Segments { get; }
public Vp8BandProbas[,] Bands { get; }
public Vp8BandProbas[][] BandsPtr { get; }
}
}

21
src/ImageSharp/Formats/Webp/Lossy/Vp8ProbaArray.cs

@@ -0,0 +1,21 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Probabilities associated to one of the contexts.
/// </summary>
internal class Vp8ProbaArray
{
/// <summary>
/// Initializes a new instance of the <see cref="Vp8ProbaArray"/> class.
/// </summary>
public Vp8ProbaArray() => this.Probabilities = new byte[WebpConstants.NumProbas];
/// <summary>
/// Gets the probabilities.
/// </summary>
public byte[] Probabilities { get; }
}
}

34
src/ImageSharp/Formats/Webp/Lossy/Vp8QuantMatrix.cs

@@ -0,0 +1,34 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8QuantMatrix
{
private int dither;
public int[] Y1Mat { get; } = new int[2];
public int[] Y2Mat { get; } = new int[2];
public int[] UvMat { get; } = new int[2];
/// <summary>
/// Gets or sets the U/V quantizer value.
/// </summary>
public int UvQuant { get; set; }
/// <summary>
/// Gets or sets the dithering amplitude (0 = off, max=255).
/// </summary>
public int Dither
{
get => this.dither;
set
{
Guard.MustBeBetweenOrEqualTo(value, 0, 255, nameof(this.Dither));
this.dither = value;
}
}
}
}

31
src/ImageSharp/Formats/Webp/Lossy/Vp8RDLevel.cs

@@ -0,0 +1,31 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Rate-distortion optimization levels.
/// </summary>
internal enum Vp8RdLevel
{
/// <summary>
/// No rd-opt.
/// </summary>
RdOptNone = 0,
/// <summary>
/// Basic scoring (no trellis).
/// </summary>
RdOptBasic = 1,
/// <summary>
/// Perform trellis-quant on the final decision only.
/// </summary>
RdOptTrellis = 2,
/// <summary>
/// Trellis-quant for every scoring (much slower).
/// </summary>
RdOptTrellisAll = 3
}
}

171
src/ImageSharp/Formats/Webp/Lossy/Vp8Residual.cs

@@ -0,0 +1,171 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// On-the-fly info about the current set of residuals.
/// </summary>
internal class Vp8Residual
{
public int First { get; set; }
public int Last { get; set; }
public int CoeffType { get; set; }
public short[] Coeffs { get; set; }
public Vp8BandProbas[] Prob { get; set; }
public Vp8Stats[] Stats { get; set; }
public Vp8Costs[] Costs { get; set; }
public void Init(int first, int coeffType, Vp8EncProba prob)
{
this.First = first;
this.CoeffType = coeffType;
this.Prob = prob.Coeffs[this.CoeffType];
this.Stats = prob.Stats[this.CoeffType];
this.Costs = prob.RemappedCosts[this.CoeffType];
}
public void SetCoeffs(Span<short> coeffs)
{
int n;
this.Last = -1;
for (n = 15; n >= 0; --n)
{
if (coeffs[n] != 0)
{
this.Last = n;
break;
}
}
this.Coeffs = coeffs.Slice(0, 16).ToArray();
}
// Simulate block coding, but only record statistics.
// Note: no need to record the fixed probas.
public int RecordCoeffs(int ctx)
{
int n = this.First;
Vp8StatsArray s = this.Stats[n].Stats[ctx];
if (this.Last < 0)
{
this.RecordStats(0, s, 0);
return 0;
}
while (n <= this.Last)
{
int v;
this.RecordStats(1, s, 0); // order of record doesn't matter
while ((v = this.Coeffs[n++]) == 0)
{
this.RecordStats(0, s, 1);
s = this.Stats[WebpConstants.Vp8EncBands[n]].Stats[0];
}
this.RecordStats(1, s, 1);
bool bit = (uint)(v + 1) > 2u;
if (this.RecordStats(bit ? 1 : 0, s, 2) == 0)
{
// v = -1 or 1
s = this.Stats[WebpConstants.Vp8EncBands[n]].Stats[1];
}
else
{
v = Math.Abs(v);
if (v > WebpConstants.MaxVariableLevel)
{
v = WebpConstants.MaxVariableLevel;
}
int bits = WebpLookupTables.Vp8LevelCodes[v - 1][1];
int pattern = WebpLookupTables.Vp8LevelCodes[v - 1][0];
int i;
for (i = 0; (pattern >>= 1) != 0; i++)
{
int mask = 2 << i;
if ((pattern & 1) != 0)
{
this.RecordStats((bits & mask) != 0 ? 1 : 0, s, 3 + i);
}
}
s = this.Stats[WebpConstants.Vp8EncBands[n]].Stats[2];
}
}
if (n < 16)
{
this.RecordStats(0, s, 0);
}
return 1;
}
public int GetResidualCost(int ctx0)
{
int n = this.First;
int p0 = this.Prob[n].Probabilities[ctx0].Probabilities[0];
Vp8Costs[] costs = this.Costs;
Vp8CostArray t = costs[n].Costs[ctx0];
// bitCost(1, p0) is already incorporated in t[] tables, but only if ctx != 0
// (as required by the syntax). For ctx0 == 0, we need to add it here or it'll
// be missing during the loop.
int cost = ctx0 == 0 ? LossyUtils.Vp8BitCost(1, (byte)p0) : 0;
if (this.Last < 0)
{
return LossyUtils.Vp8BitCost(0, (byte)p0);
}
int v;
for (; n < this.Last; ++n)
{
v = Math.Abs(this.Coeffs[n]);
int ctx = v >= 2 ? 2 : v;
cost += LevelCost(t.Costs, v);
t = costs[n + 1].Costs[ctx];
}
// Last coefficient is always non-zero
v = Math.Abs(this.Coeffs[n]);
cost += LevelCost(t.Costs, v);
if (n < 15)
{
int b = WebpConstants.Vp8EncBands[n + 1];
int ctx = v == 1 ? 1 : 2;
int lastP0 = this.Prob[b].Probabilities[ctx].Probabilities[0];
cost += LossyUtils.Vp8BitCost(0, (byte)lastP0);
}
return cost;
}
private static int LevelCost(Span<ushort> table, int level)
=> WebpLookupTables.Vp8LevelFixedCosts[level] + table[level > WebpConstants.MaxVariableLevel ? WebpConstants.MaxVariableLevel : level];
private int RecordStats(int bit, Vp8StatsArray statsArr, int idx)
{
// An overflow is inbound. Note we handle this at 0xfffe0000u instead of
// 0xffff0000u to make sure p + 1u does not overflow.
if (statsArr.Stats[idx] >= 0xfffe0000u)
{
statsArr.Stats[idx] = ((statsArr.Stats[idx] + 1u) >> 1) & 0x7fff7fffu; // -> divide the stats by 2.
}
// Record bit count (lower 16 bits) and increment total count (upper 16 bits).
statsArr.Stats[idx] += 0x00010000u + (uint)bit;
return bit;
}
}
}
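Two details above are easy to miss: SetCoeffs scans from index 15 downwards to find Last, and RecordStats packs two counters into one uint (observation count in the upper 16 bits, count of '1' bits in the lower 16). A small editorial sketch:

uint stat = 0;
stat += 0x00010000u + 1u;    // one observation of bit == 1
stat += 0x00010000u + 0u;    // one observation of bit == 0
uint total = stat >> 16;     // 2 observations
uint ones = stat & 0xffffu;  // 1 of them was a '1'
// SetCoeffs example: for coeffs = { 5, 0, -3, 0, ..., 0 } the downward scan stops at index 2,
// so Last == 2 (the highest non-zero position).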

45
src/ImageSharp/Formats/Webp/Lossy/Vp8SegmentHeader.cs

@@ -0,0 +1,45 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
/// <summary>
/// Segment features.
/// </summary>
internal class Vp8SegmentHeader
{
private const int NumMbSegments = 4;
/// <summary>
/// Initializes a new instance of the <see cref="Vp8SegmentHeader"/> class.
/// </summary>
public Vp8SegmentHeader()
{
this.Quantizer = new byte[NumMbSegments];
this.FilterStrength = new byte[NumMbSegments];
}
public bool UseSegment { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to update the segment map or not.
/// </summary>
public bool UpdateMap { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to use delta values for quantizer and filter.
/// If this value is false, absolute values are used.
/// </summary>
public bool Delta { get; set; }
/// <summary>
/// Gets quantization changes.
/// </summary>
public byte[] Quantizer { get; }
/// <summary>
/// Gets the filter strength for segments.
/// </summary>
public byte[] FilterStrength { get; }
}
}

85
src/ImageSharp/Formats/Webp/Lossy/Vp8SegmentInfo.cs

@@ -0,0 +1,85 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8SegmentInfo
{
/// <summary>
/// Gets or sets the quantization matrix y1.
/// </summary>
public Vp8Matrix Y1 { get; set; }
/// <summary>
/// Gets or sets the quantization matrix y2.
/// </summary>
public Vp8Matrix Y2 { get; set; }
/// <summary>
/// Gets or sets the quantization matrix uv.
/// </summary>
public Vp8Matrix Uv { get; set; }
/// <summary>
/// Gets or sets the quant-susceptibility, range [-127,127]. Zero is neutral. Lower values indicate a lower risk of blurriness.
/// </summary>
public int Alpha { get; set; }
/// <summary>
/// Gets or sets the filter-susceptibility, range [0,255].
/// </summary>
public int Beta { get; set; }
/// <summary>
/// Gets or sets the final segment quantizer.
/// </summary>
public int Quant { get; set; }
/// <summary>
/// Gets or sets the final in-loop filtering strength.
/// </summary>
public int FStrength { get; set; }
/// <summary>
/// Gets or sets the max edge delta (for filtering strength).
/// </summary>
public int MaxEdge { get; set; }
/// <summary>
/// Gets or sets the penalty for using Intra4.
/// </summary>
public long I4Penalty { get; set; }
/// <summary>
/// Gets or sets the minimum distortion required to trigger filtering record.
/// </summary>
public int MinDisto { get; set; }
public int LambdaI16 { get; set; }
public int LambdaI4 { get; set; }
public int TLambda { get; set; }
public int LambdaUv { get; set; }
public int LambdaMode { get; set; }
public void StoreMaxDelta(Span<short> dcs)
{
// We look at the first three AC coefficients to determine what is the average
// delta between each sub-4x4 block.
int v0 = Math.Abs(dcs[1]);
int v1 = Math.Abs(dcs[2]);
int v2 = Math.Abs(dcs[4]);
int maxV = v1 > v0 ? v1 : v0;
maxV = v2 > maxV ? v2 : maxV;
if (maxV > this.MaxEdge)
{
this.MaxEdge = maxV;
}
}
}
}

22
src/ImageSharp/Formats/Webp/Lossy/Vp8Stats.cs

@@ -0,0 +1,22 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8Stats
{
/// <summary>
/// Initializes a new instance of the <see cref="Vp8Stats"/> class.
/// </summary>
public Vp8Stats()
{
this.Stats = new Vp8StatsArray[WebpConstants.NumCtx];
for (int i = 0; i < WebpConstants.NumCtx; i++)
{
this.Stats[i] = new Vp8StatsArray();
}
}
public Vp8StatsArray[] Stats { get; }
}
}

15
src/ImageSharp/Formats/Webp/Lossy/Vp8StatsArray.cs

@@ -0,0 +1,15 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8StatsArray
{
/// <summary>
/// Initializes a new instance of the <see cref="Vp8StatsArray"/> class.
/// </summary>
public Vp8StatsArray() => this.Stats = new uint[WebpConstants.NumProbas];
public uint[] Stats { get; }
}
}

14
src/ImageSharp/Formats/Webp/Lossy/Vp8TopSamples.cs

@@ -0,0 +1,14 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal class Vp8TopSamples
{
public byte[] Y { get; } = new byte[16];
public byte[] U { get; } = new byte[8];
public byte[] V { get; } = new byte[8];
}
}

1376
src/ImageSharp/Formats/Webp/Lossy/WebpLossyDecoder.cs

File diff suppressed because it is too large

303
src/ImageSharp/Formats/Webp/Lossy/YuvConversion.cs

@@ -0,0 +1,303 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using System;
using System.Buffers;
using System.Runtime.CompilerServices;
using SixLabors.ImageSharp.Memory;
using SixLabors.ImageSharp.PixelFormats;
namespace SixLabors.ImageSharp.Formats.Webp.Lossy
{
internal static class YuvConversion
{
/// <summary>
/// Fixed-point precision for RGB->YUV.
/// </summary>
private const int YuvFix = 16;
private const int YuvHalf = 1 << (YuvFix - 1);
/// <summary>
/// Converts the RGB values of the image to YUV.
/// </summary>
/// <typeparam name="TPixel">The pixel type of the image.</typeparam>
/// <param name="image">The image to convert.</param>
/// <param name="configuration">The global configuration.</param>
/// <param name="memoryAllocator">The memory allocator.</param>
/// <param name="y">Span to store the luma component of the image.</param>
/// <param name="u">Span to store the u component of the image.</param>
/// <param name="v">Span to store the v component of the image.</param>
public static void ConvertRgbToYuv<TPixel>(Image<TPixel> image, Configuration configuration, MemoryAllocator memoryAllocator, Span<byte> y, Span<byte> u, Span<byte> v)
where TPixel : unmanaged, IPixel<TPixel>
{
int width = image.Width;
int height = image.Height;
int uvWidth = (width + 1) >> 1;
// Temporary storage for accumulated R/G/B values during conversion to U/V.
using IMemoryOwner<ushort> tmpRgb = memoryAllocator.Allocate<ushort>(4 * uvWidth);
using IMemoryOwner<Bgra32> bgraRow0Buffer = memoryAllocator.Allocate<Bgra32>(width);
using IMemoryOwner<Bgra32> bgraRow1Buffer = memoryAllocator.Allocate<Bgra32>(width);
Span<ushort> tmpRgbSpan = tmpRgb.GetSpan();
Span<Bgra32> bgraRow0 = bgraRow0Buffer.GetSpan();
Span<Bgra32> bgraRow1 = bgraRow1Buffer.GetSpan();
int uvRowIndex = 0;
int rowIndex;
for (rowIndex = 0; rowIndex < height - 1; rowIndex += 2)
{
Span<TPixel> rowSpan = image.GetPixelRowSpan(rowIndex);
Span<TPixel> nextRowSpan = image.GetPixelRowSpan(rowIndex + 1);
PixelOperations<TPixel>.Instance.ToBgra32(configuration, rowSpan, bgraRow0);
PixelOperations<TPixel>.Instance.ToBgra32(configuration, nextRowSpan, bgraRow1);
bool rowsHaveAlpha = WebpCommonUtils.CheckNonOpaque(bgraRow0) && WebpCommonUtils.CheckNonOpaque(bgraRow1);
// Downsample U/V planes, two rows at a time.
if (!rowsHaveAlpha)
{
AccumulateRgb(bgraRow0, bgraRow1, tmpRgbSpan, width);
}
else
{
AccumulateRgba(bgraRow0, bgraRow1, tmpRgbSpan, width);
}
ConvertRgbaToUv(tmpRgbSpan, u.Slice(uvRowIndex * uvWidth), v.Slice(uvRowIndex * uvWidth), uvWidth);
uvRowIndex++;
ConvertRgbaToY(bgraRow0, y.Slice(rowIndex * width), width);
ConvertRgbaToY(bgraRow1, y.Slice((rowIndex + 1) * width), width);
}
// Extra last row.
if ((height & 1) != 0)
{
Span<TPixel> rowSpan = image.GetPixelRowSpan(rowIndex);
PixelOperations<TPixel>.Instance.ToBgra32(configuration, rowSpan, bgraRow0);
ConvertRgbaToY(bgraRow0, y.Slice(rowIndex * width), width);
if (!WebpCommonUtils.CheckNonOpaque(bgraRow0))
{
AccumulateRgb(bgraRow0, bgraRow0, tmpRgbSpan, width);
}
else
{
AccumulateRgba(bgraRow0, bgraRow0, tmpRgbSpan, width);
}
ConvertRgbaToUv(tmpRgbSpan, u.Slice(uvRowIndex * uvWidth), v.Slice(uvRowIndex * uvWidth), uvWidth);
}
}
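// Sizing note for ConvertRgbToYuv above (editorial, derived from the loop structure): the y span
// receives width * height bytes, while u and v each receive ((width + 1) / 2) * ((height + 1) / 2)
// bytes - one u/v row per pair of image rows, plus one extra row when the height is odd.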
/// <summary>
/// Converts an RGBA pixel row to Y.
/// </summary>
/// <param name="rowSpan">The row span to convert.</param>
/// <param name="y">The destination span for y.</param>
/// <param name="width">The width.</param>
[MethodImpl(InliningOptions.ShortMethod)]
public static void ConvertRgbaToY(Span<Bgra32> rowSpan, Span<byte> y, int width)
{
for (int x = 0; x < width; x++)
{
y[x] = (byte)RgbToY(rowSpan[x].R, rowSpan[x].G, rowSpan[x].B, YuvHalf);
}
}
/// <summary>
/// Converts an RGB row of pixels to UV.
/// </summary>
/// <param name="rgb">The RGB pixel row.</param>
/// <param name="u">The destination span for u.</param>
/// <param name="v">The destination span for v.</param>
/// <param name="width">The width.</param>
public static void ConvertRgbaToUv(Span<ushort> rgb, Span<byte> u, Span<byte> v, int width)
{
int rgbIdx = 0;
for (int i = 0; i < width; i += 1, rgbIdx += 4)
{
int r = rgb[rgbIdx], g = rgb[rgbIdx + 1], b = rgb[rgbIdx + 2];
u[i] = (byte)RgbToU(r, g, b, YuvHalf << 2);
v[i] = (byte)RgbToV(r, g, b, YuvHalf << 2);
}
}
public static void AccumulateRgb(Span<Bgra32> rowSpan, Span<Bgra32> nextRowSpan, Span<ushort> dst, int width)
{
Bgra32 bgra0;
Bgra32 bgra1;
int i, j;
int dstIdx = 0;
for (i = 0, j = 0; i < (width >> 1); i += 1, j += 2, dstIdx += 4)
{
bgra0 = rowSpan[j];
bgra1 = rowSpan[j + 1];
Bgra32 bgra2 = nextRowSpan[j];
Bgra32 bgra3 = nextRowSpan[j + 1];
dst[dstIdx] = (ushort)LinearToGamma(
GammaToLinear(bgra0.R) +
GammaToLinear(bgra1.R) +
GammaToLinear(bgra2.R) +
GammaToLinear(bgra3.R),
0);
dst[dstIdx + 1] = (ushort)LinearToGamma(
GammaToLinear(bgra0.G) +
GammaToLinear(bgra1.G) +
GammaToLinear(bgra2.G) +
GammaToLinear(bgra3.G),
0);
dst[dstIdx + 2] = (ushort)LinearToGamma(
GammaToLinear(bgra0.B) +
GammaToLinear(bgra1.B) +
GammaToLinear(bgra2.B) +
GammaToLinear(bgra3.B),
0);
}
if ((width & 1) != 0)
{
bgra0 = rowSpan[j];
bgra1 = nextRowSpan[j];
dst[dstIdx] = (ushort)LinearToGamma(GammaToLinear(bgra0.R) + GammaToLinear(bgra1.R), 1);
dst[dstIdx + 1] = (ushort)LinearToGamma(GammaToLinear(bgra0.G) + GammaToLinear(bgra1.G), 1);
dst[dstIdx + 2] = (ushort)LinearToGamma(GammaToLinear(bgra0.B) + GammaToLinear(bgra1.B), 1);
}
}
public static void AccumulateRgba(Span<Bgra32> rowSpan, Span<Bgra32> nextRowSpan, Span<ushort> dst, int width)
{
Bgra32 bgra0;
Bgra32 bgra1;
int i, j;
int dstIdx = 0;
for (i = 0, j = 0; i < width >> 1; i += 1, j += 2, dstIdx += 4)
{
bgra0 = rowSpan[j];
bgra1 = rowSpan[j + 1];
Bgra32 bgra2 = nextRowSpan[j];
Bgra32 bgra3 = nextRowSpan[j + 1];
uint a = (uint)(bgra0.A + bgra1.A + bgra2.A + bgra3.A);
int r, g, b;
if (a is 4 * 0xff or 0)
{
r = (ushort)LinearToGamma(
GammaToLinear(bgra0.R) +
GammaToLinear(bgra1.R) +
GammaToLinear(bgra2.R) +
GammaToLinear(bgra3.R),
0);
g = (ushort)LinearToGamma(
GammaToLinear(bgra0.G) +
GammaToLinear(bgra1.G) +
GammaToLinear(bgra2.G) +
GammaToLinear(bgra3.G),
0);
b = (ushort)LinearToGamma(
GammaToLinear(bgra0.B) +
GammaToLinear(bgra1.B) +
GammaToLinear(bgra2.B) +
GammaToLinear(bgra3.B),
0);
}
else
{
r = LinearToGammaWeighted(bgra0.R, bgra1.R, bgra2.R, bgra3.R, bgra0.A, bgra1.A, bgra2.A, bgra3.A, a);
g = LinearToGammaWeighted(bgra0.G, bgra1.G, bgra2.G, bgra3.G, bgra0.A, bgra1.A, bgra2.A, bgra3.A, a);
b = LinearToGammaWeighted(bgra0.B, bgra1.B, bgra2.B, bgra3.B, bgra0.A, bgra1.A, bgra2.A, bgra3.A, a);
}
dst[dstIdx] = (ushort)r;
dst[dstIdx + 1] = (ushort)g;
dst[dstIdx + 2] = (ushort)b;
dst[dstIdx + 3] = (ushort)a;
}
if ((width & 1) != 0)
{
bgra0 = rowSpan[j];
bgra1 = nextRowSpan[j];
uint a = (uint)(2u * (bgra0.A + bgra1.A));
int r, g, b;
if (a is 4 * 0xff or 0)
{
r = (ushort)LinearToGamma(GammaToLinear(bgra0.R) + GammaToLinear(bgra1.R), 1);
g = (ushort)LinearToGamma(GammaToLinear(bgra0.G) + GammaToLinear(bgra1.G), 1);
b = (ushort)LinearToGamma(GammaToLinear(bgra0.B) + GammaToLinear(bgra1.B), 1);
}
else
{
r = LinearToGammaWeighted(bgra0.R, bgra1.R, bgra0.R, bgra1.R, bgra0.A, bgra1.A, bgra0.A, bgra1.A, a);
g = LinearToGammaWeighted(bgra0.G, bgra1.G, bgra0.G, bgra1.G, bgra0.A, bgra1.A, bgra0.A, bgra1.A, a);
b = LinearToGammaWeighted(bgra0.B, bgra1.B, bgra0.B, bgra1.B, bgra0.A, bgra1.A, bgra0.A, bgra1.A, a);
}
dst[dstIdx] = (ushort)r;
dst[dstIdx + 1] = (ushort)g;
dst[dstIdx + 2] = (ushort)b;
dst[dstIdx + 3] = (ushort)a;
}
}
[MethodImpl(InliningOptions.ShortMethod)]
private static int LinearToGammaWeighted(byte rgb0, byte rgb1, byte rgb2, byte rgb3, byte a0, byte a1, byte a2, byte a3, uint totalA)
{
uint sum = (a0 * GammaToLinear(rgb0)) + (a1 * GammaToLinear(rgb1)) + (a2 * GammaToLinear(rgb2)) + (a3 * GammaToLinear(rgb3));
return LinearToGamma((sum * WebpLookupTables.InvAlpha[totalA]) >> (WebpConstants.AlphaFix - 2), 0);
}
// Convert a linear value 'v' to YUV_FIX+2 fixed-point precision
// U/V value, suitable for RGBToU/V calls.
[MethodImpl(InliningOptions.ShortMethod)]
private static int LinearToGamma(uint baseValue, int shift)
{
int y = Interpolate((int)(baseValue << shift)); // Final uplifted value.
return (y + WebpConstants.GammaTabRounder) >> WebpConstants.GammaTabFix; // Descale.
}
[MethodImpl(InliningOptions.ShortMethod)]
private static uint GammaToLinear(byte v) => WebpLookupTables.GammaToLinearTab[v];
[MethodImpl(InliningOptions.ShortMethod)]
private static int Interpolate(int v)
{
int tabPos = v >> (WebpConstants.GammaTabFix + 2); // integer part.
int x = v & ((WebpConstants.GammaTabScale << 2) - 1); // fractional part.
int v0 = WebpLookupTables.LinearToGammaTab[tabPos];
int v1 = WebpLookupTables.LinearToGammaTab[tabPos + 1];
int y = (v1 * x) + (v0 * ((WebpConstants.GammaTabScale << 2) - x)); // interpolate
return y;
}
[MethodImpl(InliningOptions.ShortMethod)]
private static int RgbToY(byte r, byte g, byte b, int rounding)
{
int luma = (16839 * r) + (33059 * g) + (6420 * b);
return (luma + rounding + (16 << YuvFix)) >> YuvFix; // No need to clip.
}
[MethodImpl(InliningOptions.ShortMethod)]
private static int RgbToU(int r, int g, int b, int rounding)
{
int u = (-9719 * r) - (19081 * g) + (28800 * b);
return ClipUv(u, rounding);
}
[MethodImpl(InliningOptions.ShortMethod)]
private static int RgbToV(int r, int g, int b, int rounding)
{
int v = (+28800 * r) - (24116 * g) - (4684 * b);
return ClipUv(v, rounding);
}
[MethodImpl(InliningOptions.ShortMethod)]
private static int ClipUv(int uv, int rounding)
{
uv = (uv + rounding + (128 << (YuvFix + 2))) >> (YuvFix + 2);
return (uv & ~0xff) == 0 ? uv : uv < 0 ? 0 : 255;
}
}
}
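As a sanity check on the fixed-point constants above, a standalone sketch that mirrors the RgbToY arithmetic (YuvFix = 16, rounding = YuvHalf); the class and method names are made up:

internal static class YuvSketch
{
    public static int RgbToY(byte r, byte g, byte b)
        => ((16839 * r) + (33059 * g) + (6420 * b) + (1 << 15) + (16 << 16)) >> 16;
}
// YuvSketch.RgbToY(255, 255, 255) == 235 and YuvSketch.RgbToY(0, 0, 0) == 16,
// i.e. the studio-swing luma range expected from BT.601-style coefficients.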

BIN
src/ImageSharp/Formats/Webp/Lossy/rfc6386_lossy_specification.pdf

Binary file not shown.

21
src/ImageSharp/Formats/Webp/MetadataExtensions.cs

@@ -0,0 +1,21 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
using SixLabors.ImageSharp.Formats.Webp;
using SixLabors.ImageSharp.Metadata;
namespace SixLabors.ImageSharp
{
/// <summary>
/// Extension methods for the <see cref="ImageMetadata"/> type.
/// </summary>
public static partial class MetadataExtensions
{
/// <summary>
/// Gets the webp format specific metadata for the image.
/// </summary>
/// <param name="metadata">The metadata this method extends.</param>
/// <returns>The <see cref="WebpMetadata"/>.</returns>
public static WebpMetadata GetWebpMetadata(this ImageMetadata metadata) => metadata.GetFormatMetadata(WebpFormat.Instance);
}
}
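A usage sketch for the extension above; the file name and the using directives are only an example:

// using SixLabors.ImageSharp;
// using SixLabors.ImageSharp.Formats.Webp;
using var image = Image.Load("input.webp");
WebpMetadata webpMetadata = image.Metadata.GetWebpMetadata();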

10
src/ImageSharp/Formats/Webp/Readme.md

@@ -0,0 +1,10 @@
# Webp Format
Reference implementation, specification and related resources:
- [google webp introduction](https://developers.google.com/speed/webp)
- [Webp Spec 1.0.3](https://chromium.googlesource.com/webm/libwebp/+/v1.0.3/doc/webp-container-spec.txt)
- [Webp VP8 Spec, Lossy](http://tools.ietf.org/html/rfc6386)
- [Webp VP8L Spec, Lossless](https://developers.google.com/speed/webp/docs/webp_lossless_bitstream_specification)
- [Webp file format wiki](https://wiki.fileformat.com/image/webp/)
- [Webp test data](https://github.com/webmproject/libwebp-test-data/)

Some files were not shown because too many files changed in this diff
