diff --git a/search-index.json b/search-index.json
index b532a1a5..611dc4a9 100644
--- a/search-index.json
+++ b/search-index.json
@@ -1 +1 @@
-[{"documents":[{"i":1,"t":"","u":"/blog/archive","b":["Blog"]},{"i":2,"t":"AV1 Encoding for Dummies","u":"/blog/av1-encoding-for-dummies","b":["Blog"]},{"i":35,"t":"Embedding the Un-Embeddable","u":"/blog/embedding-the-un-embeddable","b":["Blog"]},{"i":62,"t":"Reducing Image Load Online","u":"/blog/site-optimization","b":["Blog"]},{"i":78,"t":"AV1 for Dummies","u":"/blog/av1-for-dummies","b":["Blog"]},{"i":108,"t":"AAC","u":"/docs/audio/AAC","b":["π Audio"]},{"i":138,"t":"Codec Wiki: One Year Later","u":"/blog/codec-wiki-one-year-later","b":["Blog"]},{"i":152,"t":"ALAC","u":"/docs/audio/ALAC","b":["π Audio"]},{"i":154,"t":"Dolby Digital","u":"/docs/audio/Dolby","b":["π Audio"]},{"i":167,"t":"FLAC","u":"/docs/audio/FLAC","b":["π Audio"]},{"i":175,"t":"Introduction to Lossy & Lossless Audio Compression","u":"/docs/audio/intro","b":["π Audio"]},{"i":185,"t":"MP3","u":"/docs/audio/MP3","b":["π Audio"]},{"i":187,"t":"Opus","u":"/docs/audio/Opus","b":["π Audio"]},{"i":202,"t":"Intro","u":"/docs/colorimetry/intro","b":["π¨ Colorimetry"]},{"i":204,"t":"Vorbis","u":"/docs/audio/Vorbis","b":["π Audio"]},{"i":206,"t":"Matrix Coefficients","u":"/docs/colorimetry/matrix","b":["π¨ Colorimetry"]},{"i":236,"t":"Color Primaries","u":"/docs/colorimetry/primaries","b":["π¨ Colorimetry"]},{"i":262,"t":"Color Range","u":"/docs/colorimetry/range","b":["π¨ Colorimetry"]},{"i":264,"t":"Transfer Characteristics","u":"/docs/colorimetry/transfer","b":["π¨ Colorimetry"]},{"i":300,"t":"Speex","u":"/docs/audio/Speex","b":["π Audio"]},{"i":302,"t":"7-zip (7z)","u":"/docs/data/7z","b":["π½ Data"]},{"i":304,"t":"WavPack","u":"/docs/audio/WavPack","b":["π Audio"]},{"i":321,"t":"Brotli","u":"/docs/data/brotli","b":["π½ Data"]},{"i":323,"t":"Contribution Guide","u":"/docs/contribution-guide","b":[]},{"i":341,"t":"Color Formats","u":"/docs/colorimetry/format","b":["π¨ Colorimetry"]},{"i":365,"t":"gzip","u":"/docs/data/gzip","b":["π½ Data"]},{"i":376,"t":"ZIP","u":"/docs/data/zip","b":["π½ Data"]},{"i":386,"t":"bzip2","u":"/docs/data/bzip2","b":["π½ Data"]},{"i":388,"t":"AMF","u":"/docs/encoders_hw/amf","b":["π Hardware Encoders"]},{"i":390,"t":"tar","u":"/docs/data/tar","b":["π½ Data"]},{"i":398,"t":"Zstandard","u":"/docs/data/zstd","b":["π½ Data"]},{"i":406,"t":"XZ","u":"/docs/data/xz","b":["π½ Data"]},{"i":414,"t":"QSV","u":"/docs/encoders_hw/qsv","b":["π Hardware Encoders"]},{"i":416,"t":"Mediacodec","u":"/docs/encoders_hw/mediacodec","b":["π Hardware Encoders"]},{"i":422,"t":"NVENC","u":"/docs/encoders_hw/nvenc","b":["π Hardware Encoders"]},{"i":424,"t":"Aurora1 AV1","u":"/docs/encoders/Aurora1","b":["πΎ Encoders"]},{"i":426,"t":"aom-psy101","u":"/docs/encoders/aom-psy101","b":["πΎ Encoders"]},{"i":432,"t":"AVM","u":"/docs/encoders/AVM","b":["πΎ Encoders"]},{"i":445,"t":"aom-av1-lavish","u":"/docs/encoders/aom-av1-lavish","b":["πΎ Encoders"]},{"i":460,"t":"aomenc","u":"/docs/encoders/aomenc","b":["πΎ Encoders"]},{"i":477,"t":"HM","u":"/docs/encoders/HM","b":["πΎ Encoders"]},{"i":483,"t":"Kvazaar","u":"/docs/encoders/Kvazaar","b":["πΎ Encoders"]},{"i":499,"t":"rav1e","u":"/docs/encoders/rav1e","b":["πΎ Encoders"]},{"i":513,"t":"ZPAQ","u":"/docs/data/zpaq","b":["π½ Data"]},{"i":515,"t":"SVT-VP9","u":"/docs/encoders/SVT-VP9","b":["πΎ Encoders"]},{"i":524,"t":"SVT-AV1-PSY","u":"/docs/encoders/SVT-AV1-PSY","b":["πΎ Encoders"]},{"i":540,"t":"VideoToolbox","u":"/docs/encoders_hw/videotoolbox","b":["π Hardware Encoders"]},{"i":548,"t":"JM","u":"/docs/encoders/JM","b":["πΎ 
Encoders"]},{"i":550,"t":"VTM","u":"/docs/encoders/VTM","b":["πΎ Encoders"]},{"i":552,"t":"SVT-HEVC","u":"/docs/encoders/SVT-HEVC","b":["πΎ Encoders"]},{"i":569,"t":"vpxenc","u":"/docs/encoders/vpxenc","b":["πΎ Encoders"]},{"i":593,"t":"SVT-AV1","u":"/docs/encoders/SVT-AV1","b":["πΎ Encoders"]},{"i":612,"t":"uavs3e","u":"/docs/encoders/uavs3e","b":["πΎ Encoders"]},{"i":622,"t":"uvg266","u":"/docs/encoders/uvg266","b":["πΎ Encoders"]},{"i":635,"t":"x265","u":"/docs/encoders/x265","b":["πΎ Encoders"]},{"i":659,"t":"VVenC","u":"/docs/encoders/VVenC","b":["πΎ Encoders"]},{"i":667,"t":"x266","u":"/docs/encoders/x266","b":["πΎ Encoders"]},{"i":669,"t":"Antialiasing","u":"/docs/filtering/antialiasing","b":["ποΈ Filtering"]},{"i":671,"t":"Deband","u":"/docs/filtering/deband","b":["ποΈ Filtering"]},{"i":673,"t":"x264","u":"/docs/encoders/x264","b":["πΎ Encoders"]},{"i":699,"t":"FAQ","u":"/docs/FAQ","b":[]},{"i":708,"t":"Decombing","u":"/docs/filtering/decombing","b":["ποΈ Filtering"]},{"i":710,"t":"Deinterlace","u":"/docs/filtering/deinterlace","b":["ποΈ Filtering"]},{"i":712,"t":"Basics of Filtering with Vapoursynth","u":"/docs/filtering/basics","b":["ποΈ Filtering"]},{"i":725,"t":"Dehalo","u":"/docs/filtering/dehalo","b":["ποΈ Filtering"]},{"i":727,"t":"Denoise","u":"/docs/filtering/denoise","b":["ποΈ Filtering"]},{"i":743,"t":"Inverse Telecine","u":"/docs/filtering/ivtc","b":["ποΈ Filtering"]},{"i":745,"t":"Stabilizing","u":"/docs/filtering/stabilizing","b":["ποΈ Filtering"]},{"i":757,"t":"Vapoursynth","u":"/docs/filtering/vapoursynth","b":["ποΈ Filtering"]},{"i":780,"t":"AVIF","u":"/docs/images/AVIF","b":["ποΈ Images"]},{"i":791,"t":"GIF","u":"/docs/images/GIF","b":["ποΈ Images"]},{"i":795,"t":"HEIC","u":"/docs/images/HEIC","b":["ποΈ Images"]},{"i":799,"t":"JPEG","u":"/docs/images/JPEG","b":["ποΈ Images"]},{"i":805,"t":"JPEG 2000","u":"/docs/images/JPEG2000","b":["ποΈ Images"]},{"i":807,"t":"Graining","u":"/docs/filtering/graining","b":["ποΈ Filtering"]},{"i":809,"t":"QOI","u":"/docs/images/QOI","b":["ποΈ Images"]},{"i":821,"t":"Lossless Compression","u":"/docs/introduction/lossless","b":["π‘ Introduction"]},{"i":827,"t":"PNG","u":"/docs/images/PNG","b":["ποΈ Images"]},{"i":831,"t":"High Dynamic Range","u":"/docs/introduction/high-dynamic-range","b":["π‘ Introduction"]},{"i":841,"t":"Lossy Compression","u":"/docs/introduction/lossy","b":["π‘ Introduction"]},{"i":845,"t":"Prologue","u":"/docs/introduction/prologue","b":["Get Started","π‘ Introduction"]},{"i":853,"t":"Psychovisual","u":"/docs/introduction/psychovisual","b":["π‘ Introduction"]},{"i":857,"t":"JPEG XL","u":"/docs/images/JXL","b":["ποΈ Images"]},{"i":890,"t":"Terminology","u":"/docs/introduction/terminology","b":["π‘ Introduction"]},{"i":918,"t":"Butteraugli","u":"/docs/metrics/butteraugli","b":["ποΈ Metrics"]},{"i":920,"t":"PSNR","u":"/docs/metrics/PSNR","b":["ποΈ Metrics"]},{"i":922,"t":"WebP","u":"/docs/images/WebP","b":["ποΈ Images"]},{"i":931,"t":"SSIMULACRA2","u":"/docs/metrics/SSIMULACRA2","b":["ποΈ Metrics"]},{"i":942,"t":"SSIM","u":"/docs/metrics/SSIM","b":["ποΈ Metrics"]},{"i":944,"t":"Privacy Policy","u":"/docs/privacy-policy","b":[]},{"i":946,"t":"XPSNR","u":"/docs/metrics/XPSNR","b":["ποΈ Metrics"]},{"i":956,"t":"Resources","u":"/docs/resources","b":[]},{"i":958,"t":"SRT","u":"/docs/subtitles/SRT","b":["π¬ Subtitles"]},{"i":966,"t":"WebVTT","u":"/docs/subtitles/webvtt","b":["π¬ Subtitles"]},{"i":974,"t":"VMAF","u":"/docs/metrics/VMAF","b":["ποΈ Metrics"]},{"i":990,"t":"Overview","u":"/docs/subtitles/SSA","b":["π¬ 
Subtitles"]},{"i":998,"t":"Terms of Use","u":"/docs/terms-of-use","b":[]},{"i":1000,"t":"Spotting Video Artifacts","u":"/docs/introduction/video-artifacts","b":["π‘ Introduction"]},{"i":1016,"t":"Autocompressor","u":"/docs/utilities/autocompressor","b":["π οΈ Utilities"]},{"i":1018,"t":"Av1an Command Generator","u":"/docs/utilities/av1an-command-gen","b":["π οΈ Utilities"]},{"i":1030,"t":"Discord","u":"/docs/utilities/Discord","b":["π οΈ Utilities"]},{"i":1036,"t":"Aviator","u":"/docs/utilities/Aviator","b":["π οΈ Utilities"]},{"i":1052,"t":"dovi_tool","u":"/docs/utilities/dovi_tool","b":["π οΈ Utilities"]},{"i":1058,"t":"Av1an","u":"/docs/utilities/av1an","b":["π οΈ Utilities"]},{"i":1078,"t":"eac3to","u":"/docs/utilities/eac3to","b":["π οΈ Utilities"]},{"i":1091,"t":"FFMetrics","u":"/docs/utilities/FFMetrics","b":["π οΈ Utilities"]},{"i":1097,"t":"NMKODER","u":"/docs/utilities/nmkoder","b":["π οΈ Utilities"]},{"i":1103,"t":"hdr10plus_tool","u":"/docs/utilities/hdr10plus_tool","b":["π οΈ Utilities"]},{"i":1115,"t":"rAV1ator","u":"/docs/utilities/rAV1ator","b":["π οΈ Utilities"]},{"i":1117,"t":"MP4Box","u":"/docs/utilities/mp4box","b":["π οΈ Utilities"]},{"i":1119,"t":"YUView","u":"/docs/utilities/YUView","b":["π οΈ Utilities"]},{"i":1125,"t":"FFmpeg","u":"/docs/utilities/ffmpeg","b":["π οΈ Utilities"]},{"i":1135,"t":"Video Players","u":"/docs/video-players","b":[]},{"i":1143,"t":"AV1","u":"/docs/video/AV1","b":["πΉοΈ Video"]},{"i":1145,"t":"AVS3","u":"/docs/video/AVS3","b":["πΉοΈ Video"]},{"i":1147,"t":"rAV1ator CLI","u":"/docs/utilities/rav1ator-cli","b":["π οΈ Utilities"]},{"i":1167,"t":"H.264","u":"/docs/video/AVC","b":["πΉοΈ Video"]},{"i":1169,"t":"ECM","u":"/docs/video/ECM","b":["πΉοΈ Video"]},{"i":1171,"t":"FFV1","u":"/docs/video/FFV1","b":["πΉοΈ Video"]},{"i":1181,"t":"MKVToolNix","u":"/docs/utilities/MKVToolNix","b":["π οΈ Utilities"]},{"i":1189,"t":"ProRes","u":"/docs/video/prores","b":["πΉοΈ Video"]},{"i":1195,"t":"Theora","u":"/docs/video/Theora","b":["πΉοΈ Video"]},{"i":1199,"t":"UT Video Codec Suite","u":"/docs/video/utvideo","b":["πΉοΈ Video"]},{"i":1201,"t":"VC-1","u":"/docs/video/VC-1","b":["πΉοΈ Video"]},{"i":1207,"t":"VP8","u":"/docs/video/VP8","b":["πΉοΈ Video"]},{"i":1209,"t":"VP9","u":"/docs/video/VP9","b":["πΉοΈ Video"]},{"i":1211,"t":"H.266","u":"/docs/video/VVC","b":["πΉοΈ Video"]},{"i":1213,"t":"H.265","u":"/docs/video/HEVC","b":["πΉοΈ Video"]},{"i":1215,"t":"Encoding Animation with SVT-AV1: A Deep Dive","u":"/blog/svt-av1-deep-dive","b":["Blog"]},{"i":1273,"t":"Observing SVT-AV1 v2.1.0's improvements: A New Deep 
Dive","u":"/blog/svt-av1-second-deep-dive","b":["Blog"]}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/1",[]],["t/2",[0,1.873,1,2.826,2,2.826]],["t/35",[3,3.191,4,3.191,5,3.191]],["t/62",[6,2.675,7,2.675,8,2.675,9,2.675]],["t/78",[0,2.32,2,3.501]],["t/108",[10,5.192]],["t/138",[11,2.04,12,2.303,13,2.303,14,2.303,15,2.303]],["t/152",[16,5.192]],["t/154",[17,3.953,18,3.953]],["t/167",[19,5.192]],["t/175",[20,2.022,21,1.791,22,2.022,23,1.791,24,2.022,25,1.639]],["t/185",[26,5.192]],["t/187",[27,5.192]],["t/202",[28,5.192]],["t/204",[29,5.192]],["t/206",[30,3.953,31,3.953]],["t/236",[32,3.203,33,3.953]],["t/262",[32,3.203,34,3.501]],["t/264",[35,3.953,36,3.953]],["t/300",[37,5.192]],["t/302",[38,3.191,39,2.826,40,3.191]],["t/304",[41,5.192]],["t/321",[42,5.192]],["t/323",[43,3.953,44,3.953]],["t/341",[32,3.203,45,3.953]],["t/365",[46,5.192]],["t/376",[39,4.599]],["t/386",[47,5.192]],["t/388",[48,5.192]],["t/390",[49,5.192]],["t/398",[50,5.192]],["t/406",[51,5.192]],["t/414",[52,5.192]],["t/416",[53,5.192]],["t/422",[54,5.192]],["t/424",[0,2.32,55,3.953]],["t/426",[56,3.501,57,3.953]],["t/432",[58,5.192]],["t/445",[0,1.873,56,2.826,59,3.191]],["t/460",[60,5.192]],["t/477",[61,5.192]],["t/483",[62,5.192]],["t/499",[63,5.192]],["t/513",[64,5.192]],["t/515",[65,2.656,66,3.501]],["t/524",[0,1.873,65,2.144,67,3.191]],["t/540",[68,5.192]],["t/548",[69,5.192]],["t/550",[70,5.192]],["t/552",[65,2.656,71,3.953]],["t/569",[72,5.192]],["t/593",[0,2.32,65,2.656]],["t/612",[73,5.192]],["t/622",[74,5.192]],["t/635",[75,5.192]],["t/659",[76,5.192]],["t/667",[77,5.192]],["t/669",[78,5.192]],["t/671",[79,5.192]],["t/673",[80,5.192]],["t/699",[81,5.192]],["t/708",[82,5.192]],["t/710",[83,5.192]],["t/712",[84,3.191,85,3.191,86,2.826]],["t/725",[87,5.192]],["t/727",[88,5.192]],["t/743",[89,3.953,90,3.953]],["t/745",[91,5.192]],["t/757",[86,4.599]],["t/780",[92,5.192]],["t/791",[93,5.192]],["t/795",[94,5.192]],["t/799",[95,4.208]],["t/805",[95,3.203,96,3.953]],["t/807",[97,5.192]],["t/809",[98,5.192]],["t/821",[23,3.501,25,3.203]],["t/827",[99,5.192]],["t/831",[34,2.826,100,3.191,101,3.191]],["t/841",[21,3.501,25,3.203]],["t/845",[102,5.192]],["t/853",[103,5.192]],["t/857",[95,3.203,104,3.953]],["t/890",[105,5.192]],["t/918",[106,5.192]],["t/920",[107,5.192]],["t/922",[108,5.192]],["t/931",[109,5.192]],["t/942",[110,5.192]],["t/944",[111,3.953,112,3.953]],["t/946",[113,5.192]],["t/956",[114,5.192]],["t/958",[115,5.192]],["t/966",[116,5.192]],["t/974",[117,5.192]],["t/990",[118,5.192]],["t/998",[119,3.953,120,3.953]],["t/1000",[121,3.191,122,2.586,123,3.191]],["t/1016",[124,5.192]],["t/1018",[125,2.826,126,3.191,127,3.191]],["t/1030",[128,5.192]],["t/1036",[129,5.192]],["t/1052",[130,5.192]],["t/1058",[125,4.599]],["t/1078",[131,5.192]],["t/1091",[132,5.192]],["t/1097",[133,5.192]],["t/1103",[134,5.192]],["t/1115",[135,4.599]],["t/1117",[136,5.192]],["t/1119",[137,5.192]],["t/1125",[138,5.192]],["t/1135",[122,3.203,139,3.953]],["t/1143",[0,3.048]],["t/1145",[140,5.192]],["t/1147",[135,3.501,141,3.953]],["t/1167",[142,5.192]],["t/1169",[143,5.192]],["t/1171",[144,5.192]],["t/1181",[145,5.192]],["t/1189",[146,5.192]],["t/1195",[147,5.192]],["t/1199",[11,2.37,122,2.168,148,2.675,149,2.675]],["t/1201",[150,3.953,151,3.953]],["t/1207",[152,5.192]],["t/1209",[66,4.599]],["t/1211",[153,5.192]],["t/1213",[154,5.192]],["t/1215",[0,1.187,1,1.791,65,1.359,155,2.022,156,1.791,157,1.791]],["t/1273",[0,0.954,65,1.092,156,1.439,157,1.439,158,1.625,159,1.625,160,1.625,161,1.625]]],"invertedIndex":[["",{"_index":22,"
t":{"175":{"position":[[22,1]]}}}],["1",{"_index":151,"t":{"1201":{"position":[[3,1]]}}}],["2000",{"_index":96,"t":{"805":{"position":[[5,4]]}}}],["7",{"_index":38,"t":{"302":{"position":[[0,1]]}}}],["7z",{"_index":40,"t":{"302":{"position":[[6,4]]}}}],["aac",{"_index":10,"t":{"108":{"position":[[0,3]]}}}],["alac",{"_index":16,"t":{"152":{"position":[[0,4]]}}}],["amf",{"_index":48,"t":{"388":{"position":[[0,3]]}}}],["anim",{"_index":155,"t":{"1215":{"position":[[9,9]]}}}],["antialias",{"_index":78,"t":{"669":{"position":[[0,12]]}}}],["aom",{"_index":56,"t":{"426":{"position":[[0,3]]},"445":{"position":[[0,3]]}}}],["aomenc",{"_index":60,"t":{"460":{"position":[[0,6]]}}}],["artifact",{"_index":123,"t":{"1000":{"position":[[15,9]]}}}],["audio",{"_index":24,"t":{"175":{"position":[[33,5]]}}}],["aurora1",{"_index":55,"t":{"424":{"position":[[0,7]]}}}],["autocompressor",{"_index":124,"t":{"1016":{"position":[[0,14]]}}}],["av1",{"_index":0,"t":{"2":{"position":[[0,3]]},"78":{"position":[[0,3]]},"424":{"position":[[8,3]]},"445":{"position":[[4,3]]},"524":{"position":[[4,3]]},"593":{"position":[[4,3]]},"1143":{"position":[[0,3]]},"1215":{"position":[[28,4]]},"1273":{"position":[[14,3]]}}}],["av1an",{"_index":125,"t":{"1018":{"position":[[0,5]]},"1058":{"position":[[0,5]]}}}],["aviat",{"_index":129,"t":{"1036":{"position":[[0,7]]}}}],["avif",{"_index":92,"t":{"780":{"position":[[0,4]]}}}],["avm",{"_index":58,"t":{"432":{"position":[[0,3]]}}}],["avs3",{"_index":140,"t":{"1145":{"position":[[0,4]]}}}],["basic",{"_index":84,"t":{"712":{"position":[[0,6]]}}}],["brotli",{"_index":42,"t":{"321":{"position":[[0,6]]}}}],["butteraugli",{"_index":106,"t":{"918":{"position":[[0,11]]}}}],["bzip2",{"_index":47,"t":{"386":{"position":[[0,5]]}}}],["characterist",{"_index":36,"t":{"264":{"position":[[9,15]]}}}],["cli",{"_index":141,"t":{"1147":{"position":[[9,3]]}}}],["codec",{"_index":11,"t":{"138":{"position":[[0,5]]},"1199":{"position":[[9,5]]}}}],["coeffici",{"_index":31,"t":{"206":{"position":[[7,12]]}}}],["color",{"_index":32,"t":{"236":{"position":[[0,5]]},"262":{"position":[[0,5]]},"341":{"position":[[0,5]]}}}],["command",{"_index":126,"t":{"1018":{"position":[[6,7]]}}}],["compress",{"_index":25,"t":{"175":{"position":[[39,11]]},"821":{"position":[[9,11]]},"841":{"position":[[6,11]]}}}],["contribut",{"_index":43,"t":{"323":{"position":[[0,12]]}}}],["deband",{"_index":79,"t":{"671":{"position":[[0,6]]}}}],["decomb",{"_index":82,"t":{"708":{"position":[[0,9]]}}}],["deep",{"_index":156,"t":{"1215":{"position":[[35,4]]},"1273":{"position":[[47,4]]}}}],["dehalo",{"_index":87,"t":{"725":{"position":[[0,6]]}}}],["deinterlac",{"_index":83,"t":{"710":{"position":[[0,11]]}}}],["denois",{"_index":88,"t":{"727":{"position":[[0,7]]}}}],["digit",{"_index":18,"t":{"154":{"position":[[6,7]]}}}],["discord",{"_index":128,"t":{"1030":{"position":[[0,7]]}}}],["dive",{"_index":157,"t":{"1215":{"position":[[40,4]]},"1273":{"position":[[52,4]]}}}],["dolbi",{"_index":17,"t":{"154":{"position":[[0,5]]}}}],["dovi_tool",{"_index":130,"t":{"1052":{"position":[[0,9]]}}}],["dummi",{"_index":2,"t":{"2":{"position":[[17,7]]},"78":{"position":[[8,7]]}}}],["dynam",{"_index":101,"t":{"831":{"position":[[5,7]]}}}],["eac3to",{"_index":131,"t":{"1078":{"position":[[0,6]]}}}],["ecm",{"_index":143,"t":{"1169":{"position":[[0,3]]}}}],["embed",{"_index":3,"t":{"35":{"position":[[0,9]]}}}],["embedd",{"_index":5,"t":{"35":{"position":[[17,10]]}}}],["encod",{"_index":1,"t":{"2":{"position":[[4,8]]},"1215":{"position":[[0,8]]}}}],["faq",{"_index":81,"t"
:{"699":{"position":[[0,3]]}}}],["ffmetric",{"_index":132,"t":{"1091":{"position":[[0,9]]}}}],["ffmpeg",{"_index":138,"t":{"1125":{"position":[[0,6]]}}}],["ffv1",{"_index":144,"t":{"1171":{"position":[[0,4]]}}}],["filter",{"_index":85,"t":{"712":{"position":[[10,9]]}}}],["flac",{"_index":19,"t":{"167":{"position":[[0,4]]}}}],["format",{"_index":45,"t":{"341":{"position":[[6,7]]}}}],["gener",{"_index":127,"t":{"1018":{"position":[[14,9]]}}}],["gif",{"_index":93,"t":{"791":{"position":[[0,3]]}}}],["grain",{"_index":97,"t":{"807":{"position":[[0,8]]}}}],["guid",{"_index":44,"t":{"323":{"position":[[13,5]]}}}],["gzip",{"_index":46,"t":{"365":{"position":[[0,4]]}}}],["h.264",{"_index":142,"t":{"1167":{"position":[[0,5]]}}}],["h.265",{"_index":154,"t":{"1213":{"position":[[0,5]]}}}],["h.266",{"_index":153,"t":{"1211":{"position":[[0,5]]}}}],["hdr10plus_tool",{"_index":134,"t":{"1103":{"position":[[0,14]]}}}],["heic",{"_index":94,"t":{"795":{"position":[[0,4]]}}}],["hevc",{"_index":71,"t":{"552":{"position":[[4,4]]}}}],["high",{"_index":100,"t":{"831":{"position":[[0,4]]}}}],["hm",{"_index":61,"t":{"477":{"position":[[0,2]]}}}],["imag",{"_index":7,"t":{"62":{"position":[[9,5]]}}}],["improv",{"_index":160,"t":{"1273":{"position":[[27,13]]}}}],["intro",{"_index":28,"t":{"202":{"position":[[0,5]]}}}],["introduct",{"_index":20,"t":{"175":{"position":[[0,12]]}}}],["invers",{"_index":89,"t":{"743":{"position":[[0,7]]}}}],["jm",{"_index":69,"t":{"548":{"position":[[0,2]]}}}],["jpeg",{"_index":95,"t":{"799":{"position":[[0,4]]},"805":{"position":[[0,4]]},"857":{"position":[[0,4]]}}}],["kvazaar",{"_index":62,"t":{"483":{"position":[[0,7]]}}}],["later",{"_index":15,"t":{"138":{"position":[[21,5]]}}}],["lavish",{"_index":59,"t":{"445":{"position":[[8,6]]}}}],["load",{"_index":8,"t":{"62":{"position":[[15,4]]}}}],["lossi",{"_index":21,"t":{"175":{"position":[[16,5]]},"841":{"position":[[0,5]]}}}],["lossless",{"_index":23,"t":{"175":{"position":[[24,8]]},"821":{"position":[[0,8]]}}}],["matrix",{"_index":30,"t":{"206":{"position":[[0,6]]}}}],["mediacodec",{"_index":53,"t":{"416":{"position":[[0,10]]}}}],["mkvtoolnix",{"_index":145,"t":{"1181":{"position":[[0,10]]}}}],["mp3",{"_index":26,"t":{"185":{"position":[[0,3]]}}}],["mp4box",{"_index":136,"t":{"1117":{"position":[[0,6]]}}}],["new",{"_index":161,"t":{"1273":{"position":[[43,3]]}}}],["nmkoder",{"_index":133,"t":{"1097":{"position":[[0,7]]}}}],["nvenc",{"_index":54,"t":{"422":{"position":[[0,5]]}}}],["observ",{"_index":158,"t":{"1273":{"position":[[0,9]]}}}],["on",{"_index":13,"t":{"138":{"position":[[12,3]]}}}],["onlin",{"_index":9,"t":{"62":{"position":[[20,6]]}}}],["opu",{"_index":27,"t":{"187":{"position":[[0,4]]}}}],["overview",{"_index":118,"t":{"990":{"position":[[0,8]]}}}],["player",{"_index":139,"t":{"1135":{"position":[[6,7]]}}}],["png",{"_index":99,"t":{"827":{"position":[[0,3]]}}}],["polici",{"_index":112,"t":{"944":{"position":[[8,6]]}}}],["primari",{"_index":33,"t":{"236":{"position":[[6,9]]}}}],["privaci",{"_index":111,"t":{"944":{"position":[[0,7]]}}}],["prologu",{"_index":102,"t":{"845":{"position":[[0,8]]}}}],["prore",{"_index":146,"t":{"1189":{"position":[[0,6]]}}}],["psi",{"_index":67,"t":{"524":{"position":[[8,3]]}}}],["psnr",{"_index":107,"t":{"920":{"position":[[0,4]]}}}],["psy101",{"_index":57,"t":{"426":{"position":[[4,6]]}}}],["psychovisu",{"_index":103,"t":{"853":{"position":[[0,12]]}}}],["qoi",{"_index":98,"t":{"809":{"position":[[0,3]]}}}],["qsv",{"_index":52,"t":{"414":{"position":[[0,3]]}}}],["rang",{"_index":34,"t":{"262":{"po
sition":[[6,5]]},"831":{"position":[[13,5]]}}}],["rav1",{"_index":63,"t":{"499":{"position":[[0,5]]}}}],["rav1at",{"_index":135,"t":{"1115":{"position":[[0,8]]},"1147":{"position":[[0,8]]}}}],["reduc",{"_index":6,"t":{"62":{"position":[[0,8]]}}}],["resourc",{"_index":114,"t":{"956":{"position":[[0,9]]}}}],["speex",{"_index":37,"t":{"300":{"position":[[0,5]]}}}],["spot",{"_index":121,"t":{"1000":{"position":[[0,8]]}}}],["srt",{"_index":115,"t":{"958":{"position":[[0,3]]}}}],["ssim",{"_index":110,"t":{"942":{"position":[[0,4]]}}}],["ssimulacra2",{"_index":109,"t":{"931":{"position":[[0,11]]}}}],["stabil",{"_index":91,"t":{"745":{"position":[[0,11]]}}}],["suit",{"_index":149,"t":{"1199":{"position":[[15,5]]}}}],["svt",{"_index":65,"t":{"515":{"position":[[0,3]]},"524":{"position":[[0,3]]},"552":{"position":[[0,3]]},"593":{"position":[[0,3]]},"1215":{"position":[[24,3]]},"1273":{"position":[[10,3]]}}}],["tar",{"_index":49,"t":{"390":{"position":[[0,3]]}}}],["telecin",{"_index":90,"t":{"743":{"position":[[8,8]]}}}],["term",{"_index":119,"t":{"998":{"position":[[0,5]]}}}],["terminolog",{"_index":105,"t":{"890":{"position":[[0,11]]}}}],["theora",{"_index":147,"t":{"1195":{"position":[[0,6]]}}}],["transfer",{"_index":35,"t":{"264":{"position":[[0,8]]}}}],["uavs3",{"_index":73,"t":{"612":{"position":[[0,6]]}}}],["un",{"_index":4,"t":{"35":{"position":[[14,2]]}}}],["us",{"_index":120,"t":{"998":{"position":[[9,3]]}}}],["ut",{"_index":148,"t":{"1199":{"position":[[0,2]]}}}],["uvg266",{"_index":74,"t":{"622":{"position":[[0,6]]}}}],["v2.1.0'",{"_index":159,"t":{"1273":{"position":[[18,8]]}}}],["vapoursynth",{"_index":86,"t":{"712":{"position":[[25,11]]},"757":{"position":[[0,11]]}}}],["vc",{"_index":150,"t":{"1201":{"position":[[0,2]]}}}],["video",{"_index":122,"t":{"1000":{"position":[[9,5]]},"1135":{"position":[[0,5]]},"1199":{"position":[[3,5]]}}}],["videotoolbox",{"_index":68,"t":{"540":{"position":[[0,12]]}}}],["vmaf",{"_index":117,"t":{"974":{"position":[[0,4]]}}}],["vorbi",{"_index":29,"t":{"204":{"position":[[0,6]]}}}],["vp8",{"_index":152,"t":{"1207":{"position":[[0,3]]}}}],["vp9",{"_index":66,"t":{"515":{"position":[[4,3]]},"1209":{"position":[[0,3]]}}}],["vpxenc",{"_index":72,"t":{"569":{"position":[[0,6]]}}}],["vtm",{"_index":70,"t":{"550":{"position":[[0,3]]}}}],["vvenc",{"_index":76,"t":{"659":{"position":[[0,5]]}}}],["wavpack",{"_index":41,"t":{"304":{"position":[[0,7]]}}}],["webp",{"_index":108,"t":{"922":{"position":[[0,4]]}}}],["webvtt",{"_index":116,"t":{"966":{"position":[[0,6]]}}}],["wiki",{"_index":12,"t":{"138":{"position":[[6,5]]}}}],["x264",{"_index":80,"t":{"673":{"position":[[0,4]]}}}],["x265",{"_index":75,"t":{"635":{"position":[[0,4]]}}}],["x266",{"_index":77,"t":{"667":{"position":[[0,4]]}}}],["xl",{"_index":104,"t":{"857":{"position":[[5,2]]}}}],["xpsnr",{"_index":113,"t":{"946":{"position":[[0,5]]}}}],["xz",{"_index":51,"t":{"406":{"position":[[0,2]]}}}],["year",{"_index":14,"t":{"138":{"position":[[16,4]]}}}],["yuview",{"_index":137,"t":{"1119":{"position":[[0,6]]}}}],["zip",{"_index":39,"t":{"302":{"position":[[2,3]]},"376":{"position":[[0,3]]}}}],["zpaq",{"_index":64,"t":{"513":{"position":[[0,4]]}}}],["zstandard",{"_index":50,"t":{"398":{"position":[[0,9]]}}}]],"pipeline":["stemmer"]}},{"documents":[{"i":4,"t":"Installing the Tools","u":"/blog/av1-encoding-for-dummies","h":"#installing-the-tools","p":2},{"i":6,"t":"Microsoft Windows","u":"/blog/av1-encoding-for-dummies","h":"#microsoft-windows","p":2},{"i":7,"t":"The GUI 
Way","u":"/blog/av1-encoding-for-dummies","h":"#the-gui-way","p":2},{"i":9,"t":"The WSL2 Way","u":"/blog/av1-encoding-for-dummies","h":"#the-wsl2-way","p":2},{"i":11,"t":"The Automated Way","u":"/blog/av1-encoding-for-dummies","h":"#the-automated-way","p":2},{"i":13,"t":"The Manual Way","u":"/blog/av1-encoding-for-dummies","h":"#the-manual-way","p":2},{"i":15,"t":"macOS","u":"/blog/av1-encoding-for-dummies","h":"#macos","p":2},{"i":17,"t":"Linux","u":"/blog/av1-encoding-for-dummies","h":"#linux","p":2},{"i":19,"t":"The GUI Way","u":"/blog/av1-encoding-for-dummies","h":"#the-gui-way-1","p":2},{"i":21,"t":"The TUI Way","u":"/blog/av1-encoding-for-dummies","h":"#the-tui-way","p":2},{"i":23,"t":"The Compiling Route","u":"/blog/av1-encoding-for-dummies","h":"#the-compiling-route","p":2},{"i":25,"t":"Arch","u":"/blog/av1-encoding-for-dummies","h":"#arch","p":2},{"i":27,"t":"Encoding","u":"/blog/av1-encoding-for-dummies","h":"#encoding","p":2},{"i":29,"t":"Merging Everything","u":"/blog/av1-encoding-for-dummies","h":"#merging-everything","p":2},{"i":31,"t":"Tips & Tricks","u":"/blog/av1-encoding-for-dummies","h":"#tips--tricks","p":2},{"i":33,"t":"Final Thoughts","u":"/blog/av1-encoding-for-dummies","h":"#final-thoughts","p":2},{"i":37,"t":"A Scenario","u":"/blog/embedding-the-un-embeddable","h":"#a-scenario","p":35},{"i":39,"t":"But First, a Quick Disclosure","u":"/blog/embedding-the-un-embeddable","h":"#but-first-a-quick-disclosure","p":35},{"i":41,"t":"How it Works","u":"/blog/embedding-the-un-embeddable","h":"#how-it-works","p":35},{"i":43,"t":"The Website's End","u":"/blog/embedding-the-un-embeddable","h":"#the-websites-end","p":35},{"i":45,"t":"Discord's End","u":"/blog/embedding-the-un-embeddable","h":"#discords-end","p":35},{"i":47,"t":"Strengths & Limitations","u":"/blog/embedding-the-un-embeddable","h":"#strengths--limitations","p":35},{"i":49,"t":"Differences between Sites","u":"/blog/embedding-the-un-embeddable","h":"#differences-between-sites","p":35},{"i":51,"t":"The Lore","u":"/blog/embedding-the-un-embeddable","h":"#the-lore","p":35},{"i":52,"t":"Dwayne","u":"/blog/embedding-the-un-embeddable","h":"#dwayne","p":35},{"i":54,"t":"Discovery","u":"/blog/embedding-the-un-embeddable","h":"#discovery","p":35},{"i":56,"t":"The Experiments & Interactive Site","u":"/blog/embedding-the-un-embeddable","h":"#the-experiments--interactive-site","p":35},{"i":58,"t":"Virality","u":"/blog/embedding-the-un-embeddable","h":"#virality","p":35},{"i":60,"t":"Closing","u":"/blog/embedding-the-un-embeddable","h":"#closing","p":35},{"i":64,"t":"Fire & Forget","u":"/blog/site-optimization","h":"#fire--forget","p":62},{"i":66,"t":"Massive Improvement","u":"/blog/site-optimization","h":"#massive-improvement","p":62},{"i":68,"t":"Lazy Loading","u":"/blog/site-optimization","h":"#lazy-loading","p":62},{"i":70,"t":"New Codecs","u":"/blog/site-optimization","h":"#new-codecs","p":62},{"i":72,"t":"Fallbacks","u":"/blog/site-optimization","h":"#fallbacks","p":62},{"i":74,"t":"Compression Efficacy","u":"/blog/site-optimization","h":"#compression-efficacy","p":62},{"i":76,"t":"Responsive Images","u":"/blog/site-optimization","h":"#responsive-images","p":62},{"i":80,"t":"Introduction","u":"/blog/av1-for-dummies","h":"#introduction","p":78},{"i":82,"t":"Why AV1?","u":"/blog/av1-for-dummies","h":"#why-av1","p":78},{"i":84,"t":"Do's & 
Don'ts","u":"/blog/av1-for-dummies","h":"#dos--donts","p":78},{"i":86,"t":"Tools","u":"/blog/av1-for-dummies","h":"#tools","p":78},{"i":88,"t":"GUI","u":"/blog/av1-for-dummies","h":"#gui","p":78},{"i":90,"t":"CLI","u":"/blog/av1-for-dummies","h":"#cli","p":78},{"i":92,"t":"Conclusion","u":"/blog/av1-for-dummies","h":"#conclusion","p":78},{"i":94,"t":"Encoders","u":"/blog/av1-for-dummies","h":"#encoders","p":78},{"i":96,"t":"SVT-AV1","u":"/blog/av1-for-dummies","h":"#svt-av1","p":78},{"i":98,"t":"rav1e","u":"/blog/av1-for-dummies","h":"#rav1e","p":78},{"i":100,"t":"aomenc (libaom)","u":"/blog/av1-for-dummies","h":"#aomenc-libaom","p":78},{"i":102,"t":"SVT-AV1-PSY","u":"/blog/av1-for-dummies","h":"#svt-av1-psy","p":78},{"i":104,"t":"Conclusion","u":"/blog/av1-for-dummies","h":"#conclusion-1","p":78},{"i":106,"t":"Final Conclusion","u":"/blog/av1-for-dummies","h":"#final-conclusion","p":78},{"i":110,"t":"Format Breakdown","u":"/docs/audio/AAC","h":"#format-breakdown","p":108},{"i":112,"t":"AAC-LC","u":"/docs/audio/AAC","h":"#aac-lc","p":108},{"i":114,"t":"AAC-LD & AAC-ELD","u":"/docs/audio/AAC","h":"#aac-ld--aac-eld","p":108},{"i":116,"t":"HE-AAC","u":"/docs/audio/AAC","h":"#he-aac","p":108},{"i":118,"t":"HE-AACv2","u":"/docs/audio/AAC","h":"#he-aacv2","p":108},{"i":120,"t":"xHE-AAC","u":"/docs/audio/AAC","h":"#xhe-aac","p":108},{"i":122,"t":"Encoders","u":"/docs/audio/AAC","h":"#encoders","p":108},{"i":124,"t":"Fraunhofer FDK AAC","u":"/docs/audio/AAC","h":"#fraunhofer-fdk-aac","p":108},{"i":126,"t":"Core Audio","u":"/docs/audio/AAC","h":"#core-audio","p":108},{"i":128,"t":"FFmpeg AAC","u":"/docs/audio/AAC","h":"#ffmpeg-aac","p":108},{"i":130,"t":"FAAC","u":"/docs/audio/AAC","h":"#faac","p":108},{"i":132,"t":"Nero AAC","u":"/docs/audio/AAC","h":"#nero-aac","p":108},{"i":134,"t":"Exhale","u":"/docs/audio/AAC","h":"#exhale","p":108},{"i":136,"t":"Conclusion","u":"/docs/audio/AAC","h":"#conclusion","p":108},{"i":140,"t":"Stats","u":"/blog/codec-wiki-one-year-later","h":"#stats","p":138},{"i":142,"t":"Plausible","u":"/blog/codec-wiki-one-year-later","h":"#plausible","p":138},{"i":144,"t":"Google Search Console","u":"/blog/codec-wiki-one-year-later","h":"#google-search-console","p":138},{"i":146,"t":"A Brief History","u":"/blog/codec-wiki-one-year-later","h":"#a-brief-history","p":138},{"i":148,"t":"Bumps in the Road","u":"/blog/codec-wiki-one-year-later","h":"#bumps-in-the-road","p":138},{"i":150,"t":"Closing Statement","u":"/blog/codec-wiki-one-year-later","h":"#closing-statement","p":138},{"i":156,"t":"Format Overview","u":"/docs/audio/Dolby","h":"#format-overview","p":154},{"i":157,"t":"AC-3","u":"/docs/audio/Dolby","h":"#ac-3","p":154},{"i":159,"t":"E-AC-3","u":"/docs/audio/Dolby","h":"#e-ac-3","p":154},{"i":161,"t":"TrueHD","u":"/docs/audio/Dolby","h":"#truehd","p":154},{"i":163,"t":"AC-4","u":"/docs/audio/Dolby","h":"#ac-4","p":154},{"i":165,"t":"Atmos","u":"/docs/audio/Dolby","h":"#atmos","p":154},{"i":169,"t":"Software support","u":"/docs/audio/FLAC","h":"#software-support","p":167},{"i":171,"t":"WAV to FLAC using FFmpeg:","u":"/docs/audio/FLAC","h":"#wav-to-flac-using-ffmpeg","p":167},{"i":173,"t":"WAV to FLAC using FLAC command-line tool:","u":"/docs/audio/FLAC","h":"#wav-to-flac-using-flac-command-line-tool","p":167},{"i":177,"t":"Sampling & the Nyquist Frequency","u":"/docs/audio/intro","h":"#sampling--the-nyquist-frequency","p":175},{"i":179,"t":"Lossless Audio Compression","u":"/docs/audio/intro","h":"#lossless-audio-compression","p":175},{"i":181,"t":"Lossy Audio 
Compression","u":"/docs/audio/intro","h":"#lossy-audio-compression","p":175},{"i":183,"t":"Conclusion","u":"/docs/audio/intro","h":"#conclusion","p":175},{"i":189,"t":"Format Breakdown","u":"/docs/audio/Opus","h":"#format-breakdown","p":187},{"i":191,"t":"SILK","u":"/docs/audio/Opus","h":"#silk","p":187},{"i":193,"t":"CELT","u":"/docs/audio/Opus","h":"#celt","p":187},{"i":195,"t":"Encoders","u":"/docs/audio/Opus","h":"#encoders","p":187},{"i":196,"t":"Opusenc","u":"/docs/audio/Opus","h":"#opusenc","p":187},{"i":198,"t":"FFopus","u":"/docs/audio/Opus","h":"#ffopus","p":187},{"i":200,"t":"vac-enc","u":"/docs/audio/Opus","h":"#vac-enc","p":187},{"i":208,"t":"0: Identity","u":"/docs/colorimetry/matrix","h":"#0-identity","p":206},{"i":210,"t":"1: BT.709","u":"/docs/colorimetry/matrix","h":"#1-bt709","p":206},{"i":212,"t":"2: Unspecified","u":"/docs/colorimetry/matrix","h":"#2-unspecified","p":206},{"i":214,"t":"4: BT.470M","u":"/docs/colorimetry/matrix","h":"#4-bt470m","p":206},{"i":216,"t":"5: BT.470BG","u":"/docs/colorimetry/matrix","h":"#5-bt470bg","p":206},{"i":218,"t":"6: SMPTE 170M","u":"/docs/colorimetry/matrix","h":"#6-smpte-170m","p":206},{"i":220,"t":"7: SMPTE 240M","u":"/docs/colorimetry/matrix","h":"#7-smpte-240m","p":206},{"i":222,"t":"8: YCgCo","u":"/docs/colorimetry/matrix","h":"#8-ycgco","p":206},{"i":224,"t":"9: BT.2020 Non-Constant Luminance","u":"/docs/colorimetry/matrix","h":"#9-bt2020-non-constant-luminance","p":206},{"i":226,"t":"10: BT.2020 Constant Luminance","u":"/docs/colorimetry/matrix","h":"#10-bt2020-constant-luminance","p":206},{"i":228,"t":"11: SMPTE 2085","u":"/docs/colorimetry/matrix","h":"#11-smpte-2085","p":206},{"i":230,"t":"12: Chromaticity-Derived Non-Constant Luminance","u":"/docs/colorimetry/matrix","h":"#12-chromaticity-derived-non-constant-luminance","p":206},{"i":232,"t":"13: Chromaticity-Derived Constant Luminance","u":"/docs/colorimetry/matrix","h":"#13-chromaticity-derived-constant-luminance","p":206},{"i":234,"t":"14: ICtCp","u":"/docs/colorimetry/matrix","h":"#14-ictcp","p":206},{"i":238,"t":"1: BT.709","u":"/docs/colorimetry/primaries","h":"#1-bt709","p":236},{"i":240,"t":"2: Unspecified","u":"/docs/colorimetry/primaries","h":"#2-unspecified","p":236},{"i":242,"t":"4: BT.470M","u":"/docs/colorimetry/primaries","h":"#4-bt470m","p":236},{"i":244,"t":"5: BT.470BG","u":"/docs/colorimetry/primaries","h":"#5-bt470bg","p":236},{"i":246,"t":"6: SMPTE 170M","u":"/docs/colorimetry/primaries","h":"#6-smpte-170m","p":236},{"i":248,"t":"7: SMPTE 240M","u":"/docs/colorimetry/primaries","h":"#7-smpte-240m","p":236},{"i":250,"t":"8: Film","u":"/docs/colorimetry/primaries","h":"#8-film","p":236},{"i":252,"t":"9: BT.2020","u":"/docs/colorimetry/primaries","h":"#9-bt2020","p":236},{"i":254,"t":"10: SMPTE 428","u":"/docs/colorimetry/primaries","h":"#10-smpte-428","p":236},{"i":256,"t":"11: DCI-P3","u":"/docs/colorimetry/primaries","h":"#11-dci-p3","p":236},{"i":258,"t":"12: Display-P3","u":"/docs/colorimetry/primaries","h":"#12-display-p3","p":236},{"i":260,"t":"22: EBU Tech 3213","u":"/docs/colorimetry/primaries","h":"#22-ebu-tech-3213","p":236},{"i":266,"t":"1: BT.1886","u":"/docs/colorimetry/transfer","h":"#1-bt1886","p":264},{"i":268,"t":"2: Unspecified","u":"/docs/colorimetry/transfer","h":"#2-unspecified","p":264},{"i":270,"t":"4: BT.470M","u":"/docs/colorimetry/transfer","h":"#4-bt470m","p":264},{"i":272,"t":"5: BT.470BG","u":"/docs/colorimetry/transfer","h":"#5-bt470bg","p":264},{"i":274,"t":"6: SMPTE 
170M","u":"/docs/colorimetry/transfer","h":"#6-smpte-170m","p":264},{"i":276,"t":"7: SMPTE 240M","u":"/docs/colorimetry/transfer","h":"#7-smpte-240m","p":264},{"i":278,"t":"8: Linear","u":"/docs/colorimetry/transfer","h":"#8-linear","p":264},{"i":280,"t":"9: Logarithmic 100","u":"/docs/colorimetry/transfer","h":"#9-logarithmic-100","p":264},{"i":282,"t":"10: Logarithmic 316","u":"/docs/colorimetry/transfer","h":"#10-logarithmic-316","p":264},{"i":284,"t":"11: XVYCC","u":"/docs/colorimetry/transfer","h":"#11-xvycc","p":264},{"i":286,"t":"12: BT.1361E","u":"/docs/colorimetry/transfer","h":"#12-bt1361e","p":264},{"i":288,"t":"13: sRGB","u":"/docs/colorimetry/transfer","h":"#13-srgb","p":264},{"i":290,"t":"14: BT.2020 10-bit","u":"/docs/colorimetry/transfer","h":"#14-bt2020-10-bit","p":264},{"i":292,"t":"15: BT.2020 12-bit","u":"/docs/colorimetry/transfer","h":"#15-bt2020-12-bit","p":264},{"i":294,"t":"16: PQ aka SMPTE 2084","u":"/docs/colorimetry/transfer","h":"#16-pq-aka-smpte-2084","p":264},{"i":296,"t":"17: SMPTE 428","u":"/docs/colorimetry/transfer","h":"#17-smpte-428","p":264},{"i":298,"t":"18: HLG aka Hybrid Log-Gamma","u":"/docs/colorimetry/transfer","h":"#18-hlg-aka-hybrid-log-gamma","p":264},{"i":306,"t":"Features","u":"/docs/audio/WavPack","h":"#features","p":304},{"i":308,"t":"Hybrid Mode","u":"/docs/audio/WavPack","h":"#hybrid-mode","p":304},{"i":310,"t":"Format Breakdown","u":"/docs/audio/WavPack","h":"#format-breakdown","p":304},{"i":312,"t":"Encoders","u":"/docs/audio/WavPack","h":"#encoders","p":304},{"i":313,"t":"wavpack","u":"/docs/audio/WavPack","h":"#wavpack-1","p":304},{"i":315,"t":"FFmpeg","u":"/docs/audio/WavPack","h":"#ffmpeg","p":304},{"i":317,"t":"Adoption issues","u":"/docs/audio/WavPack","h":"#adoption-issues","p":304},{"i":319,"t":"Notes","u":"/docs/audio/WavPack","h":"#notes","p":304},{"i":325,"t":"Before You Contribute","u":"/docs/contribution-guide","h":"#before-you-contribute","p":323},{"i":327,"t":"Connect With Us","u":"/docs/contribution-guide","h":"#connect-with-us","p":323},{"i":329,"t":"Clone & Push Instructions","u":"/docs/contribution-guide","h":"#clone--push-instructions","p":323},{"i":331,"t":"Website","u":"/docs/contribution-guide","h":"#website","p":323},{"i":333,"t":"Installation","u":"/docs/contribution-guide","h":"#installation","p":323},{"i":335,"t":"Local Development","u":"/docs/contribution-guide","h":"#local-development","p":323},{"i":337,"t":"Build","u":"/docs/contribution-guide","h":"#build","p":323},{"i":339,"t":"Deployment","u":"/docs/contribution-guide","h":"#deployment","p":323},{"i":343,"t":"Color Models","u":"/docs/colorimetry/format","h":"#color-models","p":341},{"i":345,"t":"RGB","u":"/docs/colorimetry/format","h":"#rgb","p":341},{"i":347,"t":"YUV","u":"/docs/colorimetry/format","h":"#yuv","p":341},{"i":349,"t":"Component order","u":"/docs/colorimetry/format","h":"#component-order","p":341},{"i":351,"t":"Bit depth","u":"/docs/colorimetry/format","h":"#bit-depth","p":341},{"i":353,"t":"Packed vs planar","u":"/docs/colorimetry/format","h":"#packed-vs-planar","p":341},{"i":355,"t":"Endianness","u":"/docs/colorimetry/format","h":"#endianness","p":341},{"i":357,"t":"Chroma subsampling","u":"/docs/colorimetry/format","h":"#chroma-subsampling","p":341},{"i":359,"t":"Common formats","u":"/docs/colorimetry/format","h":"#common-formats","p":341},{"i":361,"t":"References","u":"/docs/colorimetry/format","h":"#references","p":341},{"i":363,"t":"Footnotes","u":"/docs/colorimetry/format","h":"#footnote-label","p":341},{"i":367,"t":"Format 
Breakdown","u":"/docs/data/gzip","h":"#format-breakdown","p":365},{"i":369,"t":"History","u":"/docs/data/gzip","h":"#history","p":365},{"i":371,"t":"Encoding","u":"/docs/data/gzip","h":"#encoding","p":365},{"i":372,"t":"Linux & macOS","u":"/docs/data/gzip","h":"#linux--macos","p":365},{"i":374,"t":"Windows","u":"/docs/data/gzip","h":"#windows","p":365},{"i":378,"t":"Format Breakdown","u":"/docs/data/zip","h":"#format-breakdown","p":376},{"i":380,"t":"History","u":"/docs/data/zip","h":"#history","p":376},{"i":382,"t":"Encoding","u":"/docs/data/zip","h":"#encoding","p":376},{"i":384,"t":"Conclusion","u":"/docs/data/zip","h":"#conclusion","p":376},{"i":392,"t":"Usage","u":"/docs/data/tar","h":"#usage","p":390},{"i":394,"t":"Create a tar archive","u":"/docs/data/tar","h":"#create-a-tar-archive","p":390},{"i":396,"t":"Extract a tar archive","u":"/docs/data/tar","h":"#extract-a-tar-archive","p":390},{"i":400,"t":"Usage","u":"/docs/data/zstd","h":"#usage","p":398},{"i":402,"t":"Compress a file","u":"/docs/data/zstd","h":"#compress-a-file","p":398},{"i":404,"t":"Decompress a file","u":"/docs/data/zstd","h":"#decompress-a-file","p":398},{"i":408,"t":"Usage","u":"/docs/data/xz","h":"#usage","p":406},{"i":410,"t":"Compression","u":"/docs/data/xz","h":"#compression","p":406},{"i":412,"t":"Decompression","u":"/docs/data/xz","h":"#decompression","p":406},{"i":418,"t":"Usage","u":"/docs/encoders_hw/mediacodec","h":"#usage","p":416},{"i":420,"t":"FFmpeg","u":"/docs/encoders_hw/mediacodec","h":"#ffmpeg","p":416},{"i":428,"t":"FFmpeg","u":"/docs/encoders/aom-psy101","h":"#ffmpeg","p":426},{"i":430,"t":"Installation","u":"/docs/encoders/aom-psy101","h":"#installation","p":426},{"i":434,"t":"Rumors","u":"/docs/encoders/AVM","h":"#rumors","p":432},{"i":436,"t":"Installation","u":"/docs/encoders/AVM","h":"#installation","p":432},{"i":437,"t":"Arch Linux","u":"/docs/encoders/AVM","h":"#arch-linux","p":432},{"i":439,"t":"Compiling","u":"/docs/encoders/AVM","h":"#compiling","p":432},{"i":441,"t":"Usage","u":"/docs/encoders/AVM","h":"#usage","p":432},{"i":443,"t":"aomdec","u":"/docs/encoders/AVM","h":"#aomdec","p":432},{"i":447,"t":"FFmpeg","u":"/docs/encoders/aom-av1-lavish","h":"#ffmpeg","p":445},{"i":449,"t":"Installation","u":"/docs/encoders/aom-av1-lavish","h":"#installation","p":445},{"i":451,"t":"Usage","u":"/docs/encoders/aom-av1-lavish","h":"#usage","p":445},{"i":452,"t":"AV1 Encoding","u":"/docs/encoders/aom-av1-lavish","h":"#av1-encoding","p":445},{"i":454,"t":"AVIF Encoding","u":"/docs/encoders/aom-av1-lavish","h":"#avif-encoding","p":445},{"i":456,"t":"Recommendations","u":"/docs/encoders/aom-av1-lavish","h":"#recommendations","p":445},{"i":458,"t":"Tips & Tricks","u":"/docs/encoders/aom-av1-lavish","h":"#tips--tricks","p":445},{"i":462,"t":"FFmpeg","u":"/docs/encoders/aomenc","h":"#ffmpeg","p":460},{"i":464,"t":"Supported Color Space","u":"/docs/encoders/aomenc","h":"#supported-color-space","p":460},{"i":466,"t":"Installation","u":"/docs/encoders/aomenc","h":"#installation","p":460},{"i":468,"t":"Usage","u":"/docs/encoders/aomenc","h":"#usage","p":460},{"i":469,"t":"AV1 Encoding","u":"/docs/encoders/aomenc","h":"#av1-encoding","p":460},{"i":471,"t":"AVIF Encoding","u":"/docs/encoders/aomenc","h":"#avif-encoding","p":460},{"i":473,"t":"Recommendations","u":"/docs/encoders/aomenc","h":"#recommendations","p":460},{"i":475,"t":"Community 
Forks","u":"/docs/encoders/aomenc","h":"#community-forks","p":460},{"i":479,"t":"Installation","u":"/docs/encoders/HM","h":"#installation","p":477},{"i":481,"t":"Usage","u":"/docs/encoders/HM","h":"#usage","p":477},{"i":485,"t":"FFmpeg","u":"/docs/encoders/Kvazaar","h":"#ffmpeg","p":483},{"i":487,"t":"Supported Color Space","u":"/docs/encoders/Kvazaar","h":"#supported-color-space","p":483},{"i":489,"t":"Installation","u":"/docs/encoders/Kvazaar","h":"#installation","p":483},{"i":491,"t":"Autotools","u":"/docs/encoders/Kvazaar","h":"#autotools","p":483},{"i":493,"t":"CMake (10-bit support)","u":"/docs/encoders/Kvazaar","h":"#cmake-10-bit-support","p":483},{"i":495,"t":"MSYS2","u":"/docs/encoders/Kvazaar","h":"#msys2","p":483},{"i":497,"t":"Usage","u":"/docs/encoders/Kvazaar","h":"#usage","p":483},{"i":501,"t":"FFmpeg","u":"/docs/encoders/rav1e","h":"#ffmpeg","p":499},{"i":503,"t":"Supported Color Space","u":"/docs/encoders/rav1e","h":"#supported-color-space","p":499},{"i":505,"t":"Installation","u":"/docs/encoders/rav1e","h":"#installation","p":499},{"i":507,"t":"Patched Installation with HDR10+ support","u":"/docs/encoders/rav1e","h":"#patched-installation-with-hdr10-support","p":499},{"i":509,"t":"Usage","u":"/docs/encoders/rav1e","h":"#usage","p":499},{"i":511,"t":"Tips & Tricks","u":"/docs/encoders/rav1e","h":"#tips--tricks","p":499},{"i":517,"t":"FFmpeg","u":"/docs/encoders/SVT-VP9","h":"#ffmpeg","p":515},{"i":519,"t":"Supported Color Space","u":"/docs/encoders/SVT-VP9","h":"#supported-color-space","p":515},{"i":521,"t":"Usage","u":"/docs/encoders/SVT-VP9","h":"#usage","p":515},{"i":522,"t":"Standalone","u":"/docs/encoders/SVT-VP9","h":"#standalone","p":515},{"i":526,"t":"Micro-Release Framework","u":"/docs/encoders/SVT-AV1-PSY","h":"#micro-release-framework","p":524},{"i":528,"t":"Feature Additions","u":"/docs/encoders/SVT-AV1-PSY","h":"#feature-additions","p":524},{"i":530,"t":"Modified Defaults","u":"/docs/encoders/SVT-AV1-PSY","h":"#modified-defaults","p":524},{"i":532,"t":"Other Changes","u":"/docs/encoders/SVT-AV1-PSY","h":"#other-changes","p":524},{"i":534,"t":"Installation","u":"/docs/encoders/SVT-AV1-PSY","h":"#installation","p":524},{"i":536,"t":"Projects Featuring SVT-AV1-PSY","u":"/docs/encoders/SVT-AV1-PSY","h":"#projects-featuring-svt-av1-psy","p":524},{"i":538,"t":"License","u":"/docs/encoders/SVT-AV1-PSY","h":"#license","p":524},{"i":542,"t":"Usage","u":"/docs/encoders_hw/videotoolbox","h":"#usage","p":540},{"i":544,"t":"FFmpeg","u":"/docs/encoders_hw/videotoolbox","h":"#ffmpeg","p":540},{"i":546,"t":"Handbrake","u":"/docs/encoders_hw/videotoolbox","h":"#handbrake","p":540},{"i":554,"t":"FFmpeg","u":"/docs/encoders/SVT-HEVC","h":"#ffmpeg","p":552},{"i":556,"t":"Supported Color Space","u":"/docs/encoders/SVT-HEVC","h":"#supported-color-space","p":552},{"i":558,"t":"Installation","u":"/docs/encoders/SVT-HEVC","h":"#installation","p":552},{"i":560,"t":"Usage","u":"/docs/encoders/SVT-HEVC","h":"#usage","p":552},{"i":561,"t":"Normal usage","u":"/docs/encoders/SVT-HEVC","h":"#normal-usage","p":552},{"i":563,"t":"Encoding HDR","u":"/docs/encoders/SVT-HEVC","h":"#encoding-hdr","p":552},{"i":565,"t":"Encoding with Dolby Vision","u":"/docs/encoders/SVT-HEVC","h":"#encoding-with-dolby-vision","p":552},{"i":567,"t":"List of all configuration parameters","u":"/docs/encoders/SVT-HEVC","h":"#list-of-all-configuration-parameters","p":552},{"i":571,"t":"FFmpeg","u":"/docs/encoders/vpxenc","h":"#ffmpeg","p":569},{"i":573,"t":"Supported Color 
Space","u":"/docs/encoders/vpxenc","h":"#supported-color-space","p":569},{"i":575,"t":"Installing (Binary)","u":"/docs/encoders/vpxenc","h":"#installing-binary","p":569},{"i":577,"t":"Compiling (Windows/MacOS/Linux)","u":"/docs/encoders/vpxenc","h":"#compiling-windowsmacoslinux","p":569},{"i":579,"t":"Cloning","u":"/docs/encoders/vpxenc","h":"#cloning","p":569},{"i":581,"t":"./configure file","u":"/docs/encoders/vpxenc","h":"#configure-file","p":569},{"i":583,"t":"Other ./configure options","u":"/docs/encoders/vpxenc","h":"#other-configure-options","p":569},{"i":585,"t":"Running GNU make","u":"/docs/encoders/vpxenc","h":"#running-gnu-make","p":569},{"i":587,"t":"VP8","u":"/docs/encoders/vpxenc","h":"#vp8","p":569},{"i":589,"t":"VP9","u":"/docs/encoders/vpxenc","h":"#vp9","p":569},{"i":591,"t":"Encoding","u":"/docs/encoders/vpxenc","h":"#encoding","p":569},{"i":595,"t":"FFmpeg","u":"/docs/encoders/SVT-AV1","h":"#ffmpeg","p":593},{"i":597,"t":"Supported Color Space","u":"/docs/encoders/SVT-AV1","h":"#supported-color-space","p":593},{"i":599,"t":"Installation","u":"/docs/encoders/SVT-AV1","h":"#installation","p":593},{"i":601,"t":"Encoding","u":"/docs/encoders/SVT-AV1","h":"#encoding","p":593},{"i":602,"t":"Strengths","u":"/docs/encoders/SVT-AV1","h":"#strengths","p":593},{"i":604,"t":"Weaknesses","u":"/docs/encoders/SVT-AV1","h":"#weaknesses","p":593},{"i":606,"t":"Encoder Optimization","u":"/docs/encoders/SVT-AV1","h":"#encoder-optimization","p":593},{"i":608,"t":"Community Forks","u":"/docs/encoders/SVT-AV1","h":"#community-forks","p":593},{"i":610,"t":"SVT-AV1-PSY","u":"/docs/encoders/SVT-AV1","h":"#svt-av1-psy","p":593},{"i":614,"t":"Installation","u":"/docs/encoders/uavs3e","h":"#installation","p":612},{"i":616,"t":"10-bit Support","u":"/docs/encoders/uavs3e","h":"#10-bit-support","p":612},{"i":618,"t":"Usage","u":"/docs/encoders/uavs3e","h":"#usage","p":612},{"i":620,"t":"Notes","u":"/docs/encoders/uavs3e","h":"#notes","p":612},{"i":624,"t":"Installation","u":"/docs/encoders/uvg266","h":"#installation","p":622},{"i":625,"t":"Arch Linux","u":"/docs/encoders/uvg266","h":"#arch-linux","p":622},{"i":627,"t":"Compiling","u":"/docs/encoders/uvg266","h":"#compiling","p":622},{"i":629,"t":"10-bit Support","u":"/docs/encoders/uvg266","h":"#10-bit-support","p":622},{"i":631,"t":"Usage","u":"/docs/encoders/uvg266","h":"#usage","p":622},{"i":633,"t":"Troubleshooting","u":"/docs/encoders/uvg266","h":"#troubleshooting","p":622},{"i":637,"t":"FFmpeg","u":"/docs/encoders/x265","h":"#ffmpeg","p":635},{"i":639,"t":"Installation","u":"/docs/encoders/x265","h":"#installation","p":635},{"i":641,"t":"Parameters","u":"/docs/encoders/x265","h":"#parameters","p":635},{"i":643,"t":"Preset","u":"/docs/encoders/x265","h":"#preset","p":635},{"i":645,"t":"CRF","u":"/docs/encoders/x265","h":"#crf","p":635},{"i":647,"t":"bframes","u":"/docs/encoders/x265","h":"#bframes","p":635},{"i":649,"t":"SAO","u":"/docs/encoders/x265","h":"#sao","p":635},{"i":651,"t":"Deblock","u":"/docs/encoders/x265","h":"#deblock","p":635},{"i":653,"t":"Psy-RD","u":"/docs/encoders/x265","h":"#psy-rd","p":635},{"i":655,"t":"Adaptive Quantization","u":"/docs/encoders/x265","h":"#adaptive-quantization","p":635},{"i":657,"t":"CU-Tree","u":"/docs/encoders/x265","h":"#cu-tree","p":635},{"i":661,"t":"Installation","u":"/docs/encoders/VVenC","h":"#installation","p":659},{"i":663,"t":"FFmpeg 
Integration","u":"/docs/encoders/VVenC","h":"#ffmpeg-integration","p":659},{"i":665,"t":"Usage","u":"/docs/encoders/VVenC","h":"#usage","p":659},{"i":675,"t":"FFmpeg","u":"/docs/encoders/x264","h":"#ffmpeg","p":673},{"i":677,"t":"Supported Color Space","u":"/docs/encoders/x264","h":"#supported-color-space","p":673},{"i":679,"t":"Installation","u":"/docs/encoders/x264","h":"#installation","p":673},{"i":681,"t":"Usage","u":"/docs/encoders/x264","h":"#usage","p":673},{"i":683,"t":"Recommendations","u":"/docs/encoders/x264","h":"#recommendations","p":673},{"i":685,"t":"Preset","u":"/docs/encoders/x264","h":"#preset","p":673},{"i":687,"t":"Threads","u":"/docs/encoders/x264","h":"#threads","p":673},{"i":689,"t":"Open GOP","u":"/docs/encoders/x264","h":"#open-gop","p":673},{"i":691,"t":"AQ Mode","u":"/docs/encoders/x264","h":"#aq-mode","p":673},{"i":693,"t":"Reference Frames","u":"/docs/encoders/x264","h":"#reference-frames","p":673},{"i":695,"t":"MB Tree","u":"/docs/encoders/x264","h":"#mb-tree","p":673},{"i":697,"t":"Lossless Encoding","u":"/docs/encoders/x264","h":"#lossless-encoding","p":673},{"i":700,"t":"Why are you doing this?","u":"/docs/FAQ","h":"#why-are-you-doing-this","p":699},{"i":702,"t":"But alternatives exist. Why not contribute there?","u":"/docs/FAQ","h":"#but-alternatives-exist-why-not-contribute-there","p":699},{"i":704,"t":"How do I get started as a contributor?","u":"/docs/FAQ","h":"#how-do-i-get-started-as-a-contributor","p":699},{"i":706,"t":"Why \"Codec Wiki\"?","u":"/docs/FAQ","h":"#why-codec-wiki","p":699},{"i":713,"t":"Intro to Filters","u":"/docs/filtering/basics","h":"#intro-to-filters","p":712},{"i":715,"t":"Filter Order","u":"/docs/filtering/basics","h":"#filter-order","p":712},{"i":717,"t":"Bit Depth and Colorimetry","u":"/docs/filtering/basics","h":"#bit-depth-and-colorimetry","p":712},{"i":719,"t":"Cropping","u":"/docs/filtering/basics","h":"#cropping","p":712},{"i":721,"t":"Resizing","u":"/docs/filtering/basics","h":"#resizing","p":712},{"i":723,"t":"Trimming","u":"/docs/filtering/basics","h":"#trimming","p":712},{"i":729,"t":"Overview","u":"/docs/filtering/denoise","h":"#overview","p":727},{"i":731,"t":"hqdn3d","u":"/docs/filtering/denoise","h":"#hqdn3d","p":727},{"i":733,"t":"Usage","u":"/docs/filtering/denoise","h":"#usage","p":727},{"i":735,"t":"nlmeans","u":"/docs/filtering/denoise","h":"#nlmeans","p":727},{"i":737,"t":"Usage","u":"/docs/filtering/denoise","h":"#usage-1","p":727},{"i":739,"t":"Parameters","u":"/docs/filtering/denoise","h":"#parameters","p":727},{"i":741,"t":"Notes","u":"/docs/filtering/denoise","h":"","p":727},{"i":747,"t":"Overview","u":"/docs/filtering/stabilizing","h":"#overview","p":745},{"i":749,"t":"Usage","u":"/docs/filtering/stabilizing","h":"#usage","p":745},{"i":751,"t":"vidstabdetect Parameters","u":"/docs/filtering/stabilizing","h":"#vidstabdetect-parameters","p":745},{"i":753,"t":"vidstabtransform Parameters","u":"/docs/filtering/stabilizing","h":"#vidstabtransform-parameters","p":745},{"i":755,"t":"Notes","u":"/docs/filtering/stabilizing","h":"#notes","p":745},{"i":759,"t":"Introduction","u":"/docs/filtering/vapoursynth","h":"","p":757},{"i":761,"t":"Installation","u":"/docs/filtering/vapoursynth","h":"#installation","p":757},{"i":762,"t":"Microsoft Windows","u":"/docs/filtering/vapoursynth","h":"#microsoft-windows","p":757},{"i":764,"t":"Arch Linux","u":"/docs/filtering/vapoursynth","h":"#arch-linux","p":757},{"i":766,"t":"Other 
Linux","u":"/docs/filtering/vapoursynth","h":"#other-linux","p":757},{"i":768,"t":"Previewing","u":"/docs/filtering/vapoursynth","h":"#previewing","p":757},{"i":770,"t":"Output","u":"/docs/filtering/vapoursynth","h":"#output","p":757},{"i":772,"t":"Source Filters","u":"/docs/filtering/vapoursynth","h":"#source-filters","p":757},{"i":774,"t":"LSmashSource","u":"/docs/filtering/vapoursynth","h":"#lsmashsource","p":757},{"i":776,"t":"ffms2","u":"/docs/filtering/vapoursynth","h":"#ffms2","p":757},{"i":778,"t":"BestSource","u":"/docs/filtering/vapoursynth","h":"#bestsource","p":757},{"i":782,"t":"Performance Checklist","u":"/docs/images/AVIF","h":"#performance-checklist","p":780},{"i":784,"t":"Format Breakdown","u":"/docs/images/AVIF","h":"#format-breakdown","p":780},{"i":785,"t":"Advantages","u":"/docs/images/AVIF","h":"#advantages","p":780},{"i":787,"t":"Limitations","u":"/docs/images/AVIF","h":"#limitations","p":780},{"i":789,"t":"Conclusion","u":"/docs/images/AVIF","h":"#conclusion","p":780},{"i":793,"t":"Performance Checklist","u":"/docs/images/GIF","h":"#performance-checklist","p":791},{"i":797,"t":"Performance Checklist","u":"/docs/images/HEIC","h":"#performance-checklist","p":795},{"i":801,"t":"Performance Checklist","u":"/docs/images/JPEG","h":"#performance-checklist","p":799},{"i":803,"t":"Compression","u":"/docs/images/JPEG","h":"#compression","p":799},{"i":811,"t":"Performance Checklist","u":"/docs/images/QOI","h":"#performance-checklist","p":809},{"i":813,"t":"Format Breakdown","u":"/docs/images/QOI","h":"#format-breakdown","p":809},{"i":815,"t":"Benchmarks","u":"/docs/images/QOI","h":"#benchmarks","p":809},{"i":817,"t":"Advantages","u":"/docs/images/QOI","h":"#advantages","p":809},{"i":819,"t":"Limitations","u":"/docs/images/QOI","h":"#limitations","p":809},{"i":823,"t":"Redundancy & Entropy","u":"/docs/introduction/lossless","h":"#redundancy--entropy","p":821},{"i":825,"t":"Techniques in Lossless Compression","u":"/docs/introduction/lossless","h":"#techniques-in-lossless-compression","p":821},{"i":829,"t":"Performance Checklist","u":"/docs/images/PNG","h":"#performance-checklist","p":827},{"i":833,"t":"HLG","u":"/docs/introduction/high-dynamic-range","h":"#hlg","p":831},{"i":835,"t":"HDR10","u":"/docs/introduction/high-dynamic-range","h":"#hdr10","p":831},{"i":837,"t":"HDR10+","u":"/docs/introduction/high-dynamic-range","h":"#hdr10-1","p":831},{"i":839,"t":"Dolby Vision","u":"/docs/introduction/high-dynamic-range","h":"#dolby-vision","p":831},{"i":843,"t":"How Lossy Compression Works","u":"/docs/introduction/lossy","h":"#how-lossy-compression-works","p":841},{"i":847,"t":"What This Isn't","u":"/docs/introduction/prologue","h":"#what-this-isnt","p":845},{"i":849,"t":"What is a Codec","u":"/docs/introduction/prologue","h":"#what-is-a-codec","p":845},{"i":851,"t":"What You Need","u":"/docs/introduction/prologue","h":"#what-you-need","p":845},{"i":855,"t":"Breakdown","u":"/docs/introduction/psychovisual","h":"#breakdown","p":853},{"i":859,"t":"Performance Checklist","u":"/docs/images/JXL","h":"#performance-checklist","p":857},{"i":861,"t":"Format Breakdown","u":"/docs/images/JXL","h":"#format-breakdown","p":857},{"i":863,"t":"Lossless Compression","u":"/docs/images/JXL","h":"#lossless-compression","p":857},{"i":865,"t":"Lossy Compression","u":"/docs/images/JXL","h":"#lossy-compression","p":857},{"i":867,"t":"Supported Bit Depth(s)","u":"/docs/images/JXL","h":"#supported-bit-depths","p":857},{"i":869,"t":"Progressive 
Decode","u":"/docs/images/JXL","h":"#progressive-decode","p":857},{"i":871,"t":"Lossless JPEG Re-compression","u":"/docs/images/JXL","h":"#lossless-jpeg-re-compression","p":857},{"i":873,"t":"Industry Support","u":"/docs/images/JXL","h":"#industry-support","p":857},{"i":875,"t":"Other Features","u":"/docs/images/JXL","h":"#other-features","p":857},{"i":877,"t":"Encoders","u":"/docs/images/JXL","h":"#encoders","p":857},{"i":879,"t":"libjxl","u":"/docs/images/JXL","h":"#libjxl","p":857},{"i":881,"t":"libjxl-tiny","u":"/docs/images/JXL","h":"#libjxl-tiny","p":857},{"i":883,"t":"Hydrium","u":"/docs/images/JXL","h":"#hydrium","p":857},{"i":885,"t":"zune-jpegxl","u":"/docs/images/JXL","h":"#zune-jpegxl","p":857},{"i":887,"t":"Decoders","u":"/docs/images/JXL","h":"#decoders","p":857},{"i":888,"t":"jxl-oxide","u":"/docs/images/JXL","h":"#jxl-oxide","p":857},{"i":892,"t":"Bitstream","u":"/docs/introduction/terminology","h":"#bitstream","p":890},{"i":894,"t":"Lossy / Lossless","u":"/docs/introduction/terminology","h":"#lossy--lossless","p":890},{"i":896,"t":"Elementary stream","u":"/docs/introduction/terminology","h":"#elementary-stream","p":890},{"i":898,"t":"Muxing","u":"/docs/introduction/terminology","h":"#muxing","p":890},{"i":900,"t":"Codec","u":"/docs/introduction/terminology","h":"#codec","p":890},{"i":902,"t":"Filter","u":"/docs/introduction/terminology","h":"#filter","p":890},{"i":904,"t":"Muxer/Demuxer","u":"/docs/introduction/terminology","h":"#muxerdemuxer","p":890},{"i":906,"t":"Bitstream filter","u":"/docs/introduction/terminology","h":"#bitstream-filter","p":890},{"i":908,"t":"Container","u":"/docs/introduction/terminology","h":"#container","p":890},{"i":910,"t":"Transcoding","u":"/docs/introduction/terminology","h":"#transcoding","p":890},{"i":912,"t":"RDO","u":"/docs/introduction/terminology","h":"#rdo","p":890},{"i":914,"t":"Perceputal / Psychovisual / Psychoacoustic","u":"/docs/introduction/terminology","h":"#perceputal--psychovisual--psychoacoustic","p":890},{"i":916,"t":"Discrete Cosine Transform (DCT)","u":"/docs/introduction/terminology","h":"#discrete-cosine-transform-dct","p":890},{"i":924,"t":"Encoding","u":"/docs/images/WebP","h":"#encoding","p":922},{"i":925,"t":"Using libwebp","u":"/docs/images/WebP","h":"#using-libwebp","p":922},{"i":927,"t":"decoding","u":"/docs/images/WebP","h":"#decoding","p":922},{"i":929,"t":"Performance Checklist","u":"/docs/images/WebP","h":"#performance-checklist","p":922},{"i":933,"t":"Installing","u":"/docs/metrics/SSIMULACRA2","h":"#installing","p":931},{"i":935,"t":"Running","u":"/docs/metrics/SSIMULACRA2","h":"#running","p":931},{"i":936,"t":"On Images","u":"/docs/metrics/SSIMULACRA2","h":"#on-images","p":931},{"i":938,"t":"On Videos","u":"/docs/metrics/SSIMULACRA2","h":"#on-videos","p":931},{"i":940,"t":"Scoring","u":"/docs/metrics/SSIMULACRA2","h":"#scoring","p":931},{"i":948,"t":"Installation","u":"/docs/metrics/XPSNR","h":"#installation","p":946},{"i":950,"t":"Usage","u":"/docs/metrics/XPSNR","h":"#usage","p":946},{"i":952,"t":"Notes","u":"/docs/metrics/XPSNR","h":"#notes","p":946},{"i":954,"t":"Comparing to SSIMULACRA 2","u":"/docs/metrics/XPSNR","h":"#comparing-to-ssimulacra-2","p":946},{"i":960,"t":"Format","u":"/docs/subtitles/SRT","h":"#format","p":958},{"i":962,"t":"Unoffical 
features","u":"/docs/subtitles/SRT","h":"#unoffical-features","p":958},{"i":964,"t":"Example","u":"/docs/subtitles/SRT","h":"#example","p":958},{"i":968,"t":"Structure","u":"/docs/subtitles/webvtt","h":"#structure","p":966},{"i":970,"t":"Cue","u":"/docs/subtitles/webvtt","h":"#cue","p":966},{"i":972,"t":"Example","u":"/docs/subtitles/webvtt","h":"#example","p":966},{"i":976,"t":"Installation","u":"/docs/metrics/VMAF","h":"#installation","p":974},{"i":978,"t":"Using VMAF with FFmpeg","u":"/docs/metrics/VMAF","h":"#using-vmaf-with-ffmpeg","p":974},{"i":980,"t":"Note about the model path on Windows","u":"/docs/metrics/VMAF","h":"#note-about-the-model-path-on-windows","p":974},{"i":982,"t":"Scoring","u":"/docs/metrics/VMAF","h":"#scoring","p":974},{"i":984,"t":"Some weaknesses","u":"/docs/metrics/VMAF","h":"#some-weaknesses","p":974},{"i":986,"t":"Comparing to SSIMULACRA2","u":"/docs/metrics/VMAF","h":"#comparing-to-ssimulacra2","p":974},{"i":988,"t":"Additional resources","u":"/docs/metrics/VMAF","h":"","p":974},{"i":992,"t":"Format","u":"/docs/subtitles/SSA","h":"#format","p":990},{"i":994,"t":"ASS (SSA v4+) header","u":"/docs/subtitles/SSA","h":"#ass-ssa-v4-header","p":990},{"i":996,"t":"Further reading:","u":"/docs/subtitles/SSA","h":"#further-reading","p":990},{"i":1002,"t":"MoirΓ© Pattern","u":"/docs/introduction/video-artifacts","h":"#moirΓ©-pattern","p":1000},{"i":1004,"t":"Staircase Effect","u":"/docs/introduction/video-artifacts","h":"#staircase-effect","p":1000},{"i":1006,"t":"Color Bleed","u":"/docs/introduction/video-artifacts","h":"#color-bleed","p":1000},{"i":1008,"t":"Ringing","u":"/docs/introduction/video-artifacts","h":"#ringing","p":1000},{"i":1010,"t":"Blocking","u":"/docs/introduction/video-artifacts","h":"#blocking","p":1000},{"i":1012,"t":"Banding/Contouring","u":"/docs/introduction/video-artifacts","h":"#bandingcontouring","p":1000},{"i":1014,"t":"Mosquito Noise","u":"/docs/introduction/video-artifacts","h":"#mosquito-noise","p":1000},{"i":1020,"t":"Description","u":"/docs/utilities/av1an-command-gen","h":"#description","p":1018},{"i":1022,"t":"Usage","u":"/docs/utilities/av1an-command-gen","h":"#usage","p":1018},{"i":1024,"t":"Examples","u":"/docs/utilities/av1an-command-gen","h":"#examples","p":1018},{"i":1026,"t":"Installation","u":"/docs/utilities/av1an-command-gen","h":"#installation","p":1018},{"i":1028,"t":"License","u":"/docs/utilities/av1an-command-gen","h":"#license","p":1018},{"i":1032,"t":"Key","u":"/docs/utilities/Discord","h":"#key","p":1030},{"i":1034,"t":"Video Codecs & Containers","u":"/docs/utilities/Discord","h":"#video-codecs--containers","p":1030},{"i":1038,"t":"Installation","u":"/docs/utilities/Aviator","h":"#installation","p":1036},{"i":1040,"t":"Aviator's Defaults","u":"/docs/utilities/Aviator","h":"#aviators-defaults","p":1036},{"i":1042,"t":"Perceptual 
Optimization","u":"/docs/utilities/Aviator","h":"#perceptual-optimization","p":1036},{"i":1044,"t":"Video","u":"/docs/utilities/Aviator","h":"#video","p":1036},{"i":1046,"t":"Audio","u":"/docs/utilities/Aviator","h":"#audio","p":1036},{"i":1048,"t":"Output","u":"/docs/utilities/Aviator","h":"#output","p":1036},{"i":1050,"t":"Credits","u":"/docs/utilities/Aviator","h":"#credits","p":1036},{"i":1054,"t":"Installation","u":"/docs/utilities/dovi_tool","h":"#installation","p":1052},{"i":1056,"t":"Usage","u":"/docs/utilities/dovi_tool","h":"#usage","p":1052},{"i":1060,"t":"Prerequisites","u":"/docs/utilities/av1an","h":"#prerequisites","p":1058},{"i":1062,"t":"Installation","u":"/docs/utilities/av1an","h":"#installation","p":1058},{"i":1063,"t":"Windows","u":"/docs/utilities/av1an","h":"#windows","p":1058},{"i":1065,"t":"macOS","u":"/docs/utilities/av1an","h":"#macos","p":1058},{"i":1067,"t":"Linux","u":"/docs/utilities/av1an","h":"#linux","p":1058},{"i":1069,"t":"Docker","u":"/docs/utilities/av1an","h":"#docker","p":1058},{"i":1071,"t":"Installing Dependencies","u":"/docs/utilities/av1an","h":"#installing-dependencies","p":1058},{"i":1073,"t":"Troubleshooting","u":"/docs/utilities/av1an","h":"#troubleshooting","p":1058},{"i":1074,"t":"\"Error: The file 'XXXXX.ivf' could not be opened for reading: open file error.\" with mkvmerge on Linux","u":"/docs/utilities/av1an","h":"#error-the-file-xxxxxivf-could-not-be-opened-for-reading-open-file-error-with-mkvmerge-on-linux","p":1058},{"i":1076,"t":"Gray screen flashing for a single frame in output","u":"/docs/utilities/av1an","h":"#gray-screen-flashing-for-a-single-frame-in-output","p":1058},{"i":1080,"t":"Installation","u":"/docs/utilities/eac3to","h":"#installation","p":1078},{"i":1082,"t":"Usage","u":"/docs/utilities/eac3to","h":"#usage","p":1078},{"i":1083,"t":"Audio conversion","u":"/docs/utilities/eac3to","h":"#audio-conversion","p":1078},{"i":1085,"t":"Get BDMV info","u":"/docs/utilities/eac3to","h":"#get-bdmv-info","p":1078},{"i":1087,"t":"Demux","u":"/docs/utilities/eac3to","h":"#demux","p":1078},{"i":1089,"t":"Delay audio","u":"/docs/utilities/eac3to","h":"#delay-audio","p":1078},{"i":1093,"t":"Installation","u":"/docs/utilities/FFMetrics","h":"#installation","p":1091},{"i":1095,"t":"Usage","u":"/docs/utilities/FFMetrics","h":"#usage","p":1091},{"i":1099,"t":"Installation","u":"/docs/utilities/nmkoder","h":"#installation","p":1097},{"i":1101,"t":"Usage","u":"/docs/utilities/nmkoder","h":"#usage","p":1097},{"i":1105,"t":"Installation","u":"/docs/utilities/hdr10plus_tool","h":"#installation","p":1103},{"i":1107,"t":"Usage","u":"/docs/utilities/hdr10plus_tool","h":"#usage","p":1103},{"i":1109,"t":"Extracting","u":"/docs/utilities/hdr10plus_tool","h":"#extracting","p":1103},{"i":1111,"t":"Injecting","u":"/docs/utilities/hdr10plus_tool","h":"#injecting","p":1103},{"i":1113,"t":"Removing HDR10+ Metadata","u":"/docs/utilities/hdr10plus_tool","h":"#removing-hdr10-metadata","p":1103},{"i":1121,"t":"Installation","u":"/docs/utilities/YUView","h":"#installation","p":1119},{"i":1123,"t":"Usage","u":"/docs/utilities/YUView","h":"#usage","p":1119},{"i":1127,"t":"Installation","u":"/docs/utilities/ffmpeg","h":"","p":1125},{"i":1129,"t":"Linux & macOS","u":"/docs/utilities/ffmpeg","h":"#linux--macos","p":1125},{"i":1131,"t":"Windows","u":"/docs/utilities/ffmpeg","h":"#windows","p":1125},{"i":1133,"t":"Using 
FFmpeg","u":"/docs/utilities/ffmpeg","h":"","p":1125},{"i":1137,"t":"MPV","u":"/docs/video-players","h":"#mpv","p":1135},{"i":1139,"t":"VLC","u":"/docs/video-players","h":"#vlc","p":1135},{"i":1141,"t":"MPC-HC","u":"/docs/video-players","h":"#mpc-hc","p":1135},{"i":1149,"t":"Installation","u":"/docs/utilities/rav1ator-cli","h":"#installation","p":1147},{"i":1151,"t":"Linux (Arch)","u":"/docs/utilities/rav1ator-cli","h":"#linux-arch","p":1147},{"i":1153,"t":"Linux (Other)","u":"/docs/utilities/rav1ator-cli","h":"#linux-other","p":1147},{"i":1155,"t":"Windows","u":"/docs/utilities/rav1ator-cli","h":"#windows","p":1147},{"i":1157,"t":"Basic installtion","u":"/docs/utilities/rav1ator-cli","h":"#basic-installtion","p":1147},{"i":1159,"t":"After the Installation and Cleanup, How Do I Start Arch?","u":"/docs/utilities/rav1ator-cli","h":"#after-the-installation-and-cleanup-how-do-i-start-arch","p":1147},{"i":1161,"t":"Unlock WSL RAM Usage (Optional)","u":"/docs/utilities/rav1ator-cli","h":"#unlock-wsl-ram-usage-optional","p":1147},{"i":1163,"t":"macOS","u":"/docs/utilities/rav1ator-cli","h":"#macos","p":1147},{"i":1165,"t":"Troubleshooting","u":"/docs/utilities/rav1ator-cli","h":"#troubleshooting","p":1147},{"i":1173,"t":"History","u":"/docs/video/FFV1","h":"#history","p":1171},{"i":1175,"t":"Usage","u":"/docs/video/FFV1","h":"#usage","p":1171},{"i":1177,"t":"Options","u":"/docs/video/FFV1","h":"#options","p":1171},{"i":1179,"t":"Intra-frame only catch","u":"/docs/video/FFV1","h":"#intra-frame-only-catch","p":1171},{"i":1183,"t":"Installation","u":"/docs/utilities/MKVToolNix","h":"#installation","p":1181},{"i":1185,"t":"Usage","u":"/docs/utilities/MKVToolNix","h":"#usage","p":1181},{"i":1187,"t":"Tips and tricks","u":"/docs/utilities/MKVToolNix","h":"#tips-and-tricks","p":1181},{"i":1191,"t":"Format Breakdown","u":"/docs/video/prores","h":"#format-breakdown","p":1189},{"i":1193,"t":"Usage","u":"/docs/video/prores","h":"#usage","p":1189},{"i":1197,"t":"Encoding","u":"/docs/video/Theora","h":"#encoding","p":1195},{"i":1203,"t":"Encoding","u":"/docs/video/VC-1","h":"#encoding","p":1201},{"i":1205,"t":"Decoding","u":"/docs/video/VC-1","h":"#decoding","p":1201},{"i":1217,"t":"Samples","u":"/blog/svt-av1-deep-dive","h":"#samples","p":1215},{"i":1219,"t":"Presets comparisons (-1 -> 13)","u":"/blog/svt-av1-deep-dive","h":"#presets-comparisons--1---13","p":1215},{"i":1221,"t":"TLDR","u":"/blog/svt-av1-deep-dive","h":"#tldr","p":1215},{"i":1223,"t":"Tunes comparisons","u":"/blog/svt-av1-deep-dive","h":"#tunes-comparisons","p":1215},{"i":1225,"t":"TLDR","u":"/blog/svt-av1-deep-dive","h":"#tldr-1","p":1215},{"i":1227,"t":"Parameters comparisons","u":"/blog/svt-av1-deep-dive","h":"#parameters-comparisons","p":1215},{"i":1229,"t":"--tile-rows 1 --tile-columns 1 vs default --tile-rows 0 --tile-columns 0","u":"/blog/svt-av1-deep-dive","h":"#--tile-rows-1---tile-columns-1-vs-default---tile-rows-0---tile-columns-0","p":1215},{"i":1231,"t":"--aq-mode 0 vs default --aq-mode 2","u":"/blog/svt-av1-deep-dive","h":"#--aq-mode-0-vs-default---aq-mode-2","p":1215},{"i":1233,"t":"--aq-mode 1 vs default --aq-mode 2","u":"/blog/svt-av1-deep-dive","h":"#--aq-mode-1-vs-default---aq-mode-2","p":1215},{"i":1235,"t":"--enable-cdef 0 vs default --enable-cdef 1","u":"/blog/svt-av1-deep-dive","h":"#--enable-cdef-0-vs-default---enable-cdef-1","p":1215},{"i":1237,"t":"--enable-dg 0 vs default --enable-dg 1","u":"/blog/svt-av1-deep-dive","h":"#--enable-dg-0-vs-default---enable-dg-1","p":1215},{"i":1239,"t":"--enable-dlf 0 vs default 
--enable-dlf 1","u":"/blog/svt-av1-deep-dive","h":"#--enable-dlf-0-vs-default---enable-dlf-1","p":1215},{"i":1241,"t":"--fast-decode 1 vs default --fast-decode 0","u":"/blog/svt-av1-deep-dive","h":"#--fast-decode-1-vs-default---fast-decode-0","p":1215},{"i":1243,"t":"--irefresh-type 1 vs default --irefresh-type 2","u":"/blog/svt-av1-deep-dive","h":"#--irefresh-type-1-vs-default---irefresh-type-2","p":1215},{"i":1245,"t":"--lookahead 0 vs default --lookahead -1 (auto)","u":"/blog/svt-av1-deep-dive","h":"#--lookahead-0-vs-default---lookahead--1-auto","p":1215},{"i":1247,"t":"--lookahead 60 vs default --lookahead -1 (auto)","u":"/blog/svt-av1-deep-dive","h":"#--lookahead-60-vs-default---lookahead--1-auto","p":1215},{"i":1249,"t":"--lookahead 120 (max) vs default --lookahead -1 (auto)","u":"/blog/svt-av1-deep-dive","h":"#--lookahead-120-max-vs-default---lookahead--1-auto","p":1215},{"i":1251,"t":"--enable-overlays 1 vs default --enable-overlays 0","u":"/blog/svt-av1-deep-dive","h":"#--enable-overlays-1-vs-default---enable-overlays-0","p":1215},{"i":1253,"t":"--enable-qm 1 vs default --enable-qm 0","u":"/blog/svt-av1-deep-dive","h":"#--enable-qm-1-vs-default---enable-qm-0","p":1215},{"i":1255,"t":"--enable-qm 1 --qm-min 0 vs --enable-qm 1","u":"/blog/svt-av1-deep-dive","h":"#--enable-qm-1---qm-min-0-vs---enable-qm-1","p":1215},{"i":1257,"t":"--enable-restoration 0 vs default --enable-restoration 1","u":"/blog/svt-av1-deep-dive","h":"#--enable-restoration-0-vs-default---enable-restoration-1","p":1215},{"i":1259,"t":"--scm 0 vs default --scm 2 (content adaptive)","u":"/blog/svt-av1-deep-dive","h":"#--scm-0-vs-default---scm-2-content-adaptive","p":1215},{"i":1261,"t":"--scm 1 vs default --scm 2 (content adaptive)","u":"/blog/svt-av1-deep-dive","h":"#--scm-1-vs-default---scm-2-content-adaptive","p":1215},{"i":1263,"t":"--enable-tf 0 vs default --enable-tf 1","u":"/blog/svt-av1-deep-dive","h":"#--enable-tf-0-vs-default---enable-tf-1","p":1215},{"i":1265,"t":"--enable-tpl-la 0 vs default --enable-tpl-la 1","u":"/blog/svt-av1-deep-dive","h":"#--enable-tpl-la-0-vs-default---enable-tpl-la-1","p":1215},{"i":1267,"t":"superres:","u":"/blog/svt-av1-deep-dive","h":"#superres","p":1215},{"i":1269,"t":"Early TLDR on parameters results:","u":"/blog/svt-av1-deep-dive","h":"#early-tldr-on-parameters-results","p":1215},{"i":1271,"t":"Conclusion","u":"/blog/svt-av1-deep-dive","h":"#conclusion","p":1215},{"i":1275,"t":"Feedback","u":"/blog/svt-av1-second-deep-dive","h":"#feedback","p":1273},{"i":1277,"t":"Methodology","u":"/blog/svt-av1-second-deep-dive","h":"#methodology","p":1273},{"i":1279,"t":"Samples","u":"/blog/svt-av1-second-deep-dive","h":"#samples","p":1273},{"i":1281,"t":"Presets comparisons (-1 -> 13 12)","u":"/blog/svt-av1-second-deep-dive","h":"#presets-comparisons--1---13-12","p":1273},{"i":1283,"t":"Efficiency","u":"/blog/svt-av1-second-deep-dive","h":"#efficiency","p":1273},{"i":1285,"t":"Speed","u":"/blog/svt-av1-second-deep-dive","h":"#speed","p":1273},{"i":1287,"t":"Interpretation","u":"/blog/svt-av1-second-deep-dive","h":"#interpretation","p":1273},{"i":1289,"t":"TLDR","u":"/blog/svt-av1-second-deep-dive","h":"#tldr","p":1273},{"i":1291,"t":"SVT-AV1 v2.0.0 vs v2.1.0 presets comparisons:","u":"/blog/svt-av1-second-deep-dive","h":"#svt-av1-v200-vs-v210-presets-comparisons","p":1273},{"i":1293,"t":"preset -1: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset--1-v200-vs-v210","p":1273},{"i":1295,"t":"preset 0: v2.0.0 vs 
v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-0-v200-vs-v210","p":1273},{"i":1297,"t":"preset 1: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-1-v200-vs-v210","p":1273},{"i":1299,"t":"preset 2: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-2-v200-vs-v210","p":1273},{"i":1301,"t":"preset 3: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-3-v200-vs-v210","p":1273},{"i":1303,"t":"preset 4: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-4-v200-vs-v210","p":1273},{"i":1305,"t":"preset 5: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-5-v200-vs-v210","p":1273},{"i":1307,"t":"preset 6: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-6-v200-vs-v210","p":1273},{"i":1309,"t":"preset 7: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-7-v200-vs-v210","p":1273},{"i":1311,"t":"preset 8: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-8-v200-vs-v210","p":1273},{"i":1313,"t":"preset 9: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-9-v200-vs-v210","p":1273},{"i":1315,"t":"preset 10: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-10-v200-vs-v210","p":1273},{"i":1317,"t":"preset 11: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-11-v200-vs-v210","p":1273},{"i":1319,"t":"preset 12: v2.0.0 vs v2.1.0","u":"/blog/svt-av1-second-deep-dive","h":"#preset-12-v200-vs-v210","p":1273},{"i":1321,"t":"TLDR","u":"/blog/svt-av1-second-deep-dive","h":"#tldr-1","p":1273},{"i":1323,"t":"Conclusion","u":"/blog/svt-av1-second-deep-dive","h":"#conclusion","p":1273},{"i":1325,"t":"Future","u":"/blog/svt-av1-second-deep-dive","h":"#future","p":1273}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/4",[0,2.721,1,5.131]],["t/6",[2,5.473,3,4.357]],["t/7",[4,5.131,5,4.502]],["t/9",[5,4.502,6,5.992]],["t/11",[5,4.502,7,5.992]],["t/13",[5,4.502,8,5.992]],["t/15",[9,5.838]],["t/17",[10,4.902]],["t/19",[4,5.131,5,4.502]],["t/21",[5,4.502,11,5.992]],["t/23",[12,4.876,13,5.992]],["t/25",[14,5.626]],["t/27",[15,4.107]],["t/29",[16,5.992,17,5.992]],["t/31",[18,4.064,19,2.867,20,4.064]],["t/33",[21,5.473,22,5.992]],["t/37",[23,7.488]],["t/39",[24,4.994,25,4.994,26,4.994]],["t/41",[27,6.84]],["t/43",[28,5.992,29,5.473]],["t/45",[29,5.473,30,5.992]],["t/47",[19,2.867,31,4.562,32,4.277]],["t/49",[33,4.994,34,4.994,35,4.562]],["t/51",[36,7.488]],["t/52",[37,7.488]],["t/54",[38,7.488]],["t/56",[19,2.458,35,3.911,39,4.282,40,4.282]],["t/58",[41,7.488]],["t/60",[42,6.84]],["t/64",[19,2.867,43,4.994,44,4.994]],["t/66",[45,5.992,46,5.992]],["t/68",[47,5.992,48,5.992]],["t/70",[49,5.992,50,4.672]],["t/72",[51,7.488]],["t/74",[52,3.923,53,5.992]],["t/76",[54,5.992,55,5.473]],["t/80",[56,6.84]],["t/82",[57,5.286]],["t/84",[19,2.867,58,4.994,59,4.994]],["t/86",[1,6.412]],["t/88",[4,6.412]],["t/90",[60,7.488]],["t/92",[61,5.144]],["t/94",[15,4.107]],["t/96",[57,4.23,62,4.672]],["t/98",[63,7.488]],["t/100",[64,5.992,65,5.992]],["t/102",[57,3.525,62,3.894,66,4.064]],["t/104",[61,5.144]],["t/106",[21,5.473,61,4.117]],["t/110",[67,3.76,68,4.015]],["t/112",[69,4.357,70,5.992]],["t/114",[19,2.151,69,4.227,71,3.747,72,3.747]],["t/116",[69,5.445]],["t/118",[73,7.488]],["t/120",[69,4.357,74,5.992]],["t/122",[15,4.107]],["t/124",[69,3.631,75,4.994,76,4.994]],["t/126",[77,5.992,78,4.502]],["t/128",[69,4.357,79,3.386]],["t/130",[80,7.488]],["t/132",[69,4.357,81,5.992]],["t/134",[82,7.488]],["t/136",[61,5.144]],["t/140"
,[83,7.488]],["t/142",[84,7.488]],["t/144",[85,4.994,86,4.994,87,4.994]],["t/146",[88,5.992,89,4.876]],["t/148",[90,5.992,91,5.992]],["t/150",[42,5.473,92,5.992]],["t/156",[67,3.76,93,5.131]],["t/157",[94,5.131,95,5.131]],["t/159",[94,4.277,95,4.277,96,4.994]],["t/161",[97,7.488]],["t/163",[94,5.131,98,4.672]],["t/165",[99,7.488]],["t/169",[100,5.992,101,3.619]],["t/171",[79,2.419,102,3.911,103,3.911,104,3.338]],["t/173",[1,2.567,102,2.738,103,4.448,104,2.337,105,2.998,106,2.998]],["t/177",[19,2.458,107,3.666,108,4.282,109,4.282]],["t/179",[52,3.269,78,3.753,110,3.753]],["t/181",[52,3.269,78,3.753,111,4.064]],["t/183",[61,5.144]],["t/189",[67,3.76,68,4.015]],["t/191",[112,7.488]],["t/193",[113,7.488]],["t/195",[15,4.107]],["t/196",[114,7.488]],["t/198",[115,7.488]],["t/200",[116,5.992,117,5.992]],["t/208",[118,3.556,119,5.992]],["t/210",[120,3.154,121,5.473]],["t/212",[122,4.015,123,5.131]],["t/214",[98,4.672,124,5.131]],["t/216",[125,4.876,126,5.131]],["t/218",[127,4.064,128,3.346,129,4.277]],["t/220",[128,3.346,130,4.064,131,4.277]],["t/222",[132,4.876,133,5.992]],["t/224",[134,3.049,135,2.921,136,3.422,137,3.049,138,3.049]],["t/226",[135,3.338,137,3.484,138,3.484,139,3.022]],["t/228",[128,3.346,140,4.064,141,4.994]],["t/230",[136,3.042,137,2.71,138,2.71,142,2.502,143,3.042,144,3.042]],["t/232",[137,3.049,138,3.049,143,3.422,144,3.422,145,3.049]],["t/234",[146,5.473,147,5.992]],["t/238",[120,3.154,121,5.473]],["t/240",[122,4.015,123,5.131]],["t/242",[98,4.672,124,5.131]],["t/244",[125,4.876,126,5.131]],["t/246",[127,4.064,128,3.346,129,4.277]],["t/248",[128,3.346,130,4.064,131,4.277]],["t/250",[132,4.876,148,5.992]],["t/252",[134,4.876,135,4.672]],["t/254",[128,3.346,139,3.525,149,4.562]],["t/256",[140,4.064,150,4.994,151,4.562]],["t/258",[142,3.753,151,4.562,152,4.994]],["t/260",[153,4.282,154,4.282,155,4.282,156,4.282]],["t/266",[120,3.154,157,5.992]],["t/268",[122,4.015,123,5.131]],["t/270",[98,4.672,124,5.131]],["t/272",[125,4.876,126,5.131]],["t/274",[127,4.064,128,3.346,129,4.277]],["t/276",[128,3.346,130,4.064,131,4.277]],["t/278",[132,4.876,158,5.992]],["t/280",[134,4.064,159,4.562,160,4.994]],["t/282",[139,3.525,159,4.562,161,4.994]],["t/284",[140,4.876,162,5.992]],["t/286",[142,4.502,163,5.992]],["t/288",[145,4.876,164,5.992]],["t/290",[135,3.338,139,3.022,146,3.911,165,3.022]],["t/292",[135,3.338,142,3.217,165,3.022,166,4.282]],["t/294",[128,2.51,167,3.747,168,3.747,169,3.422,170,3.747]],["t/296",[128,3.346,149,4.562,171,4.994]],["t/298",[169,3.042,172,3.331,173,3.042,174,3.042,175,3.331,176,3.331]],["t/306",[177,5.838]],["t/308",[174,5.473,178,4.876]],["t/310",[67,3.76,68,4.015]],["t/312",[15,4.107]],["t/313",[179,7.488]],["t/315",[79,4.231]],["t/317",[180,5.992,181,5.992]],["t/319",[182,5.626]],["t/325",[183,5.992,184,5.473]],["t/327",[185,7.488]],["t/329",[19,2.458,186,3.911,187,4.282,188,4.282]],["t/331",[189,7.488]],["t/333",[0,3.401]],["t/335",[190,5.992,191,5.992]],["t/337",[192,7.488]],["t/339",[193,7.488]],["t/343",[194,4.015,195,5.473]],["t/345",[196,7.488]],["t/347",[197,7.488]],["t/349",[198,5.992,199,5.473]],["t/351",[165,4.23,200,5.473]],["t/353",[201,4.994,202,2.315,203,4.994]],["t/355",[204,7.488]],["t/357",[205,5.992,206,5.992]],["t/359",[67,3.76,207,5.992]],["t/361",[208,6.84]],["t/363",[209,7.488]],["t/367",[67,3.76,68,4.015]],["t/369",[89,6.093]],["t/371",[15,4.107]],["t/372",[9,3.894,10,3.269,19,2.867]],["t/374",[3,5.445]],["t/378",[67,3.76,68,4.015]],["t/380",[89,6.093]],["t/382",[15,4.107]],["t/384",[61,5.144]],["t/392",[210,3.544]],["t/394",[211,4.994,21
2,4.562,213,4.562]],["t/396",[212,4.562,213,4.562,214,4.562]],["t/400",[210,3.544]],["t/402",[52,3.923,215,4.876]],["t/404",[215,4.876,216,5.473]],["t/408",[210,3.544]],["t/410",[52,4.902]],["t/412",[216,6.84]],["t/418",[210,3.544]],["t/420",[79,4.231]],["t/428",[79,4.231]],["t/430",[0,3.401]],["t/434",[217,7.488]],["t/436",[0,3.401]],["t/437",[10,3.923,14,4.502]],["t/439",[12,6.093]],["t/441",[210,3.544]],["t/443",[218,7.488]],["t/447",[79,4.231]],["t/449",[0,3.401]],["t/451",[210,3.544]],["t/452",[15,3.287,57,4.23]],["t/454",[15,3.287,219,5.473]],["t/456",[220,6.412]],["t/458",[18,4.064,19,2.867,20,4.064]],["t/462",[79,4.231]],["t/464",[101,3.017,194,3.346,221,3.525]],["t/466",[0,3.401]],["t/468",[210,3.544]],["t/469",[15,3.287,57,4.23]],["t/471",[15,3.287,219,5.473]],["t/473",[220,6.412]],["t/475",[222,5.473,223,5.473]],["t/479",[0,3.401]],["t/481",[210,3.544]],["t/485",[79,4.231]],["t/487",[101,3.017,194,3.346,221,3.525]],["t/489",[0,3.401]],["t/491",[224,7.488]],["t/493",[101,2.586,139,3.022,165,3.022,225,4.282]],["t/495",[226,7.488]],["t/497",[210,3.544]],["t/501",[79,4.231]],["t/503",[101,3.017,194,3.346,221,3.525]],["t/505",[0,3.401]],["t/507",[0,1.945,101,2.586,227,4.282,228,3.484]],["t/509",[210,3.544]],["t/511",[18,4.064,19,2.867,20,4.064]],["t/517",[79,4.231]],["t/519",[101,3.017,194,3.346,221,3.525]],["t/521",[210,3.544]],["t/522",[229,7.488]],["t/526",[230,4.994,231,4.994,232,4.994]],["t/528",[177,4.672,233,5.473]],["t/530",[234,5.992,235,3.335]],["t/532",[236,7.488]],["t/534",[0,3.401]],["t/536",[57,2.645,62,2.921,66,3.049,177,2.921,237,3.747]],["t/538",[238,6.84]],["t/542",[210,3.544]],["t/544",[79,4.231]],["t/546",[239,7.488]],["t/554",[79,4.231]],["t/556",[101,3.017,194,3.346,221,3.525]],["t/558",[0,3.401]],["t/560",[210,3.544]],["t/561",[210,2.836,240,5.992]],["t/563",[15,3.287,241,5.992]],["t/565",[15,2.739,242,4.562,243,4.562]],["t/567",[244,4.994,245,4.277,246,3.631]],["t/571",[79,4.231]],["t/573",[101,3.017,194,3.346,221,3.525]],["t/575",[0,2.721,247,5.992]],["t/577",[12,4.876,248,5.992]],["t/579",[186,6.84]],["t/581",[215,4.876,245,5.131]],["t/583",[245,5.131,249,5.131]],["t/585",[250,4.562,251,4.994,252,4.994]],["t/587",[253,7.488]],["t/589",[254,7.488]],["t/591",[15,4.107]],["t/595",[79,4.231]],["t/597",[101,3.017,194,3.346,221,3.525]],["t/599",[0,3.401]],["t/601",[15,4.107]],["t/602",[31,6.84]],["t/604",[255,6.84]],["t/606",[15,3.287,256,5.473]],["t/608",[222,5.473,223,5.473]],["t/610",[57,3.525,62,3.894,66,4.064]],["t/614",[0,3.401]],["t/616",[101,3.017,139,3.525,165,3.525]],["t/618",[210,3.544]],["t/620",[182,5.626]],["t/624",[0,3.401]],["t/625",[10,3.923,14,4.502]],["t/627",[12,6.093]],["t/629",[101,3.017,139,3.525,165,3.525]],["t/631",[210,3.544]],["t/633",[257,6.412]],["t/637",[79,4.231]],["t/639",[0,3.401]],["t/641",[246,5.445]],["t/643",[258,4.231]],["t/645",[259,7.488]],["t/647",[260,7.488]],["t/649",[261,7.488]],["t/651",[262,7.488]],["t/653",[66,4.876,263,5.992]],["t/655",[264,5.131,265,5.992]],["t/657",[266,5.992,267,5.473]],["t/661",[0,3.401]],["t/663",[79,3.386,268,5.992]],["t/665",[210,3.544]],["t/675",[79,4.231]],["t/677",[101,3.017,194,3.346,221,3.525]],["t/679",[0,3.401]],["t/681",[210,3.544]],["t/683",[220,6.412]],["t/685",[258,4.231]],["t/687",[269,7.488]],["t/689",[270,5.473,271,5.992]],["t/691",[178,4.876,272,5.131]],["t/693",[208,5.473,273,5.131]],["t/695",[267,5.473,274,5.992]],["t/697",[15,3.287,110,4.502]],["t/700",[275,7.488]],["t/702",[184,4.562,276,4.994,277,4.994]],["t/704",[278,5.473,279,5.992]],["t/706",[50,4.672,280,5.992]],["t/713"
,[281,5.992,282,4.672]],["t/715",[199,5.473,282,4.672]],["t/717",[165,3.525,200,4.562,283,4.994]],["t/719",[284,7.488]],["t/721",[285,7.488]],["t/723",[286,7.488]],["t/729",[93,6.412]],["t/731",[287,7.488]],["t/733",[210,3.544]],["t/735",[288,7.488]],["t/737",[210,3.544]],["t/739",[246,5.445]],["t/741",[182,5.626]],["t/747",[93,6.412]],["t/749",[210,3.544]],["t/751",[246,4.357,289,5.992]],["t/753",[246,4.357,290,5.992]],["t/755",[182,5.626]],["t/759",[56,6.84]],["t/761",[0,3.401]],["t/762",[2,5.473,3,4.357]],["t/764",[10,3.923,14,4.502]],["t/766",[10,4.902]],["t/768",[291,7.488]],["t/770",[292,6.412]],["t/772",[282,4.672,293,5.992]],["t/774",[294,7.488]],["t/776",[295,7.488]],["t/778",[296,7.488]],["t/782",[297,4.23,298,4.23]],["t/784",[67,3.76,68,4.015]],["t/785",[299,6.84]],["t/787",[32,6.412]],["t/789",[61,5.144]],["t/793",[297,4.23,298,4.23]],["t/797",[297,4.23,298,4.23]],["t/801",[297,4.23,298,4.23]],["t/803",[52,4.902]],["t/811",[297,4.23,298,4.23]],["t/813",[67,3.76,68,4.015]],["t/815",[300,7.488]],["t/817",[299,6.84]],["t/819",[32,6.412]],["t/823",[19,2.867,301,4.994,302,4.994]],["t/825",[52,3.269,110,3.753,303,4.994]],["t/829",[297,4.23,298,4.23]],["t/833",[173,6.84]],["t/835",[228,6.093]],["t/837",[228,6.093]],["t/839",[242,5.473,243,5.473]],["t/843",[27,4.562,52,3.269,111,4.064]],["t/847",[304,7.488]],["t/849",[50,5.838]],["t/851",[305,7.488]],["t/855",[68,5.017]],["t/859",[297,4.23,298,4.23]],["t/861",[67,3.76,68,4.015]],["t/863",[52,3.923,110,4.502]],["t/865",[52,3.923,111,4.876]],["t/867",[101,3.017,165,3.525,306,4.994]],["t/869",[307,5.992,308,4.672]],["t/871",[52,2.803,110,3.217,309,4.282,310,4.282]],["t/873",[101,3.619,311,5.992]],["t/875",[177,5.838]],["t/877",[15,4.107]],["t/879",[312,6.84]],["t/881",[312,5.473,313,5.992]],["t/883",[314,7.488]],["t/885",[315,5.992,316,5.992]],["t/887",[308,5.838]],["t/888",[317,5.992,318,5.992]],["t/892",[319,6.84]],["t/894",[19,2.867,110,3.753,111,4.064]],["t/896",[320,5.992,321,5.992]],["t/898",[322,7.488]],["t/900",[50,5.838]],["t/902",[282,5.838]],["t/904",[323,7.488]],["t/906",[282,4.672,319,5.473]],["t/908",[324,6.84]],["t/910",[325,7.488]],["t/912",[326,7.488]],["t/914",[19,3.337,327,3.747,328,3.747,329,3.747]],["t/916",[330,4.282,331,4.282,332,4.282,333,4.282]],["t/924",[15,4.107]],["t/925",[104,4.672,334,5.992]],["t/927",[308,5.838]],["t/929",[297,4.23,298,4.23]],["t/933",[0,3.401]],["t/935",[250,6.84]],["t/936",[55,6.84]],["t/938",[335,6.412]],["t/940",[336,6.84]],["t/948",[0,3.401]],["t/950",[210,3.544]],["t/952",[182,5.626]],["t/954",[122,3.346,337,4.562,338,4.994]],["t/960",[67,4.698]],["t/962",[177,4.672,339,5.992]],["t/964",[340,6.412]],["t/968",[341,7.488]],["t/970",[342,7.488]],["t/972",[340,6.412]],["t/976",[0,3.401]],["t/978",[79,2.822,104,3.894,343,4.994]],["t/980",[3,3.113,182,3.217,195,3.911,344,4.282]],["t/982",[336,6.84]],["t/984",[255,6.84]],["t/986",[337,5.473,345,5.992]],["t/988",[233,5.473,346,5.992]],["t/992",[67,4.698]],["t/994",[347,4.282,348,4.282,349,4.282,350,4.282]],["t/996",[351,5.992,352,5.473]],["t/1002",[353,5.992,354,5.992]],["t/1004",[355,5.992,356,5.992]],["t/1006",[194,4.015,357,5.992]],["t/1008",[358,7.488]],["t/1010",[359,7.488]],["t/1012",[360,7.488]],["t/1014",[361,5.992,362,5.992]],["t/1020",[363,7.488]],["t/1022",[210,3.544]],["t/1024",[340,6.412]],["t/1026",[0,3.401]],["t/1028",[238,6.84]],["t/1032",[364,7.488]],["t/1034",[19,2.458,50,3.338,324,3.911,335,3.666]],["t/1038",[0,3.401]],["t/1040",[235,3.335,365,5.992]],["t/1042",[256,5.473,366,5.992]],["t/1044",[335,6.412]],["t/1046",[78,5.626
]],["t/1048",[292,6.412]],["t/1050",[367,7.488]],["t/1054",[0,3.401]],["t/1056",[210,3.544]],["t/1060",[368,7.488]],["t/1062",[0,3.401]],["t/1063",[3,5.445]],["t/1065",[9,5.838]],["t/1067",[10,4.902]],["t/1069",[369,7.488]],["t/1071",[0,2.721,370,5.992]],["t/1073",[257,6.412]],["t/1074",[10,1.51,215,3.187,270,3.577,352,2.107,371,3.916,372,2.306,373,2.306]],["t/1076",[273,2.852,292,2.852,374,3.331,375,3.331,376,3.331,377,3.331]],["t/1080",[0,3.401]],["t/1082",[210,3.544]],["t/1083",[78,4.502,378,5.992]],["t/1085",[379,5.992,380,5.992]],["t/1087",[381,7.488]],["t/1089",[78,4.502,382,5.992]],["t/1093",[0,3.401]],["t/1095",[210,3.544]],["t/1099",[0,3.401]],["t/1101",[210,3.544]],["t/1105",[0,3.401]],["t/1107",[210,3.544]],["t/1109",[214,6.84]],["t/1111",[383,7.488]],["t/1113",[228,4.064,384,4.994,385,4.994]],["t/1121",[0,3.401]],["t/1123",[210,3.544]],["t/1127",[0,3.401]],["t/1129",[9,3.894,10,3.269,19,2.867]],["t/1131",[3,5.445]],["t/1133",[79,3.386,104,4.672]],["t/1137",[386,7.488]],["t/1139",[387,7.488]],["t/1141",[388,5.992,389,5.992]],["t/1149",[0,3.401]],["t/1151",[10,3.923,14,4.502]],["t/1153",[10,4.902]],["t/1155",[3,5.445]],["t/1157",[390,5.992,391,5.992]],["t/1159",[0,1.945,14,3.217,278,3.911,392,4.282]],["t/1161",[210,1.773,249,3.208,393,3.747,394,3.747,395,3.747]],["t/1163",[9,5.838]],["t/1165",[257,6.412]],["t/1173",[89,6.093]],["t/1175",[210,3.544]],["t/1177",[249,6.412]],["t/1179",[273,4.277,396,4.994,397,4.994]],["t/1183",[0,3.401]],["t/1185",[210,3.544]],["t/1187",[18,4.876,20,4.876]],["t/1191",[67,3.76,68,4.015]],["t/1193",[210,3.544]],["t/1197",[15,4.107]],["t/1203",[15,4.107]],["t/1205",[308,5.838]],["t/1217",[107,6.412]],["t/1219",[19,3.042,120,1.753,145,2.71,258,1.882,398,2.597]],["t/1221",[399,5.838]],["t/1223",[398,4.672,400,5.992]],["t/1225",[399,5.838]],["t/1227",[246,4.357,398,4.672]],["t/1229",[118,1.843,120,1.635,202,0.817,235,0.982,401,5.011,402,3.105,403,3.105]],["t/1231",[118,1.617,122,1.826,178,3.665,202,1.263,235,1.517,272,3.857]],["t/1233",[120,1.435,122,1.826,178,3.665,202,1.263,235,1.517,272,3.857]],["t/1235",[118,1.617,120,1.435,202,1.263,235,1.517,404,3.095,405,4.505]],["t/1237",[118,1.617,120,1.435,202,1.263,235,1.517,404,3.095,406,4.505]],["t/1239",[118,1.617,120,1.435,202,1.263,235,1.517,404,3.095,407,4.505]],["t/1241",[118,1.617,120,1.435,202,1.263,235,1.517,308,3.512,408,4.505]],["t/1243",[120,1.435,122,1.826,202,1.263,235,1.517,409,4.505,410,4.505]],["t/1245",[118,1.779,120,1.578,202,1.389,235,1.668,411,4.17,412,2.567]],["t/1247",[120,1.578,202,1.389,235,1.668,411,4.17,412,2.567,413,2.998]],["t/1249",[120,1.435,202,1.263,235,1.517,411,3.857,412,2.334,414,2.725,415,2.725]],["t/1251",[118,1.617,120,1.435,202,1.263,235,1.517,404,3.095,416,4.505]],["t/1253",[118,1.617,120,1.435,202,1.263,235,1.517,404,3.095,417,4.114]],["t/1255",[118,1.369,120,2.061,202,1.069,404,2.691,417,4.662,418,2.306]],["t/1257",[118,1.617,120,1.435,202,1.263,235,1.517,404,3.095,419,4.505]],["t/1259",[118,1.617,122,1.826,202,1.263,235,1.517,264,2.334,420,4.114,421,2.489]],["t/1261",[120,1.435,122,1.826,202,1.263,235,1.517,264,2.334,420,4.114,421,2.489]],["t/1263",[118,1.617,120,1.435,202,1.263,235,1.517,404,3.095,422,4.505]],["t/1265",[118,1.369,120,1.214,202,1.069,235,1.284,404,2.691,423,3.916,424,3.916]],["t/1267",[425,7.488]],["t/1269",[246,3.113,399,3.338,426,4.282,427,4.282]],["t/1271",[61,5.144]],["t/1275",[428,7.488]],["t/1277",[429,7.488]],["t/1279",[107,6.412]],["t/1281",[19,2.795,120,1.578,142,2.252,145,2.439,258,1.694,398,2.337]],["t/1283",[430,7.488]],["t/1285",[431,7.48
8]],["t/1287",[432,7.488]],["t/1289",[399,5.838]],["t/1291",[57,2.116,62,2.337,202,1.389,258,1.694,398,2.337,433,1.811,434,1.811]],["t/1293",[120,1.972,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1295",[118,2.223,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1297",[120,1.972,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1299",[122,2.51,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1301",[95,3.208,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1303",[98,2.921,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1305",[125,3.049,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1307",[127,3.049,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1309",[130,3.049,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1311",[132,3.049,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1313",[134,3.049,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1315",[139,2.645,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1317",[140,3.049,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1319",[142,2.815,202,1.736,258,2.117,433,2.263,434,2.263]],["t/1321",[399,5.838]],["t/1323",[61,5.144]],["t/1325",[435,7.488]]],"invertedIndex":[["",{"_index":19,"t":{"31":{"position":[[5,1]]},"47":{"position":[[10,1]]},"56":{"position":[[16,1]]},"64":{"position":[[5,1]]},"84":{"position":[[5,1]]},"114":{"position":[[7,1]]},"177":{"position":[[9,1]]},"329":{"position":[[6,1]]},"372":{"position":[[6,1]]},"458":{"position":[[5,1]]},"511":{"position":[[5,1]]},"823":{"position":[[11,1]]},"894":{"position":[[6,1]]},"914":{"position":[[11,1],[26,1]]},"1034":{"position":[[13,1]]},"1129":{"position":[[6,1]]},"1219":{"position":[[20,1],[25,1]]},"1281":{"position":[[20,1],[25,1]]}}}],["0",{"_index":118,"t":{"208":{"position":[[0,2]]},"1229":{"position":[[54,1],[71,1]]},"1231":{"position":[[10,1]]},"1235":{"position":[[14,1]]},"1237":{"position":[[12,1]]},"1239":{"position":[[13,1]]},"1241":{"position":[[41,1]]},"1245":{"position":[[12,1]]},"1251":{"position":[[49,1]]},"1253":{"position":[[37,1]]},"1255":{"position":[[23,1]]},"1257":{"position":[[21,1]]},"1259":{"position":[[6,1]]},"1263":{"position":[[12,1]]},"1265":{"position":[[16,1]]},"1295":{"position":[[7,2]]}}}],["1",{"_index":120,"t":{"210":{"position":[[0,2]]},"238":{"position":[[0,2]]},"266":{"position":[[0,2]]},"1219":{"position":[[22,1]]},"1229":{"position":[[12,1],[29,1]]},"1233":{"position":[[10,1]]},"1235":{"position":[[41,1]]},"1237":{"position":[[37,1]]},"1239":{"position":[[39,1]]},"1241":{"position":[[14,1]]},"1243":{"position":[[16,1]]},"1245":{"position":[[38,1]]},"1247":{"position":[[39,1]]},"1249":{"position":[[46,1]]},"1251":{"position":[[18,1]]},"1253":{"position":[[12,1]]},"1255":{"position":[[12,1],[40,1]]},"1257":{"position":[[55,1]]},"1261":{"position":[[6,1]]},"1263":{"position":[[37,1]]},"1265":{"position":[[45,1]]},"1281":{"position":[[22,1]]},"1293":{"position":[[8,2]]},"1297":{"position":[[7,2]]}}}],["10",{"_index":139,"t":{"226":{"position":[[0,3]]},"254":{"position":[[0,3]]},"282":{"position":[[0,3]]},"290":{"position":[[12,2]]},"493":{"position":[[6,3]]},"616":{"position":[[0,2]]},"629":{"position":[[0,2]]},"1315":{"position":[[7,3]]}}}],["100",{"_index":160,"t":{"280":{"position":[[15,3]]}}}],["11",{"_index":140,"t":{"228":{"position":[[0,3]]},"256":{"position":[[0,3]]},"284":{"position":[[0,3]]},"1317":{"position":[[7,3]]}}}],["12",{"_index":142,"t":{"230":{"position":[[0,3]]},"258":{"position":[[0,3]]},"286":{"position":[[0,3]]},"292":{"position":[[12,2]]},"1281":{"position":[[30,3]]},"1319":{"position":[[7,3]]}}}],["120",{"_index":414,"t":{"1249":{"position":[[12,3]]}}}],["13",{
"_index":145,"t":{"232":{"position":[[0,3]]},"288":{"position":[[0,3]]},"1219":{"position":[[27,3]]},"1281":{"position":[[27,2]]}}}],["14",{"_index":146,"t":{"234":{"position":[[0,3]]},"290":{"position":[[0,3]]}}}],["15",{"_index":166,"t":{"292":{"position":[[0,3]]}}}],["16",{"_index":167,"t":{"294":{"position":[[0,3]]}}}],["17",{"_index":171,"t":{"296":{"position":[[0,3]]}}}],["170m",{"_index":129,"t":{"218":{"position":[[9,4]]},"246":{"position":[[9,4]]},"274":{"position":[[9,4]]}}}],["18",{"_index":172,"t":{"298":{"position":[[0,3]]}}}],["2",{"_index":122,"t":{"212":{"position":[[0,2]]},"240":{"position":[[0,2]]},"268":{"position":[[0,2]]},"954":{"position":[[24,1]]},"1231":{"position":[[33,1]]},"1233":{"position":[[33,1]]},"1243":{"position":[[45,1]]},"1259":{"position":[[25,1]]},"1261":{"position":[[25,1]]},"1299":{"position":[[7,2]]}}}],["2084",{"_index":170,"t":{"294":{"position":[[17,4]]}}}],["2085",{"_index":141,"t":{"228":{"position":[[10,4]]}}}],["22",{"_index":153,"t":{"260":{"position":[[0,3]]}}}],["240m",{"_index":131,"t":{"220":{"position":[[9,4]]},"248":{"position":[[9,4]]},"276":{"position":[[9,4]]}}}],["3",{"_index":95,"t":{"157":{"position":[[3,1]]},"159":{"position":[[5,1]]},"1301":{"position":[[7,2]]}}}],["316",{"_index":161,"t":{"282":{"position":[[16,3]]}}}],["3213",{"_index":156,"t":{"260":{"position":[[13,4]]}}}],["4",{"_index":98,"t":{"163":{"position":[[3,1]]},"214":{"position":[[0,2]]},"242":{"position":[[0,2]]},"270":{"position":[[0,2]]},"1303":{"position":[[7,2]]}}}],["428",{"_index":149,"t":{"254":{"position":[[10,3]]},"296":{"position":[[10,3]]}}}],["5",{"_index":125,"t":{"216":{"position":[[0,2]]},"244":{"position":[[0,2]]},"272":{"position":[[0,2]]},"1305":{"position":[[7,2]]}}}],["6",{"_index":127,"t":{"218":{"position":[[0,2]]},"246":{"position":[[0,2]]},"274":{"position":[[0,2]]},"1307":{"position":[[7,2]]}}}],["60",{"_index":413,"t":{"1247":{"position":[[12,2]]}}}],["7",{"_index":130,"t":{"220":{"position":[[0,2]]},"248":{"position":[[0,2]]},"276":{"position":[[0,2]]},"1309":{"position":[[7,2]]}}}],["8",{"_index":132,"t":{"222":{"position":[[0,2]]},"250":{"position":[[0,2]]},"278":{"position":[[0,2]]},"1311":{"position":[[7,2]]}}}],["9",{"_index":134,"t":{"224":{"position":[[0,2]]},"252":{"position":[[0,2]]},"280":{"position":[[0,2]]},"1313":{"position":[[7,2]]}}}],["aac",{"_index":69,"t":{"112":{"position":[[0,3]]},"114":{"position":[[0,3],[9,3]]},"116":{"position":[[3,3]]},"120":{"position":[[4,3]]},"124":{"position":[[15,3]]},"128":{"position":[[7,3]]},"132":{"position":[[5,3]]}}}],["aacv2",{"_index":73,"t":{"118":{"position":[[3,5]]}}}],["ac",{"_index":94,"t":{"157":{"position":[[0,2]]},"159":{"position":[[2,2]]},"163":{"position":[[0,2]]}}}],["adapt",{"_index":264,"t":{"655":{"position":[[0,8]]},"1259":{"position":[[36,9]]},"1261":{"position":[[36,9]]}}}],["addit",{"_index":233,"t":{"528":{"position":[[8,9]]},"988":{"position":[[0,10]]}}}],["adopt",{"_index":180,"t":{"317":{"position":[[0,8]]}}}],["advantag",{"_index":299,"t":{"785":{"position":[[0,10]]},"817":{"position":[[0,10]]}}}],["aka",{"_index":169,"t":{"294":{"position":[[7,3]]},"298":{"position":[[8,3]]}}}],["altern",{"_index":276,"t":{"702":{"position":[[4,12]]}}}],["aomdec",{"_index":218,"t":{"443":{"position":[[0,6]]}}}],["aomenc",{"_index":64,"t":{"100":{"position":[[0,6]]}}}],["aq",{"_index":272,"t":{"691":{"position":[[0,2]]},"1231":{"position":[[2,2],[25,2]]},"1233":{"position":[[2,2],[25,2]]}}}],["arch",{"_index":14,"t":{"25":{"position":[[0,4]]},"437":{"position":[[0,4]]},"625":{"
position":[[0,4]]},"764":{"position":[[0,4]]},"1151":{"position":[[6,6]]},"1159":{"position":[[51,5]]}}}],["archiv",{"_index":213,"t":{"394":{"position":[[13,7]]},"396":{"position":[[14,7]]}}}],["ass",{"_index":347,"t":{"994":{"position":[[0,3]]}}}],["atmo",{"_index":99,"t":{"165":{"position":[[0,5]]}}}],["audio",{"_index":78,"t":{"126":{"position":[[5,5]]},"179":{"position":[[9,5]]},"181":{"position":[[6,5]]},"1046":{"position":[[0,5]]},"1083":{"position":[[0,5]]},"1089":{"position":[[6,5]]}}}],["auto",{"_index":412,"t":{"1245":{"position":[[40,6]]},"1247":{"position":[[41,6]]},"1249":{"position":[[48,6]]}}}],["autom",{"_index":7,"t":{"11":{"position":[[4,9]]}}}],["autotool",{"_index":224,"t":{"491":{"position":[[0,9]]}}}],["av1",{"_index":57,"t":{"82":{"position":[[4,4]]},"96":{"position":[[4,3]]},"102":{"position":[[4,3]]},"452":{"position":[[0,3]]},"469":{"position":[[0,3]]},"536":{"position":[[23,3]]},"610":{"position":[[4,3]]},"1291":{"position":[[4,3]]}}}],["aviator'",{"_index":365,"t":{"1040":{"position":[[0,9]]}}}],["avif",{"_index":219,"t":{"454":{"position":[[0,4]]},"471":{"position":[[0,4]]}}}],["banding/contour",{"_index":360,"t":{"1012":{"position":[[0,18]]}}}],["basic",{"_index":390,"t":{"1157":{"position":[[0,5]]}}}],["bdmv",{"_index":379,"t":{"1085":{"position":[[4,4]]}}}],["befor",{"_index":183,"t":{"325":{"position":[[0,6]]}}}],["benchmark",{"_index":300,"t":{"815":{"position":[[0,10]]}}}],["bestsourc",{"_index":296,"t":{"778":{"position":[[0,10]]}}}],["between",{"_index":34,"t":{"49":{"position":[[12,7]]}}}],["bframe",{"_index":260,"t":{"647":{"position":[[0,7]]}}}],["binari",{"_index":247,"t":{"575":{"position":[[11,8]]}}}],["bit",{"_index":165,"t":{"290":{"position":[[15,3]]},"292":{"position":[[15,3]]},"351":{"position":[[0,3]]},"493":{"position":[[10,3]]},"616":{"position":[[3,3]]},"629":{"position":[[3,3]]},"717":{"position":[[0,3]]},"867":{"position":[[10,3]]}}}],["bitstream",{"_index":319,"t":{"892":{"position":[[0,9]]},"906":{"position":[[0,9]]}}}],["bleed",{"_index":357,"t":{"1006":{"position":[[6,5]]}}}],["block",{"_index":359,"t":{"1010":{"position":[[0,8]]}}}],["breakdown",{"_index":68,"t":{"110":{"position":[[7,9]]},"189":{"position":[[7,9]]},"310":{"position":[[7,9]]},"367":{"position":[[7,9]]},"378":{"position":[[7,9]]},"784":{"position":[[7,9]]},"813":{"position":[[7,9]]},"855":{"position":[[0,9]]},"861":{"position":[[7,9]]},"1191":{"position":[[7,9]]}}}],["brief",{"_index":88,"t":{"146":{"position":[[2,5]]}}}],["bt.1361e",{"_index":163,"t":{"286":{"position":[[4,8]]}}}],["bt.1886",{"_index":157,"t":{"266":{"position":[[3,7]]}}}],["bt.2020",{"_index":135,"t":{"224":{"position":[[3,7]]},"226":{"position":[[4,7]]},"252":{"position":[[3,7]]},"290":{"position":[[4,7]]},"292":{"position":[[4,7]]}}}],["bt.470bg",{"_index":126,"t":{"216":{"position":[[3,8]]},"244":{"position":[[3,8]]},"272":{"position":[[3,8]]}}}],["bt.470m",{"_index":124,"t":{"214":{"position":[[3,7]]},"242":{"position":[[3,7]]},"270":{"position":[[3,7]]}}}],["bt.709",{"_index":121,"t":{"210":{"position":[[3,6]]},"238":{"position":[[3,6]]}}}],["build",{"_index":192,"t":{"337":{"position":[[0,5]]}}}],["bump",{"_index":90,"t":{"148":{"position":[[0,5]]}}}],["catch",{"_index":397,"t":{"1179":{"position":[[17,5]]}}}],["cdef",{"_index":405,"t":{"1235":{"position":[[9,4],[36,4]]}}}],["celt",{"_index":113,"t":{"193":{"position":[[0,4]]}}}],["chang",{"_index":236,"t":{"532":{"position":[[6,7]]}}}],["checklist",{"_index":298,"t":{"782":{"position":[[12,9]]},"793":{"position":[[12,9]]},"797":{"position":
[[12,9]]},"801":{"position":[[12,9]]},"811":{"position":[[12,9]]},"829":{"position":[[12,9]]},"859":{"position":[[12,9]]},"929":{"position":[[12,9]]}}}],["chroma",{"_index":205,"t":{"357":{"position":[[0,6]]}}}],["chromat",{"_index":143,"t":{"230":{"position":[[4,12]]},"232":{"position":[[4,12]]}}}],["cleanup",{"_index":392,"t":{"1159":{"position":[[27,8]]}}}],["cli",{"_index":60,"t":{"90":{"position":[[0,3]]}}}],["clone",{"_index":186,"t":{"329":{"position":[[0,5]]},"579":{"position":[[0,7]]}}}],["close",{"_index":42,"t":{"60":{"position":[[0,7]]},"150":{"position":[[0,7]]}}}],["cmake",{"_index":225,"t":{"493":{"position":[[0,5]]}}}],["codec",{"_index":50,"t":{"70":{"position":[[4,6]]},"706":{"position":[[4,6]]},"849":{"position":[[10,5]]},"900":{"position":[[0,5]]},"1034":{"position":[[6,6]]}}}],["color",{"_index":194,"t":{"343":{"position":[[0,5]]},"464":{"position":[[10,5]]},"487":{"position":[[10,5]]},"503":{"position":[[10,5]]},"519":{"position":[[10,5]]},"556":{"position":[[10,5]]},"573":{"position":[[10,5]]},"597":{"position":[[10,5]]},"677":{"position":[[10,5]]},"1006":{"position":[[0,5]]}}}],["colorimetri",{"_index":283,"t":{"717":{"position":[[14,11]]}}}],["column",{"_index":403,"t":{"1229":{"position":[[21,7],[63,7]]}}}],["command",{"_index":105,"t":{"173":{"position":[[23,7]]}}}],["common",{"_index":207,"t":{"359":{"position":[[0,6]]}}}],["commun",{"_index":222,"t":{"475":{"position":[[0,9]]},"608":{"position":[[0,9]]}}}],["compar",{"_index":337,"t":{"954":{"position":[[0,9]]},"986":{"position":[[0,9]]}}}],["comparison",{"_index":398,"t":{"1219":{"position":[[8,11]]},"1223":{"position":[[6,11]]},"1227":{"position":[[11,11]]},"1281":{"position":[[8,11]]},"1291":{"position":[[33,12]]}}}],["compil",{"_index":12,"t":{"23":{"position":[[4,9]]},"439":{"position":[[0,9]]},"577":{"position":[[0,9]]},"627":{"position":[[0,9]]}}}],["compon",{"_index":198,"t":{"349":{"position":[[0,9]]}}}],["compress",{"_index":52,"t":{"74":{"position":[[0,11]]},"179":{"position":[[15,11]]},"181":{"position":[[12,11]]},"402":{"position":[[0,8]]},"410":{"position":[[0,11]]},"803":{"position":[[0,11]]},"825":{"position":[[23,11]]},"843":{"position":[[10,11]]},"863":{"position":[[9,11]]},"865":{"position":[[6,11]]},"871":{"position":[[17,11]]}}}],["conclus",{"_index":61,"t":{"92":{"position":[[0,10]]},"104":{"position":[[0,10]]},"106":{"position":[[6,10]]},"136":{"position":[[0,10]]},"183":{"position":[[0,10]]},"384":{"position":[[0,10]]},"789":{"position":[[0,10]]},"1271":{"position":[[0,10]]},"1323":{"position":[[0,10]]}}}],["configur",{"_index":245,"t":{"567":{"position":[[12,13]]},"581":{"position":[[0,11]]},"583":{"position":[[6,11]]}}}],["connect",{"_index":185,"t":{"327":{"position":[[0,7]]}}}],["consol",{"_index":87,"t":{"144":{"position":[[14,7]]}}}],["constant",{"_index":137,"t":{"224":{"position":[[15,8]]},"226":{"position":[[12,8]]},"230":{"position":[[29,8]]},"232":{"position":[[25,8]]}}}],["contain",{"_index":324,"t":{"908":{"position":[[0,9]]},"1034":{"position":[[15,10]]}}}],["content",{"_index":421,"t":{"1259":{"position":[[27,8]]},"1261":{"position":[[27,8]]}}}],["contribut",{"_index":184,"t":{"325":{"position":[[11,10]]},"702":{"position":[[32,10]]}}}],["contributor",{"_index":279,"t":{"704":{"position":[[26,12]]}}}],["convers",{"_index":378,"t":{"1083":{"position":[[6,10]]}}}],["core",{"_index":77,"t":{"126":{"position":[[0,4]]}}}],["cosin",{"_index":331,"t":{"916":{"position":[[9,6]]}}}],["creat",{"_index":211,"t":{"394":{"position":[[0,6]]}}}],["credit",{"_index":367,"t":{"1050":{"posit
ion":[[0,7]]}}}],["crf",{"_index":259,"t":{"645":{"position":[[0,3]]}}}],["crop",{"_index":284,"t":{"719":{"position":[[0,8]]}}}],["cu",{"_index":266,"t":{"657":{"position":[[0,2]]}}}],["cue",{"_index":342,"t":{"970":{"position":[[0,3]]}}}],["dci",{"_index":150,"t":{"256":{"position":[[4,3]]}}}],["dct",{"_index":333,"t":{"916":{"position":[[26,5]]}}}],["deblock",{"_index":262,"t":{"651":{"position":[[0,7]]}}}],["decod",{"_index":308,"t":{"869":{"position":[[12,6]]},"887":{"position":[[0,8]]},"927":{"position":[[0,8]]},"1205":{"position":[[0,8]]},"1241":{"position":[[7,6],[34,6]]}}}],["decompress",{"_index":216,"t":{"404":{"position":[[0,10]]},"412":{"position":[[0,13]]}}}],["default",{"_index":235,"t":{"530":{"position":[[9,8]]},"1040":{"position":[[10,8]]},"1229":{"position":[[34,7]]},"1231":{"position":[[15,7]]},"1233":{"position":[[15,7]]},"1235":{"position":[[19,7]]},"1237":{"position":[[17,7]]},"1239":{"position":[[18,7]]},"1241":{"position":[[19,7]]},"1243":{"position":[[21,7]]},"1245":{"position":[[17,7]]},"1247":{"position":[[18,7]]},"1249":{"position":[[25,7]]},"1251":{"position":[[23,7]]},"1253":{"position":[[17,7]]},"1257":{"position":[[26,7]]},"1259":{"position":[[11,7]]},"1261":{"position":[[11,7]]},"1263":{"position":[[17,7]]},"1265":{"position":[[21,7]]}}}],["delay",{"_index":382,"t":{"1089":{"position":[[0,5]]}}}],["demux",{"_index":381,"t":{"1087":{"position":[[0,5]]}}}],["depend",{"_index":370,"t":{"1071":{"position":[[11,12]]}}}],["deploy",{"_index":193,"t":{"339":{"position":[[0,10]]}}}],["depth",{"_index":200,"t":{"351":{"position":[[4,5]]},"717":{"position":[[4,5]]}}}],["depth(",{"_index":306,"t":{"867":{"position":[[14,8]]}}}],["deriv",{"_index":144,"t":{"230":{"position":[[17,7]]},"232":{"position":[[17,7]]}}}],["descript",{"_index":363,"t":{"1020":{"position":[[0,11]]}}}],["develop",{"_index":191,"t":{"335":{"position":[[6,11]]}}}],["dg",{"_index":406,"t":{"1237":{"position":[[9,2],[34,2]]}}}],["differ",{"_index":33,"t":{"49":{"position":[[0,11]]}}}],["disclosur",{"_index":26,"t":{"39":{"position":[[19,10]]}}}],["discord'",{"_index":30,"t":{"45":{"position":[[0,9]]}}}],["discoveri",{"_index":38,"t":{"54":{"position":[[0,9]]}}}],["discret",{"_index":330,"t":{"916":{"position":[[0,8]]}}}],["display",{"_index":152,"t":{"258":{"position":[[4,7]]}}}],["dlf",{"_index":407,"t":{"1239":{"position":[[9,3],[35,3]]}}}],["do",{"_index":275,"t":{"700":{"position":[[12,5]]}}}],["do'",{"_index":58,"t":{"84":{"position":[[0,4]]}}}],["docker",{"_index":369,"t":{"1069":{"position":[[0,6]]}}}],["dolbi",{"_index":242,"t":{"565":{"position":[[14,5]]},"839":{"position":[[0,5]]}}}],["don't",{"_index":59,"t":{"84":{"position":[[7,6]]}}}],["dwayn",{"_index":37,"t":{"52":{"position":[[0,6]]}}}],["e",{"_index":96,"t":{"159":{"position":[[0,1]]}}}],["earli",{"_index":426,"t":{"1269":{"position":[[0,5]]}}}],["ebu",{"_index":154,"t":{"260":{"position":[[4,3]]}}}],["effect",{"_index":356,"t":{"1004":{"position":[[10,6]]}}}],["efficaci",{"_index":53,"t":{"74":{"position":[[12,8]]}}}],["effici",{"_index":430,"t":{"1283":{"position":[[0,10]]}}}],["eld",{"_index":72,"t":{"114":{"position":[[13,3]]}}}],["elementari",{"_index":320,"t":{"896":{"position":[[0,10]]}}}],["enabl",{"_index":404,"t":{"1235":{"position":[[2,6],[29,6]]},"1237":{"position":[[2,6],[27,6]]},"1239":{"position":[[2,6],[28,6]]},"1251":{"position":[[2,6],[33,6]]},"1253":{"position":[[2,6],[27,6]]},"1255":{"position":[[2,6],[30,6]]},"1257":{"position":[[2,6],[36,6]]},"1263":{"position":[[2,6],[27,6]]},"1265":{"position":[[2,6],[31,6]]}
}}],["enc",{"_index":117,"t":{"200":{"position":[[4,3]]}}}],["encod",{"_index":15,"t":{"27":{"position":[[0,8]]},"94":{"position":[[0,8]]},"122":{"position":[[0,8]]},"195":{"position":[[0,8]]},"312":{"position":[[0,8]]},"371":{"position":[[0,8]]},"382":{"position":[[0,8]]},"452":{"position":[[4,8]]},"454":{"position":[[5,8]]},"469":{"position":[[4,8]]},"471":{"position":[[5,8]]},"563":{"position":[[0,8]]},"565":{"position":[[0,8]]},"591":{"position":[[0,8]]},"601":{"position":[[0,8]]},"606":{"position":[[0,7]]},"697":{"position":[[9,8]]},"877":{"position":[[0,8]]},"924":{"position":[[0,8]]},"1197":{"position":[[0,8]]},"1203":{"position":[[0,8]]}}}],["end",{"_index":29,"t":{"43":{"position":[[14,3]]},"45":{"position":[[10,3]]}}}],["endian",{"_index":204,"t":{"355":{"position":[[0,10]]}}}],["entropi",{"_index":302,"t":{"823":{"position":[[13,7]]}}}],["error",{"_index":371,"t":{"1074":{"position":[[0,7],[72,7]]}}}],["everyth",{"_index":17,"t":{"29":{"position":[[8,10]]}}}],["exampl",{"_index":340,"t":{"964":{"position":[[0,7]]},"972":{"position":[[0,7]]},"1024":{"position":[[0,8]]}}}],["exhal",{"_index":82,"t":{"134":{"position":[[0,6]]}}}],["exist",{"_index":277,"t":{"702":{"position":[[17,6]]}}}],["experi",{"_index":39,"t":{"56":{"position":[[4,11]]}}}],["extract",{"_index":214,"t":{"396":{"position":[[0,7]]},"1109":{"position":[[0,10]]}}}],["faac",{"_index":80,"t":{"130":{"position":[[0,4]]}}}],["fallback",{"_index":51,"t":{"72":{"position":[[0,9]]}}}],["fast",{"_index":408,"t":{"1241":{"position":[[2,4],[29,4]]}}}],["fdk",{"_index":76,"t":{"124":{"position":[[11,3]]}}}],["featur",{"_index":177,"t":{"306":{"position":[[0,8]]},"528":{"position":[[0,7]]},"536":{"position":[[9,9]]},"875":{"position":[[6,8]]},"962":{"position":[[10,8]]}}}],["feedback",{"_index":428,"t":{"1275":{"position":[[0,8]]}}}],["ffmpeg",{"_index":79,"t":{"128":{"position":[[0,6]]},"171":{"position":[[18,7]]},"315":{"position":[[0,6]]},"420":{"position":[[0,6]]},"428":{"position":[[0,6]]},"447":{"position":[[0,6]]},"462":{"position":[[0,6]]},"485":{"position":[[0,6]]},"501":{"position":[[0,6]]},"517":{"position":[[0,6]]},"544":{"position":[[0,6]]},"554":{"position":[[0,6]]},"571":{"position":[[0,6]]},"595":{"position":[[0,6]]},"637":{"position":[[0,6]]},"663":{"position":[[0,6]]},"675":{"position":[[0,6]]},"978":{"position":[[16,6]]},"1133":{"position":[[6,6]]}}}],["ffms2",{"_index":295,"t":{"776":{"position":[[0,5]]}}}],["ffopu",{"_index":115,"t":{"198":{"position":[[0,6]]}}}],["file",{"_index":215,"t":{"402":{"position":[[11,4]]},"404":{"position":[[13,4]]},"581":{"position":[[12,4]]},"1074":{"position":[[12,4],[67,4]]}}}],["film",{"_index":148,"t":{"250":{"position":[[3,4]]}}}],["filter",{"_index":282,"t":{"713":{"position":[[9,7]]},"715":{"position":[[0,6]]},"772":{"position":[[7,7]]},"902":{"position":[[0,6]]},"906":{"position":[[10,6]]}}}],["final",{"_index":21,"t":{"33":{"position":[[0,5]]},"106":{"position":[[0,5]]}}}],["fire",{"_index":43,"t":{"64":{"position":[[0,4]]}}}],["first",{"_index":24,"t":{"39":{"position":[[4,6]]}}}],["flac",{"_index":103,"t":{"171":{"position":[[7,4]]},"173":{"position":[[7,4],[18,4]]}}}],["flash",{"_index":376,"t":{"1076":{"position":[[12,8]]}}}],["footnot",{"_index":209,"t":{"363":{"position":[[0,9]]}}}],["forget",{"_index":44,"t":{"64":{"position":[[7,6]]}}}],["fork",{"_index":223,"t":{"475":{"position":[[10,5]]},"608":{"position":[[10,5]]}}}],["format",{"_index":67,"t":{"110":{"position":[[0,6]]},"156":{"position":[[0,6]]},"189":{"position":[[0,6]]},"310":{"position":[[0,6]]},"359"
:{"position":[[7,7]]},"367":{"position":[[0,6]]},"378":{"position":[[0,6]]},"784":{"position":[[0,6]]},"813":{"position":[[0,6]]},"861":{"position":[[0,6]]},"960":{"position":[[0,6]]},"992":{"position":[[0,6]]},"1191":{"position":[[0,6]]}}}],["frame",{"_index":273,"t":{"693":{"position":[[10,6]]},"1076":{"position":[[34,5]]},"1179":{"position":[[6,5]]}}}],["framework",{"_index":232,"t":{"526":{"position":[[14,9]]}}}],["fraunhof",{"_index":75,"t":{"124":{"position":[[0,10]]}}}],["frequenc",{"_index":109,"t":{"177":{"position":[[23,9]]}}}],["further",{"_index":351,"t":{"996":{"position":[[0,7]]}}}],["futur",{"_index":435,"t":{"1325":{"position":[[0,6]]}}}],["gamma",{"_index":176,"t":{"298":{"position":[[23,5]]}}}],["gnu",{"_index":251,"t":{"585":{"position":[[8,3]]}}}],["googl",{"_index":85,"t":{"144":{"position":[[0,6]]}}}],["gop",{"_index":271,"t":{"689":{"position":[[5,3]]}}}],["gray",{"_index":374,"t":{"1076":{"position":[[0,4]]}}}],["gui",{"_index":4,"t":{"7":{"position":[[4,3]]},"19":{"position":[[4,3]]},"88":{"position":[[0,3]]}}}],["handbrak",{"_index":239,"t":{"546":{"position":[[0,9]]}}}],["hc",{"_index":389,"t":{"1141":{"position":[[4,2]]}}}],["hdr",{"_index":241,"t":{"563":{"position":[[9,3]]}}}],["hdr10",{"_index":228,"t":{"507":{"position":[[26,6]]},"835":{"position":[[0,5]]},"837":{"position":[[0,6]]},"1113":{"position":[[9,6]]}}}],["header",{"_index":350,"t":{"994":{"position":[[14,6]]}}}],["histori",{"_index":89,"t":{"146":{"position":[[8,7]]},"369":{"position":[[0,7]]},"380":{"position":[[0,7]]},"1173":{"position":[[0,7]]}}}],["hlg",{"_index":173,"t":{"298":{"position":[[4,3]]},"833":{"position":[[0,3]]}}}],["hqdn3d",{"_index":287,"t":{"731":{"position":[[0,6]]}}}],["hybrid",{"_index":174,"t":{"298":{"position":[[12,6]]},"308":{"position":[[0,6]]}}}],["hydrium",{"_index":314,"t":{"883":{"position":[[0,7]]}}}],["ictcp",{"_index":147,"t":{"234":{"position":[[4,5]]}}}],["ident",{"_index":119,"t":{"208":{"position":[[3,8]]}}}],["imag",{"_index":55,"t":{"76":{"position":[[11,6]]},"936":{"position":[[3,6]]}}}],["improv",{"_index":46,"t":{"66":{"position":[[8,11]]}}}],["industri",{"_index":311,"t":{"873":{"position":[[0,8]]}}}],["info",{"_index":380,"t":{"1085":{"position":[[9,4]]}}}],["inject",{"_index":383,"t":{"1111":{"position":[[0,9]]}}}],["instal",{"_index":0,"t":{"4":{"position":[[0,10]]},"333":{"position":[[0,12]]},"430":{"position":[[0,12]]},"436":{"position":[[0,12]]},"449":{"position":[[0,12]]},"466":{"position":[[0,12]]},"479":{"position":[[0,12]]},"489":{"position":[[0,12]]},"505":{"position":[[0,12]]},"507":{"position":[[8,12]]},"534":{"position":[[0,12]]},"558":{"position":[[0,12]]},"575":{"position":[[0,10]]},"599":{"position":[[0,12]]},"614":{"position":[[0,12]]},"624":{"position":[[0,12]]},"639":{"position":[[0,12]]},"661":{"position":[[0,12]]},"679":{"position":[[0,12]]},"761":{"position":[[0,12]]},"933":{"position":[[0,10]]},"948":{"position":[[0,12]]},"976":{"position":[[0,12]]},"1026":{"position":[[0,12]]},"1038":{"position":[[0,12]]},"1054":{"position":[[0,12]]},"1062":{"position":[[0,12]]},"1071":{"position":[[0,10]]},"1080":{"position":[[0,12]]},"1093":{"position":[[0,12]]},"1099":{"position":[[0,12]]},"1105":{"position":[[0,12]]},"1121":{"position":[[0,12]]},"1127":{"position":[[0,12]]},"1149":{"position":[[0,12]]},"1159":{"position":[[10,12]]},"1183":{"position":[[0,12]]}}}],["installt",{"_index":391,"t":{"1157":{"position":[[6,11]]}}}],["instruct",{"_index":188,"t":{"329":{"position":[[13,12]]}}}],["integr",{"_index":268,"t":{"663":{"position":[[7,11]]}}
}],["interact",{"_index":40,"t":{"56":{"position":[[18,11]]}}}],["interpret",{"_index":432,"t":{"1287":{"position":[[0,14]]}}}],["intra",{"_index":396,"t":{"1179":{"position":[[0,5]]}}}],["intro",{"_index":281,"t":{"713":{"position":[[0,5]]}}}],["introduct",{"_index":56,"t":{"80":{"position":[[0,12]]},"759":{"position":[[0,12]]}}}],["irefresh",{"_index":409,"t":{"1243":{"position":[[2,8],[31,8]]}}}],["isn't",{"_index":304,"t":{"847":{"position":[[10,5]]}}}],["issu",{"_index":181,"t":{"317":{"position":[[9,6]]}}}],["jpeg",{"_index":309,"t":{"871":{"position":[[9,4]]}}}],["jpegxl",{"_index":316,"t":{"885":{"position":[[5,6]]}}}],["jxl",{"_index":317,"t":{"888":{"position":[[0,3]]}}}],["key",{"_index":364,"t":{"1032":{"position":[[0,3]]}}}],["la",{"_index":424,"t":{"1265":{"position":[[13,2],[42,2]]}}}],["lazi",{"_index":47,"t":{"68":{"position":[[0,4]]}}}],["lc",{"_index":70,"t":{"112":{"position":[[4,2]]}}}],["ld",{"_index":71,"t":{"114":{"position":[[4,2]]}}}],["libaom",{"_index":65,"t":{"100":{"position":[[7,8]]}}}],["libjxl",{"_index":312,"t":{"879":{"position":[[0,6]]},"881":{"position":[[0,6]]}}}],["libwebp",{"_index":334,"t":{"925":{"position":[[6,7]]}}}],["licens",{"_index":238,"t":{"538":{"position":[[0,7]]},"1028":{"position":[[0,7]]}}}],["limit",{"_index":32,"t":{"47":{"position":[[12,11]]},"787":{"position":[[0,11]]},"819":{"position":[[0,11]]}}}],["line",{"_index":106,"t":{"173":{"position":[[31,4]]}}}],["linear",{"_index":158,"t":{"278":{"position":[[3,6]]}}}],["linux",{"_index":10,"t":{"17":{"position":[[0,5]]},"372":{"position":[[0,5]]},"437":{"position":[[5,5]]},"625":{"position":[[5,5]]},"764":{"position":[[5,5]]},"766":{"position":[[6,5]]},"1067":{"position":[[0,5]]},"1074":{"position":[[97,5]]},"1129":{"position":[[0,5]]},"1151":{"position":[[0,5]]},"1153":{"position":[[0,5]]}}}],["list",{"_index":244,"t":{"567":{"position":[[0,4]]}}}],["load",{"_index":48,"t":{"68":{"position":[[5,7]]}}}],["local",{"_index":190,"t":{"335":{"position":[[0,5]]}}}],["log",{"_index":175,"t":{"298":{"position":[[19,3]]}}}],["logarithm",{"_index":159,"t":{"280":{"position":[[3,11]]},"282":{"position":[[4,11]]}}}],["lookahead",{"_index":411,"t":{"1245":{"position":[[2,9],[27,9]]},"1247":{"position":[[2,9],[28,9]]},"1249":{"position":[[2,9],[35,9]]}}}],["lore",{"_index":36,"t":{"51":{"position":[[4,4]]}}}],["lossi",{"_index":111,"t":{"181":{"position":[[0,5]]},"843":{"position":[[4,5]]},"865":{"position":[[0,5]]},"894":{"position":[[0,5]]}}}],["lossless",{"_index":110,"t":{"179":{"position":[[0,8]]},"697":{"position":[[0,8]]},"825":{"position":[[14,8]]},"863":{"position":[[0,8]]},"871":{"position":[[0,8]]},"894":{"position":[[8,8]]}}}],["lsmashsourc",{"_index":294,"t":{"774":{"position":[[0,12]]}}}],["lumin",{"_index":138,"t":{"224":{"position":[[24,9]]},"226":{"position":[[21,9]]},"230":{"position":[[38,9]]},"232":{"position":[[34,9]]}}}],["maco",{"_index":9,"t":{"15":{"position":[[0,5]]},"372":{"position":[[8,5]]},"1065":{"position":[[0,5]]},"1129":{"position":[[8,5]]},"1163":{"position":[[0,5]]}}}],["make",{"_index":252,"t":{"585":{"position":[[12,4]]}}}],["manual",{"_index":8,"t":{"13":{"position":[[4,6]]}}}],["massiv",{"_index":45,"t":{"66":{"position":[[0,7]]}}}],["max",{"_index":415,"t":{"1249":{"position":[[16,5]]}}}],["mb",{"_index":274,"t":{"695":{"position":[[0,2]]}}}],["merg",{"_index":16,"t":{"29":{"position":[[0,7]]}}}],["metadata",{"_index":385,"t":{"1113":{"position":[[16,8]]}}}],["methodolog",{"_index":429,"t":{"1277":{"position":[[0,11]]}}}],["micro",{"_index":230,"t":{"526":{"p
osition":[[0,5]]}}}],["microsoft",{"_index":2,"t":{"6":{"position":[[0,9]]},"762":{"position":[[0,9]]}}}],["min",{"_index":418,"t":{"1255":{"position":[[19,3]]}}}],["mkvmerg",{"_index":373,"t":{"1074":{"position":[[85,8]]}}}],["mode",{"_index":178,"t":{"308":{"position":[[7,4]]},"691":{"position":[[3,4]]},"1231":{"position":[[5,4],[28,4]]},"1233":{"position":[[5,4],[28,4]]}}}],["model",{"_index":195,"t":{"343":{"position":[[6,6]]},"980":{"position":[[15,5]]}}}],["modifi",{"_index":234,"t":{"530":{"position":[[0,8]]}}}],["moir",{"_index":353,"t":{"1002":{"position":[[0,5]]}}}],["mosquito",{"_index":361,"t":{"1014":{"position":[[0,8]]}}}],["mpc",{"_index":388,"t":{"1141":{"position":[[0,3]]}}}],["mpv",{"_index":386,"t":{"1137":{"position":[[0,3]]}}}],["msys2",{"_index":226,"t":{"495":{"position":[[0,5]]}}}],["mux",{"_index":322,"t":{"898":{"position":[[0,6]]}}}],["muxer/demux",{"_index":323,"t":{"904":{"position":[[0,13]]}}}],["need",{"_index":305,"t":{"851":{"position":[[9,4]]}}}],["nero",{"_index":81,"t":{"132":{"position":[[0,4]]}}}],["new",{"_index":49,"t":{"70":{"position":[[0,3]]}}}],["nlmean",{"_index":288,"t":{"735":{"position":[[0,7]]}}}],["nois",{"_index":362,"t":{"1014":{"position":[[9,5]]}}}],["non",{"_index":136,"t":{"224":{"position":[[11,3]]},"230":{"position":[[25,3]]}}}],["normal",{"_index":240,"t":{"561":{"position":[[0,6]]}}}],["note",{"_index":182,"t":{"319":{"position":[[0,5]]},"620":{"position":[[0,5]]},"741":{"position":[[0,5]]},"755":{"position":[[0,5]]},"952":{"position":[[0,5]]},"980":{"position":[[0,4]]}}}],["nyquist",{"_index":108,"t":{"177":{"position":[[15,7]]}}}],["open",{"_index":270,"t":{"689":{"position":[[0,4]]},"1074":{"position":[[42,6],[62,4]]}}}],["optim",{"_index":256,"t":{"606":{"position":[[8,12]]},"1042":{"position":[[11,12]]}}}],["option",{"_index":249,"t":{"583":{"position":[[18,7]]},"1161":{"position":[[21,10]]},"1177":{"position":[[0,7]]}}}],["opusenc",{"_index":114,"t":{"196":{"position":[[0,7]]}}}],["order",{"_index":199,"t":{"349":{"position":[[10,5]]},"715":{"position":[[7,5]]}}}],["output",{"_index":292,"t":{"770":{"position":[[0,6]]},"1048":{"position":[[0,6]]},"1076":{"position":[[43,6]]}}}],["overlay",{"_index":416,"t":{"1251":{"position":[[9,8],[40,8]]}}}],["overview",{"_index":93,"t":{"156":{"position":[[7,8]]},"729":{"position":[[0,8]]},"747":{"position":[[0,8]]}}}],["oxid",{"_index":318,"t":{"888":{"position":[[4,5]]}}}],["p3",{"_index":151,"t":{"256":{"position":[[8,2]]},"258":{"position":[[12,2]]}}}],["pack",{"_index":201,"t":{"353":{"position":[[0,6]]}}}],["paramet",{"_index":246,"t":{"567":{"position":[[26,10]]},"641":{"position":[[0,10]]},"739":{"position":[[0,10]]},"751":{"position":[[14,10]]},"753":{"position":[[17,10]]},"1227":{"position":[[0,10]]},"1269":{"position":[[14,10]]}}}],["patch",{"_index":227,"t":{"507":{"position":[[0,7]]}}}],["path",{"_index":344,"t":{"980":{"position":[[21,4]]}}}],["pattern",{"_index":354,"t":{"1002":{"position":[[6,7]]}}}],["perceptu",{"_index":366,"t":{"1042":{"position":[[0,10]]}}}],["perceput",{"_index":327,"t":{"914":{"position":[[0,10]]}}}],["perform",{"_index":297,"t":{"782":{"position":[[0,11]]},"793":{"position":[[0,11]]},"797":{"position":[[0,11]]},"801":{"position":[[0,11]]},"811":{"position":[[0,11]]},"829":{"position":[[0,11]]},"859":{"position":[[0,11]]},"929":{"position":[[0,11]]}}}],["planar",{"_index":203,"t":{"353":{"position":[[10,6]]}}}],["plausibl",{"_index":84,"t":{"142":{"position":[[0,9]]}}}],["pq",{"_index":168,"t":{"294":{"position":[[4,2]]}}}],["prerequisit",{"_index":3
68,"t":{"1060":{"position":[[0,13]]}}}],["preset",{"_index":258,"t":{"643":{"position":[[0,6]]},"685":{"position":[[0,6]]},"1219":{"position":[[0,7]]},"1281":{"position":[[0,7]]},"1291":{"position":[[25,7]]},"1293":{"position":[[0,6]]},"1295":{"position":[[0,6]]},"1297":{"position":[[0,6]]},"1299":{"position":[[0,6]]},"1301":{"position":[[0,6]]},"1303":{"position":[[0,6]]},"1305":{"position":[[0,6]]},"1307":{"position":[[0,6]]},"1309":{"position":[[0,6]]},"1311":{"position":[[0,6]]},"1313":{"position":[[0,6]]},"1315":{"position":[[0,6]]},"1317":{"position":[[0,6]]},"1319":{"position":[[0,6]]}}}],["preview",{"_index":291,"t":{"768":{"position":[[0,10]]}}}],["progress",{"_index":307,"t":{"869":{"position":[[0,11]]}}}],["project",{"_index":237,"t":{"536":{"position":[[0,8]]}}}],["psi",{"_index":66,"t":{"102":{"position":[[8,3]]},"536":{"position":[[27,3]]},"610":{"position":[[8,3]]},"653":{"position":[[0,3]]}}}],["psychoacoust",{"_index":329,"t":{"914":{"position":[[28,14]]}}}],["psychovisu",{"_index":328,"t":{"914":{"position":[[13,12]]}}}],["push",{"_index":187,"t":{"329":{"position":[[8,4]]}}}],["qm",{"_index":417,"t":{"1253":{"position":[[9,2],[34,2]]},"1255":{"position":[[9,2],[16,2],[37,2]]}}}],["quantiz",{"_index":265,"t":{"655":{"position":[[9,12]]}}}],["quick",{"_index":25,"t":{"39":{"position":[[13,5]]}}}],["ram",{"_index":395,"t":{"1161":{"position":[[11,3]]}}}],["rav1",{"_index":63,"t":{"98":{"position":[[0,5]]}}}],["rd",{"_index":263,"t":{"653":{"position":[[4,2]]}}}],["rdo",{"_index":326,"t":{"912":{"position":[[0,3]]}}}],["re",{"_index":310,"t":{"871":{"position":[[14,2]]}}}],["read",{"_index":352,"t":{"996":{"position":[[8,8]]},"1074":{"position":[[53,8]]}}}],["recommend",{"_index":220,"t":{"456":{"position":[[0,15]]},"473":{"position":[[0,15]]},"683":{"position":[[0,15]]}}}],["redund",{"_index":301,"t":{"823":{"position":[[0,10]]}}}],["refer",{"_index":208,"t":{"361":{"position":[[0,10]]},"693":{"position":[[0,9]]}}}],["releas",{"_index":231,"t":{"526":{"position":[[6,7]]}}}],["remov",{"_index":384,"t":{"1113":{"position":[[0,8]]}}}],["resiz",{"_index":285,"t":{"721":{"position":[[0,8]]}}}],["resourc",{"_index":346,"t":{"988":{"position":[[11,9]]}}}],["respons",{"_index":54,"t":{"76":{"position":[[0,10]]}}}],["restor",{"_index":419,"t":{"1257":{"position":[[9,11],[43,11]]}}}],["result",{"_index":427,"t":{"1269":{"position":[[25,8]]}}}],["rgb",{"_index":196,"t":{"345":{"position":[[0,3]]}}}],["ring",{"_index":358,"t":{"1008":{"position":[[0,7]]}}}],["road",{"_index":91,"t":{"148":{"position":[[13,4]]}}}],["rout",{"_index":13,"t":{"23":{"position":[[14,5]]}}}],["row",{"_index":402,"t":{"1229":{"position":[[7,4],[49,4]]}}}],["rumor",{"_index":217,"t":{"434":{"position":[[0,6]]}}}],["run",{"_index":250,"t":{"585":{"position":[[0,7]]},"935":{"position":[[0,7]]}}}],["sampl",{"_index":107,"t":{"177":{"position":[[0,8]]},"1217":{"position":[[0,7]]},"1279":{"position":[[0,7]]}}}],["sao",{"_index":261,"t":{"649":{"position":[[0,3]]}}}],["scenario",{"_index":23,"t":{"37":{"position":[[2,8]]}}}],["scm",{"_index":420,"t":{"1259":{"position":[[2,3],[21,3]]},"1261":{"position":[[2,3],[21,3]]}}}],["score",{"_index":336,"t":{"940":{"position":[[0,7]]},"982":{"position":[[0,7]]}}}],["screen",{"_index":375,"t":{"1076":{"position":[[5,6]]}}}],["search",{"_index":86,"t":{"144":{"position":[[7,6]]}}}],["silk",{"_index":112,"t":{"191":{"position":[[0,4]]}}}],["singl",{"_index":377,"t":{"1076":{"position":[[27,6]]}}}],["site",{"_index":35,"t":{"49":{"position":[[20,5]]},"56":{"position":[[30,4]]}}}],
["smpte",{"_index":128,"t":{"218":{"position":[[3,5]]},"220":{"position":[[3,5]]},"228":{"position":[[4,5]]},"246":{"position":[[3,5]]},"248":{"position":[[3,5]]},"254":{"position":[[4,5]]},"274":{"position":[[3,5]]},"276":{"position":[[3,5]]},"294":{"position":[[11,5]]},"296":{"position":[[4,5]]}}}],["softwar",{"_index":100,"t":{"169":{"position":[[0,8]]}}}],["sourc",{"_index":293,"t":{"772":{"position":[[0,6]]}}}],["space",{"_index":221,"t":{"464":{"position":[[16,5]]},"487":{"position":[[16,5]]},"503":{"position":[[16,5]]},"519":{"position":[[16,5]]},"556":{"position":[[16,5]]},"573":{"position":[[16,5]]},"597":{"position":[[16,5]]},"677":{"position":[[16,5]]}}}],["speed",{"_index":431,"t":{"1285":{"position":[[0,5]]}}}],["srgb",{"_index":164,"t":{"288":{"position":[[4,4]]}}}],["ssa",{"_index":348,"t":{"994":{"position":[[4,4]]}}}],["ssimulacra",{"_index":338,"t":{"954":{"position":[[13,10]]}}}],["ssimulacra2",{"_index":345,"t":{"986":{"position":[[13,11]]}}}],["staircas",{"_index":355,"t":{"1004":{"position":[[0,9]]}}}],["standalon",{"_index":229,"t":{"522":{"position":[[0,10]]}}}],["start",{"_index":278,"t":{"704":{"position":[[13,7]]},"1159":{"position":[[45,5]]}}}],["stat",{"_index":83,"t":{"140":{"position":[[0,5]]}}}],["statement",{"_index":92,"t":{"150":{"position":[[8,9]]}}}],["stream",{"_index":321,"t":{"896":{"position":[[11,6]]}}}],["strength",{"_index":31,"t":{"47":{"position":[[0,9]]},"602":{"position":[[0,9]]}}}],["structur",{"_index":341,"t":{"968":{"position":[[0,9]]}}}],["subsampl",{"_index":206,"t":{"357":{"position":[[7,11]]}}}],["superr",{"_index":425,"t":{"1267":{"position":[[0,9]]}}}],["support",{"_index":101,"t":{"169":{"position":[[9,7]]},"464":{"position":[[0,9]]},"487":{"position":[[0,9]]},"493":{"position":[[14,8]]},"503":{"position":[[0,9]]},"507":{"position":[[33,7]]},"519":{"position":[[0,9]]},"556":{"position":[[0,9]]},"573":{"position":[[0,9]]},"597":{"position":[[0,9]]},"616":{"position":[[7,7]]},"629":{"position":[[7,7]]},"677":{"position":[[0,9]]},"867":{"position":[[0,9]]},"873":{"position":[[9,7]]}}}],["svt",{"_index":62,"t":{"96":{"position":[[0,3]]},"102":{"position":[[0,3]]},"536":{"position":[[19,3]]},"610":{"position":[[0,3]]},"1291":{"position":[[0,3]]}}}],["tar",{"_index":212,"t":{"394":{"position":[[9,3]]},"396":{"position":[[10,3]]}}}],["tech",{"_index":155,"t":{"260":{"position":[[8,4]]}}}],["techniqu",{"_index":303,"t":{"825":{"position":[[0,10]]}}}],["tf",{"_index":422,"t":{"1263":{"position":[[9,2],[34,2]]}}}],["thought",{"_index":22,"t":{"33":{"position":[[6,8]]}}}],["thread",{"_index":269,"t":{"687":{"position":[[0,7]]}}}],["tile",{"_index":401,"t":{"1229":{"position":[[2,4],[16,4],[44,4],[58,4]]}}}],["tini",{"_index":313,"t":{"881":{"position":[[7,4]]}}}],["tip",{"_index":18,"t":{"31":{"position":[[0,4]]},"458":{"position":[[0,4]]},"511":{"position":[[0,4]]},"1187":{"position":[[0,4]]}}}],["tldr",{"_index":399,"t":{"1221":{"position":[[0,4]]},"1225":{"position":[[0,4]]},"1269":{"position":[[6,4]]},"1289":{"position":[[0,4]]},"1321":{"position":[[0,4]]}}}],["tool",{"_index":1,"t":{"4":{"position":[[15,5]]},"86":{"position":[[0,5]]},"173":{"position":[[36,5]]}}}],["tpl",{"_index":423,"t":{"1265":{"position":[[9,3],[38,3]]}}}],["transcod",{"_index":325,"t":{"910":{"position":[[0,11]]}}}],["transform",{"_index":332,"t":{"916":{"position":[[16,9]]}}}],["tree",{"_index":267,"t":{"657":{"position":[[3,4]]},"695":{"position":[[3,4]]}}}],["trick",{"_index":20,"t":{"31":{"position":[[7,6]]},"458":{"position":[[7,6]]},"511":{"position":[[7,6]]},"
1187":{"position":[[9,6]]}}}],["trim",{"_index":286,"t":{"723":{"position":[[0,8]]}}}],["troubleshoot",{"_index":257,"t":{"633":{"position":[[0,15]]},"1073":{"position":[[0,15]]},"1165":{"position":[[0,15]]}}}],["truehd",{"_index":97,"t":{"161":{"position":[[0,6]]}}}],["tui",{"_index":11,"t":{"21":{"position":[[4,3]]}}}],["tune",{"_index":400,"t":{"1223":{"position":[[0,5]]}}}],["type",{"_index":410,"t":{"1243":{"position":[[11,4],[40,4]]}}}],["unlock",{"_index":393,"t":{"1161":{"position":[[0,6]]}}}],["unoff",{"_index":339,"t":{"962":{"position":[[0,9]]}}}],["unspecifi",{"_index":123,"t":{"212":{"position":[[3,11]]},"240":{"position":[[3,11]]},"268":{"position":[[3,11]]}}}],["us",{"_index":104,"t":{"171":{"position":[[12,5]]},"173":{"position":[[12,5]]},"925":{"position":[[0,5]]},"978":{"position":[[0,5]]},"1133":{"position":[[0,5]]}}}],["usag",{"_index":210,"t":{"392":{"position":[[0,5]]},"400":{"position":[[0,5]]},"408":{"position":[[0,5]]},"418":{"position":[[0,5]]},"441":{"position":[[0,5]]},"451":{"position":[[0,5]]},"468":{"position":[[0,5]]},"481":{"position":[[0,5]]},"497":{"position":[[0,5]]},"509":{"position":[[0,5]]},"521":{"position":[[0,5]]},"542":{"position":[[0,5]]},"560":{"position":[[0,5]]},"561":{"position":[[7,5]]},"618":{"position":[[0,5]]},"631":{"position":[[0,5]]},"665":{"position":[[0,5]]},"681":{"position":[[0,5]]},"733":{"position":[[0,5]]},"737":{"position":[[0,5]]},"749":{"position":[[0,5]]},"950":{"position":[[0,5]]},"1022":{"position":[[0,5]]},"1056":{"position":[[0,5]]},"1082":{"position":[[0,5]]},"1095":{"position":[[0,5]]},"1101":{"position":[[0,5]]},"1107":{"position":[[0,5]]},"1123":{"position":[[0,5]]},"1161":{"position":[[15,5]]},"1175":{"position":[[0,5]]},"1185":{"position":[[0,5]]},"1193":{"position":[[0,5]]}}}],["v2.0.0",{"_index":433,"t":{"1291":{"position":[[8,6]]},"1293":{"position":[[11,6]]},"1295":{"position":[[10,6]]},"1297":{"position":[[10,6]]},"1299":{"position":[[10,6]]},"1301":{"position":[[10,6]]},"1303":{"position":[[10,6]]},"1305":{"position":[[10,6]]},"1307":{"position":[[10,6]]},"1309":{"position":[[10,6]]},"1311":{"position":[[10,6]]},"1313":{"position":[[10,6]]},"1315":{"position":[[11,6]]},"1317":{"position":[[11,6]]},"1319":{"position":[[11,6]]}}}],["v2.1.0",{"_index":434,"t":{"1291":{"position":[[18,6]]},"1293":{"position":[[21,6]]},"1295":{"position":[[20,6]]},"1297":{"position":[[20,6]]},"1299":{"position":[[20,6]]},"1301":{"position":[[20,6]]},"1303":{"position":[[20,6]]},"1305":{"position":[[20,6]]},"1307":{"position":[[20,6]]},"1309":{"position":[[20,6]]},"1311":{"position":[[20,6]]},"1313":{"position":[[20,6]]},"1315":{"position":[[21,6]]},"1317":{"position":[[21,6]]},"1319":{"position":[[21,6]]}}}],["v4",{"_index":349,"t":{"994":{"position":[[9,4]]}}}],["vac",{"_index":116,"t":{"200":{"position":[[0,3]]}}}],["video",{"_index":335,"t":{"938":{"position":[[3,6]]},"1034":{"position":[[0,5]]},"1044":{"position":[[0,5]]}}}],["vidstabdetect",{"_index":289,"t":{"751":{"position":[[0,13]]}}}],["vidstabtransform",{"_index":290,"t":{"753":{"position":[[0,16]]}}}],["viral",{"_index":41,"t":{"58":{"position":[[0,8]]}}}],["vision",{"_index":243,"t":{"565":{"position":[[20,6]]},"839":{"position":[[6,6]]}}}],["vlc",{"_index":387,"t":{"1139":{"position":[[0,3]]}}}],["vmaf",{"_index":343,"t":{"978":{"position":[[6,4]]}}}],["vp8",{"_index":253,"t":{"587":{"position":[[0,3]]}}}],["vp9",{"_index":254,"t":{"589":{"position":[[0,3]]}}}],["vs",{"_index":202,"t":{"353":{"position":[[7,2]]},"1229":{"position":[[31,2]]},"1231":{"position":[[12,2]]
},"1233":{"position":[[12,2]]},"1235":{"position":[[16,2]]},"1237":{"position":[[14,2]]},"1239":{"position":[[15,2]]},"1241":{"position":[[16,2]]},"1243":{"position":[[18,2]]},"1245":{"position":[[14,2]]},"1247":{"position":[[15,2]]},"1249":{"position":[[22,2]]},"1251":{"position":[[20,2]]},"1253":{"position":[[14,2]]},"1255":{"position":[[25,2]]},"1257":{"position":[[23,2]]},"1259":{"position":[[8,2]]},"1261":{"position":[[8,2]]},"1263":{"position":[[14,2]]},"1265":{"position":[[18,2]]},"1291":{"position":[[15,2]]},"1293":{"position":[[18,2]]},"1295":{"position":[[17,2]]},"1297":{"position":[[17,2]]},"1299":{"position":[[17,2]]},"1301":{"position":[[17,2]]},"1303":{"position":[[17,2]]},"1305":{"position":[[17,2]]},"1307":{"position":[[17,2]]},"1309":{"position":[[17,2]]},"1311":{"position":[[17,2]]},"1313":{"position":[[17,2]]},"1315":{"position":[[18,2]]},"1317":{"position":[[18,2]]},"1319":{"position":[[18,2]]}}}],["wav",{"_index":102,"t":{"171":{"position":[[0,3]]},"173":{"position":[[0,3]]}}}],["wavpack",{"_index":179,"t":{"313":{"position":[[0,7]]}}}],["way",{"_index":5,"t":{"7":{"position":[[8,3]]},"9":{"position":[[9,3]]},"11":{"position":[[14,3]]},"13":{"position":[[11,3]]},"19":{"position":[[8,3]]},"21":{"position":[[8,3]]}}}],["weak",{"_index":255,"t":{"604":{"position":[[0,10]]},"984":{"position":[[5,10]]}}}],["websit",{"_index":189,"t":{"331":{"position":[[0,7]]}}}],["website'",{"_index":28,"t":{"43":{"position":[[4,9]]}}}],["wiki",{"_index":280,"t":{"706":{"position":[[11,6]]}}}],["window",{"_index":3,"t":{"6":{"position":[[10,7]]},"374":{"position":[[0,7]]},"762":{"position":[[10,7]]},"980":{"position":[[29,7]]},"1063":{"position":[[0,7]]},"1131":{"position":[[0,7]]},"1155":{"position":[[0,7]]}}}],["windows/macos/linux",{"_index":248,"t":{"577":{"position":[[10,21]]}}}],["work",{"_index":27,"t":{"41":{"position":[[7,5]]},"843":{"position":[[22,5]]}}}],["wsl",{"_index":394,"t":{"1161":{"position":[[7,3]]}}}],["wsl2",{"_index":6,"t":{"9":{"position":[[4,4]]}}}],["xhe",{"_index":74,"t":{"120":{"position":[[0,3]]}}}],["xvycc",{"_index":162,"t":{"284":{"position":[[4,5]]}}}],["xxxxx.ivf",{"_index":372,"t":{"1074":{"position":[[17,11]]}}}],["ycgco",{"_index":133,"t":{"222":{"position":[[3,5]]}}}],["yuv",{"_index":197,"t":{"347":{"position":[[0,3]]}}}],["zune",{"_index":315,"t":{"885":{"position":[[0,4]]}}}]],"pipeline":["stemmer"]}},{"documents":[{"i":3,"t":"This guide will show you how to encode in AV1 the right and optimal way. Yes, you using standalone libaom, libsvtav1, and librav1e from FFmpeg or even piping yuv4mpeg into mainline aomenc are all unoptimal. Outdated Information While a lot of the information presented in this guide is still relevant and correct, the AV1 ecosystem has changed dramatically since this guide's inception which has demanded a rewrite. Please see the AV1 for Dummies blog post for more information. In this guide, we'll be installing Av1an for chunked encoding and infinite threading, because the current state of AV1 encoders, except for SVT-AV1, unfortunately lacks threading and will only use very low amount of cores, which hampers speeds. The only caveat to this approach is RAM consumption, encoding 2160p (4K) with aomenc with 4 workers could take upwards of 16GB of RAM! 
So do keep this in mind.","s":"AV1 Encoding for Dummies","u":"/blog/av1-encoding-for-dummies","h":"","p":2},{"i":5,"t":"Given all of the different operating systems that people use on a day to day basis and the various different encoding workflows that exist, there are a number of ways to do this. Jump to: Windows | macOS | Linux","s":"Installing the Tools","u":"/blog/av1-encoding-for-dummies","h":"#installing-the-tools","p":2},{"i":8,"t":"Install NMKODER which is a GUI front-end to av1an with all dependencies installed. You're done, and you can skip to the encoding part. Almost abandonware Nmkoder already ships everything by default, but its last release was 29th March 2022, so you need to manually update all encoders and tools to get better encoding speeds. Missing out on updates will result in your encodes being sub-optimal.","s":"The GUI Way","u":"/blog/av1-encoding-for-dummies","h":"#the-gui-way","p":2},{"i":10,"t":"(Recommended) If you're not already familiar with WSL2, the Windows Subsystem for Linux (WSL) is a feature of the Windows operating system that allows you to run a Linux file system, along with Linux command-line tools and GUI apps, directly on Windows. This lets Linux distributions run without you managing any virtual machines, so encoding performance is very good. The easiest way to encode with WSL2 is to use rAV1ator CLI, an interactive TUI for Av1an. An ArchWSL2 installation tutorial is provided here.","s":"The WSL2 Way","u":"/blog/av1-encoding-for-dummies","h":"#the-wsl2-way","p":2},{"i":12,"t":"There is now a batch script for automating the install process, which can be found here. The instructions are in the README file. caution The script will download outdated versions of encoders and tools such as aom-av1-psy and MKVToolNix v76.0; if you are fine with these, you can proceed.","s":"The Automated Way","u":"/blog/av1-encoding-for-dummies","h":"#the-automated-way","p":2},{"i":14,"t":"Install Python 3.10.x from here and select \"Windows Installer 64-bit\" (this will change, so consult the Vapoursynth website if you're reading this from the future). Upon installation, check the tick for adding Python to PATH. Download and install Vapoursynth from here and select \"VapourSynth64-RXX.exe\". Open the terminal and type vsrepo.py install lsmas ffms2 to install some plugins for Av1an to work. Download MKVToolNix from here, select \"mkvtoolnix-64bit-XX.X.X-setup.exe\", and install (Also available on winget!) Download Av1an from here (SELECT LATEST AND CLICK THE \"ASSETS\" DROPDOWN) Download the shared-libraries build of FFmpeg from gyan.dev Download a pre-built fork of Aomenc (aom-av1-lavish) which has neat stuff such as sane defaults, new tunes, optimizations, etc. This can be downloaded for Windows here (Current as of Sept 6, 2023) info If you opt to compile aomenc yourself, you can view the instructions on how to do that here. Move Av1an, FFmpeg (Including the FFmpeg DLLs), and aomenc to somewhere preferable, e.g. C:\Encoding. Add that folder AND THE MKVTOOLNIX INSTALLATION FOLDER to the Windows PATH environment.","s":"The Manual Way","u":"/blog/av1-encoding-for-dummies","h":"#the-manual-way","p":2},{"i":16,"t":"macOS is very similar to Linux, although there aren't any GUI tools for AV1 encoding that I can comfortably recommend. Homebrew + MacPorts for Av1an + rav1e: Note that some commands may have to be run with sudo, which I won't explicitly include for security reasons. 
Installing the Homebrew package manager is a well documented process at this point: /bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\" As is installing MacPorts. Install the relevant .pkg for your macOS version from the MacPorts Project website: www.macports.org/install.php Now, you can run the following commands: brew update && brew upgrade brew install rav1e aom mkvtoolnix ffmpeg # Usually you must run MacPorts commands for package installations as root port upgrade outdated port install av1an This is the easiest way to get everything set up & working to produce AV1 video with rav1e or mainline aomenc & Av1an. You can check that things are installed by running the following commands & parsing their output: % av1an --version av1an 0.4.1-unstable (rev e10880d) (Release) * Compiler rustc 1.70.0 (LLVM 16.0) * Target Triple aarch64-apple-darwin * Date Info Commit Date: 2023-06-25 * VapourSynth Plugins systems.innocent.lsmas : Not found com.vapoursynth.ffms2 : Not found % rav1e --version | grep \"release\" -C 1 rav1e 0.6.6 () (release) rustc 1.69.0 (84c898d65 2023-04-16) (built from a source tarball) aarch64-apple-darwin % aomenc --help | grep \"AOMedia\" -C 3 Included encoders: av1 - AOMedia Project AV1 Encoder 3.6.1 (default) Use --codec to switch to a non-default encoder. Notice systems.innocent.lsmas : Not found in the Av1an output. This means you won't be able to use the lsmash chunking method through vapoursynth & may instead have to rely on hybrid chunking, through -m hybrid. This is slower & takes up disk space while encoding, but still works. A sample Av1an command with this basic installation may look like this: av1an -i \"input\" -y --resume --verbose --split-method av-scenechange -m hybrid -c mkvmerge -e rav1e --force -v \" --tiles 8 -s 4 --quantizer 80 --no-scene-detection\" --photon-noise 7 --chroma-noise --pix-format yuv420p10le -w 8 -o \"output.mkv\" Building From Source If you want lsmash support, aom-av1-lavish instead of mainline, or anything else that isn't covered by the more basic installation, you'll have to compile from source. Things are very similar to Linux, with a few oddities: macOS sometimes doesn't have a /usr/local/bin by default. You can fix this by doing mkdir /usr/local/bin. Homebrew installs everything in its own directory structure. If you're building things from source that rely on libraries from vapoursynth, zimg, lsmash, etc., make sure to copy them from /opt/homebrew/lib to /usr/local/lib. Finding them is a matter of ls | grep \"keyword\" & copying what looks reasonable to be associated with the tool you're using. Building most things from source will have instructions for *nix which work for both macOS & Linux. Even if it says Linux, there's a good chance it'll work on macOS as well, & it is always worth trying Linux build instructions on Mac. I won't be going through building every encoding tool & dependency from source, as it is generally much more intuitive than Windows, but building Av1an is worth detailing here just as an example. brew install git rust nasm git clone https://github.com/master-of-zen/Av1an cd Av1an RUSTFLAGS=\"-C target-cpu=native\" cargo build --release cp target/release/av1an /usr/local/bin More Difficult: Building aom-av1-lavish from Source If you want to make the most out of your hardware & eke out every last drop of quality, it may be worth building aom-av1-lavish from source. 
The first step is to clone it from the Endless Merging branch: git clone https://github.com/Clybius/aom-av1-lavish -b Endless_Merging cd aom-av1-lavish Now, you need to make some manual changes to the source code until Clybius merges this commit. Add the line #include \"aq_variance.h\" at line 19 in av1/encoder/encodeframe_utils.c Comment out line 2546 in av1/encoder/speed_features.c. This line is const int qindex_thresh_cdef_sf_s1_s3_l2[2] = { 92, 48 }; & becomes // const int qindex_thresh_cdef_sf_s1_s3_l2[2] = { 92, 48 };. Now you can continue to build according to the Linux instructions below. Obviously you'll need cmake, which you can install with Homebrew along with any other tools you may need. While still in the aom-av1-lavish directory: mkdir -p aom_build && cd aom_build cmake .. -DBUILD_SHARED_LIBS=0 -DENABLE_DOCS=0 -DCONFIG_TUNE_BUTTERAUGLI=0 -DCONFIG_TUNE_VMAF=0 -DCONFIG_AV1_DECODER=0 -DENABLE_TESTS=0 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-flto -O3 -march=native\" -DCMAKE_C_FLAGS=\"-flto -O3 -march=native -pipe -fno-plt\" -DCMAKE_LD_FLAGS=\"-flto -O3 -march=native\" make -j$(nproc) # This may need to be run as root: make install Now you can run aomenc --help | grep \"AOMedia\" -C 3 to see if lavish is installed. If you're getting the same output as above, you may need to copy the aomenc executable to /opt/local/bin, /usr/local/bin, & /opt/homebrew/bin if you already installed mainline aomenc. Running the version info command again, the correct output should look something like this: % aomenc --help | grep AOMedia -C 3 Included encoders: av1 - AOMedia Project AV1 Encoder Psy v3.6.0 (default) Use --codec to switch to a non-default encoder. Notice how it says AOMedia Project AV1 Encoder Psy instead of AOMedia Project AV1 Encoder. You should be all set after this to start using aom-av1-lavish & following the current parameter meta as outlined below.","s":"macOS","u":"/blog/av1-encoding-for-dummies","h":"#macos","p":2},{"i":18,"t":"info Yet again, try using Arch. It's way easier.","s":"Linux","u":"/blog/av1-encoding-for-dummies","h":"#linux","p":2},{"i":20,"t":"Install Aviator (SVT-AV1 + FFmpeg) or rAV1ator (basically the same thing, but with Av1an + rav1e). Both are only available as Flatpaks. Keep in mind Aviator ships with SVT-AV1 and rAV1ator with rav1e instead of aomenc/AOM-AV1, which I will not be covering here.","s":"The GUI Way","u":"/blog/av1-encoding-for-dummies","h":"#the-gui-way-1","p":2},{"i":22,"t":"(Recommended) Install rav1ator-cli, a TUI for using Av1an meant to be easy to use. Much more flexible than the GUI options & can work with a number of encoders. See this page for more info. Can be easily used on any distro.","s":"The TUI Way","u":"/blog/av1-encoding-for-dummies","h":"#the-tui-way","p":2},{"i":24,"t":"Ubuntu The guide below is targeted towards 22.04; packages and other things may be different on other versions. Install Rust via rustup first, as the apt version of Rust is severely outdated, then you can continue. 
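If you haven't set up rustup before, its standard installer one-liner (taken from rustup.rs; as always, skim any script before piping it into your shell) is: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh Afterwards, restart your shell or run source \"$HOME/.cargo/env\" so that cargo is on your PATH. 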
Install dependencies: sudo apt install wget python unzip unrar build-essential meson autoconf automake libtool git nasm yasm python3-dev python3-pip cython3 libass-dev libqt5websockets5-dev libfftw3-dev libtesseract-dev ffmpeg libavcodec-dev libavformat-dev libswscale-dev libavutil-dev libswresample-dev libmediainfo-dev mkvtoolnix mediainfo perl nasm yasm git cmake libavutil-dev libavcodec-dev libavformat-dev libavdevice-dev libavfilter-dev libswscale-dev libswresample-dev libpostproc-dev llvm libclang-dev libssl-dev Install l-smash: git clone https://github.com/l-smash/l-smash.git cd l-smash ./configure --enable-shared --extra-cflags=\"-march=native\" make -j$(nproc) sudo make install Install zimg: git clone --recursive https://github.com/sekrit-twc/zimg.git cd zimg ./autogen.sh ./configure make -j$(nproc) sudo make install Install ImageMagick: git clone https://github.com/ImageMagick/ImageMagick cd ImageMagick ./configure make -j$(nproc) sudo make install Install Vapoursynth R63: wget https://github.com/vapoursynth/vapoursynth/archive/refs/tags/R63.zip unzip R63.zip cd vapoursynth-R63 ./autogen.sh ./configure CFLAGS=\"-march=native\" CXXFLAGS=\"-march=native\" --libdir=/usr/lib make -j$(nproc) sudo make install sudo mkdir /usr/lib/vapoursynth sudo ldconfig The plugin directory will be located in /usr/lib/vapoursynth. Install L-SMASH-Works Vapoursynth Plugin: git clone https://github.com/AkarinVS/L-SMASH-Works -b ffmpeg-4.5 cd L-SMASH-Works/VapourSynth && mkdir build && cd build meson .. --optimization=3 --default-library=static -Db_lto=true -Dc_args=\"-march=native\" -Dcpp_args=\"-march=native\" ninja -j$(nproc) sudo cp libvslsmashsource.so /usr/lib/vapoursynth/ danger L-SMASH-Works doesn't work on aarch64; it is recommended to use other plugins instead. Install FFMS2 Vapoursynth Plugin: git clone https://github.com/FFMS/ffms2 cd ffms2 ./autogen.sh ./configure CFLAGS=\"-O3 -march=native\" CXXFLAGS=\"-O3 -march=native\" make -j$(nproc) sudo cp src/core/.libs/libffms2.so src/core/.libs/libffms2.so.5 src/core/.libs/libffms2.so.5.0.0 /usr/lib/vapoursynth Install Av1an: git clone https://github.com/master-of-zen/Av1an cd Av1an RUSTFLAGS=\"-C target-cpu=native\" cargo build --release sudo cp target/release/av1an /usr/local/bin When there are no errors, proceed to compiling aom-av1-lavish.","s":"The Compiling Route","u":"/blog/av1-encoding-for-dummies","h":"#the-compiling-route","p":2},{"i":26,"t":"Install dependencies: sudo pacman -S vapoursynth ffmpeg av1an mkvtoolnix-gui git perl cmake ninja meson nasm vapoursynth-plugin-lsmashsource ffms2 You're done; proceed. Compiling aom-av1-lavish git clone https://github.com/Clybius/aom-av1-lavish -b Endless_Merging cd aom-av1-lavish && mkdir -p aom_build && cd aom_build cmake .. -DBUILD_SHARED_LIBS=0 -DENABLE_DOCS=0 -DCONFIG_TUNE_BUTTERAUGLI=0 -DCONFIG_TUNE_VMAF=0 -DCONFIG_AV1_DECODER=0 -DENABLE_TESTS=0 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-flto -O3 -march=native\" -DCMAKE_C_FLAGS=\"-flto -O3 -march=native -pipe -fno-plt\" make -j$(nproc) sudo make install","s":"Arch","u":"/blog/av1-encoding-for-dummies","h":"#arch","p":2},{"i":28,"t":"The moment you've all been waiting for, let's just get into it. 
Here's an example of recommended parameters as of now (09/03/23) [MM/DD/YY]: av1an -x 300 -i input.mkv -w 4 -e aom -c mkvmerge --resume -m lsmash --photon-noise=10 --set-thread-affinity=2 --verbose -a \" -an \" -f \" -an \" -v \" --bit-depth=10 --cpu-used=4 --end-usage=q --cq-level=24 --threads=2 --tile-columns=0 --tile-rows=0 --lag-in-frames=64 --tune-content=psy --tune=ssim --enable-keyframe-filtering=1 --disable-kf --kf-max-dist=9999 --enable-qm=1 --deltaq-mode=0 --aq-mode=0 --quant-b-adapt=1 --enable-fwd-kf=0 --arnr-strength=1 --sb-size=dynamic --enable-dnl-denoising=0 \" -o \"output.mkv\" Parameter Meta It is strongly recommended to join the AV1 Discord server to get the latest updates on what to use and which to set, as it's the only easily reachable place for everything AV1 & encoding tips in general. Now let's dissect it one-by-one. Av1an parameters: -i Input. -x 300 Sets the scene split length to 300 frames; you can increase it for more quality at the cost of video seekability. -w 4 Specifies the number of \"workers\", or encoder instances working on the video. --verbose Sets logging to verbose. --resume Resumes the encode even when you haven't encoded yet. I strongly recommend leaving this in if you resume a lot, since you can accidentally delete your whole progress (There's no delete confirmation feature... yet) if you \"resumed\" without the parameter in place. -e aom Specifies we're using the aomenc encoder, which should be the default option. -c mkvmerge Specifies we're using mkvmerge (MKVToolNix) to concatenate the parts when done; you can use ffmpeg instead if you want to, but this is the best method. -m lsmash Specifies we're using l-smash (Vapoursynth plugin) to split the video; this is also the best method because ffms2 causes video lag (Tested a year ago, might change now) and other methods just suck (Slow and not worth it, learned the hard way). You can attempt to use ffms2 when inputting VC-1 videos as it is not possible with l-smash (Or convert it to lossless with x264 qp 0). -f \" -an \" -f Stands for ffmpeg parameters; -an is to remove all audio, since it's better to encode and merge it separately. To crop, use -f \" -an -vf crop=1920:800 \" for example to crop the video to 1920x800. -v \" \" Is where you put the encoder's parameters in. -a \" -an \" FFmpeg audio encoding options; we're removing audio because we can always add it later. But if you want to, you can also encode directly. Here's an example for encoding to Opus using libopus assuming stereo: -a \" -c:a libopus -b:a 128k \". --photon-noise=10 AV1 grain synthesis, which is a technique where the encoder puts fake grain in so it looks more natural while potentially hiding video artifacts (grain is hard to encode and explodes bitrate usage because of its randomness); 5-8 for almost none to little grain, 10-14 for medium, 15+ heavy, 20+ extremely heavy, 30+ for extremely grainy 90s live action films. --set-thread-affinity=2 Pins the thread to the encoder; this aligns with --threads=2 in the encoder parameters, so set them accordingly. aomenc parameters: --bit-depth=10 We're using 10-bit because it makes the video smaller and reduces banding. --cpu-used=4 This is the preset, which ranges from 0-9; you can go to 3 if you want more efficiency, 2 if you have a lot of time, 4 is the sweet spot, and 6 if you want speed. Don't go above 6 (Worst efficiency) or even 0 (It would take WEEKS to finish). 
--end-usage=q --cq-level=24 This specifies that we are going to use a knockoff version of CRF level similar to x264/x265 encoders, in this case CRF 24. --threads=2 Sets the amount of threads the encoder can use; this aligns with --set-thread-affinity in Av1an. --tile-columns=0 --tile-rows=0 These are the tile options, where the encoder splits the video into tiles to encode faster, see the image below (Yellow lines): Tile usage Do NOT use tiles for 1080p and below; use 1 tile-columns at 1440p (2K), and 2 tile-columns and 1 tile-rows for 2160p (4K). --lag-in-frames=64 Similar to x264/x265 rc-lookahead. Sets a number of frames to look ahead for frametype and ratecontrol, allowing for better compression decision making. Setting to a value greater than 64 is generally not considered useful. --aq-mode Adaptive quantization mode; 0 is better most of the time. --tune-content=psy --tune=ssim As the name suggests, these are tunes that affect the video output, for the better or for the worse. Tunes to use Set tune-content to animation if you're encoding above cq-level=30, a.k.a. lower quality, despite its name. Set tune-content to psy for everything else; do not use it if you encode above cq-level=30. For tune, this is a bit tricky. For now, the meta seems to be ssim, but back then it was lavish which is considered THE best tune because it's based on butteraugli. Now it's fallen behind because it's more blurry than ssim, and before that it was butteraugli, and then ipq_vmaf_psy, and finally just ipq. If you use any of the VMAF tunes, you need to specify --vmaf-model-path= to where you put it. --enable-keyframe-filtering=1 We're setting it to 1 because of compatibility reasons; 2 is more efficient, but there are seeking issues and FFmpeg for some reason can't input it. --sb-size=dynamic Allows the encoder to use 128x128 block partitioning in addition to 64x64, which gives an efficiency boost; just set it and forget it. --deltaq-mode Set to 0 because it's just better. --arnr-strength=1 Controls how strong the filtering will be; 1 is good for 3D Pixar CGI-like and 2D animation, use 4 if you're doing live action content. Using the maximum at higher bitrates would just result in a blurry mess. --disable-kf --enable-fwd-kf=0 We're disabling keyframes because Av1an already did scene detection, so we won't have to... And it speeds things up. --kf-max-dist=9999 Maximum keyframe interval; we're setting it at the highest possible value since av1an's scene detection keyframe interval is already 240 by default. --enable-chroma-deltaq=1 --enable-qm=1 --quant-b-adapt=1 Parameters that give you a free efficiency boost. --enable-dnl-denoising=0 Disables the encoder's built-in denoising technique when grain synthesis is enabled; you can optionally set it to 1 when you have a pretty noisy video, since it works quite well. Concatenation Error on Linux Run ulimit -n 200000, resume, and it should concatenate just fine. If it still errors, head to the encode directory > encode, and run mkvmerge @../options.json","s":"Encoding","u":"/blog/av1-encoding-for-dummies","h":"#encoding","p":2},{"i":30,"t":"Once you're done, just encode your audio using ffmpeg (or just pass it through); subtitles should be carried along with your video output, and you can merge them in MKVToolNix! Don't want Matroska files? 
That's fine, you can use FFmpeg or MP4Box to output into mp4, just keep in mind that PGS/SUP/VOBSUB subtitles are not supported and Opus audio support is still experimental.","s":"Merging Everything","u":"/blog/av1-encoding-for-dummies","h":"#merging-everything","p":2},{"i":32,"t":"--denoise-noise-level=10 Alternative to photon-noise; it is slower than photon-noise and is the OG grain synthesis method. It performs okay and just serves as an alternative. Don't attempt to use it at high values (>12) since it creates noticeable grain patterns. --arnr-maxframes to set the max reference frames that will be used to filter the encode; higher values would make the video blurrier at high fidelity but look better at lower bitrates. --butteraugli-resize-factor=2 if you use any of the butteraugli-based tunes (lavish, butteraugli) to speed it up without much loss, and --butteraugli-intensity-target=250 to match the content light level.","s":"Tips & Tricks","u":"/blog/av1-encoding-for-dummies","h":"#tips--tricks","p":2},{"i":34,"t":"Encoding has always been about experimenting for the best result; there is really no \"one size fits all\" for encoding content, as videos differ in scene complexity, how they're captured (2D/real life), film grain, dark scenes, etc. So experiment away for your specific type of content! Guide originally hosted on https://rentry.co/AV1, rewrite and migration by Simulping.","s":"Final Thoughts","u":"/blog/av1-encoding-for-dummies","h":"#final-thoughts","p":2},{"i":36,"t":"A 567.14 MB, 12 min 11 s, 2K (2,048 x 858), VP9 + Opus, 6.51 Mbps average, Blender short film \"Cosmos Laundromat\"","s":"Embedding the Un-Embeddable","u":"/blog/embedding-the-un-embeddable","h":"","p":35},{"i":38,"t":"While chatting in your favorite Discord servers & group chats, you may see a friend send a weird link. You might even consider it suspicious at first glance. It is a video featuring an image of a movie poster with a play button that is almost begging to be clicked. Naturally, you click it. It loads for a second, and to your surprise it is a full-length, 90-minute (sometimes even two-hour-long) unauthorized copy of a movie. If you don't know exactly what is going on, you probably sit there dumbfounded as a pixel-perfect HD movie plays back. You may have expected a stereotypically muddy, blocky, laggy shitpost, but this has defied your expectations. The truth is, there are multiple sites that do this; there are five at the time of writing. Below is a list of the ones I am currently familiar with: https://stolen.shoes https://discord.nfp.is https://embeds.video https://x266.mov/discord-embed https://autocompressor.net/av1 The big question is, how do they work? Let's get to dissecting.","s":"A Scenario","u":"/blog/embedding-the-un-embeddable","h":"#a-scenario","p":35},{"i":40,"t":"The Codec Wiki unequivocally condemns any form of piracy, including the unauthorized distribution of copyrighted content. This blog post is intended to educate & inform. You may not use the tools discussed to infringe upon the intellectual property rights of content creators without serious legal risk. We encourage our readers to respect copyright laws & use the tools we discuss here appropriately.","s":"But First, a Quick Disclosure","u":"/blog/embedding-the-un-embeddable","h":"#but-first-a-quick-disclosure","p":35},{"i":42,"t":"The entire scheme is actually very simple, as it is all just HTML meta tags (If you are familiar with web development, this is all a walk in the park). 
The technology's inner workings can be divided into two distinct parts. First, let's see how it works on the website's end.","s":"How it Works","u":"/blog/embedding-the-un-embeddable","h":"#how-it-works","p":35},{"i":44,"t":"If you view each website's source, you will find the same few meta tags in each one, though they may appear in a different order. These are the head parts of HTML, which dictate metadata for the document itself such as what the website title/name is, cosmetic embeds, the site's icon, etc. They are usually found between the opening and closing head tags.
Here's an example of a static HTML site serving one specific video, titled \"some embed site\" with a body that simply reads \"Hi, just your friendly neighborhood video embed site\".
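As a rough sketch of the idea (the markup below is illustrative, not copied from any particular site, and every URL is a placeholder), the head of such a page carries the standard Open Graph video tags that Discord's unfurler, among others, reads: <head> <title>some embed site</title> <meta property=\"og:type\" content=\"video.other\" /> <meta property=\"og:video\" content=\"https://example.com/v/video.mp4\" /> <meta property=\"og:video:width\" content=\"1920\" /> <meta property=\"og:video:height\" content=\"1080\" /> <meta property=\"og:image\" content=\"https://example.com/v/thumbnail.jpg\" /> </head> Here og:image supplies the thumbnail that gets embedded first, while og:video points at the actual file.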
These interactive sites usually deploy a live script, like a JavaScript framework. Examples are NodeJS, ExpressJS, Svelte, etc. These are used to parse videos and thumbnails in realtime so they can be embedded on Discord (or potentially other platforms).","s":"The Website's End","u":"/blog/embedding-the-un-embeddable","h":"#the-websites-end","p":35},{"i":46,"t":"Traditionally, Discord's media embedder will impose its own video embed size limit (50 MiB) when a user sends a direct video link as usual. But in this case Discord will embed the thumbnail first, not the video. You could say the link \"tricks\" Discord by showing a \"false face\" first.","s":"Discord's End","u":"/blog/embedding-the-un-embeddable","h":"#discords-end","p":35},{"i":48,"t":"After a combination of countless hours of observation, rigorous testing throughout the period of a year, and conversations with the sites' creators, the current strengths & limitations of this exploit are enumerated below. Strengths You can embed non-web-compatible codecs such as HEVC in MP4/MOV, but the user must be using a compatible browser. Thorium or Safari version 13 or greater will work for HEVC playback. There is no maximum size. You could embed a video the size of a raw Blu-ray, although I do not condone this unless you have the necessary legal permissions to do so or you're uploading a Creative Commons licensed movie like Big Buck Bunny while adhering to the restrictions of the applicable Creative Commons license. This also means you can send high-bitrate gaming clips to your friends without any restrictions, assuming you already have a place to upload them. Limitations You can only use hotlinks, which means direct linking to the video itself ending in the appropriate file extension such as .mp4. Cloud services like Google Drive or OneDrive will not work for storage. You cannot use Discord's CDN (cdn.discordapp.com) as the video source. I assume this is because of Discord's proxy blocking embeds over 50 MiB, but only discord.nfp.is can do this, as it proxies cdn.discordapp.com itself. You cannot embed videos in any resolution higher than 3840 x 2160; Discord imposes a hard limit for this on all video after it was discovered that some videos could play normally but then be maliciously scaled to ridiculous resolutions during playback to crash Discord.","s":"Strengths & Limitations","u":"/blog/embedding-the-un-embeddable","h":"#strengths--limitations","p":35},{"i":50,"t":"As mentioned before, there are five known sites at the time of writing. They all serve the same function, but one may interest you more than another due to slight differences in features & functionality. Here are the sites, each with one noteworthy special benefit: https://stolen.shoes - Recognition, as it is the OG. https://discord.nfp.is - You can use Discord CDN as video source. https://embeds.video - Immediately input video source into the URL (https://embeds.video/https://example.com/v/video.mp4) https://x266.mov/discord-embed - Attractive domain, simple layout. https://autocompressor.net/av1 - Lots of info dump, pretty advanced features. That concludes the technical overview! 
Next, let's cover the history of this exploit.","s":"Differences between Sites","u":"/blog/embedding-the-un-embeddable","h":"#differences-between-sites","p":35},{"i":53,"t":"In around April of 2022, a Reddit user going by the name of u/CreativeGamer03 posted a video on r/discordapp of a link where a GIF of Dwayne \"The Rock\" Johnson plays, captioned with \"Is this a GIF or is it a video?\" When played, a low-quality music video of Rick Astley's \"Never Gonna Give You Up\" plays. The link used is now unfortunately removed.","s":"Dwayne","u":"/blog/embedding-the-un-embeddable","h":"#dwayne","p":35},{"i":55,"t":"On 23rd June 2022, a Discord user named Clybius on the AV1 Community server asked people for VP9 or H.264 videos that were over 100 MB in size. At the time, the current 500 MB Nitro tier did not exist. They then decided to use a 59-minute 1080p sample video of nature scenery from around the world with a thumbnail featuring a GIF of a waterfall to test the exploit. It worked. He tried shortly afterward with AV1. Eureka, it also worked. Clybius confirmed that this could be patched if discovered. He cites having had the idea from the Dwayne Johnson example above, but forgetting about it for a couple of months. So, it seems this entire concept stemmed from a silly rickroll.","s":"Discovery","u":"/blog/embedding-the-un-embeddable","h":"#discovery","p":35},{"i":57,"t":"After the discovery of AV1 embedding, experimentation brought about the discovery that any video codec will work as long as the user can decode/play the codec and the container/extension is an MP4, MOV, or WebM. These are all traditionally web-compatible containers. If you're interested in learning about containers, please see the Containers section on the Terminology page. This applies to HEVC, ProRes, xHE-AAC, and other bizarre codecs that are rarely seen on the Web. While experimenting, Clybius converted one of their idle domains, stolen.shoes, into an interactive embedder that provided a textbox for a video URL, a thumbnail URL, a width value, & a height value for the desired video. This would be the first website for Discord embedding.","s":"The Experiments & Interactive Site","u":"/blog/embedding-the-un-embeddable","h":"#the-experiments--interactive-site","p":35},{"i":59,"t":"It wasn't long before people outside of the AV1 Community discovered stolen.shoes, and its popularity increased rapidly. Its use usually involved the illicit distribution of full-length, unauthorized copies of movies; this sometimes happened very shortly after some movies were released. There were a couple of notable instances of this happening that caused quite the stir online each time. The first instance featured the DreamWorks sequel to \"Puss in Boots (2011)\", \"Puss in Boots: The Last Wish (2022)\". A 1080p video sourced from a streaming site was the first wake-up call that attracted attention to the existence of these embed sites. This example used stolen.shoes. The second instance was when the highly-anticipated animated film \"The Super Mario Bros. Movie (2023)\" produced by Illumination, Universal Studios, and Nintendo was spread around Discord. It was first spotted as a Cam (A camera recording by someone in theaters), then as it went out on streaming services a different link appeared but spread faster and with upgraded 1080p quality. Both used stolen.shoes as the embed site. The third instance is very recent as of the day this was posted. 
A streaming-service sourced \"Five Nights at Freddy's (2023)\" was spread around since the movie released both in theaters and on a streaming service (Peacock) on day one, and it gained steam extremely fast as most people had not seen it yet. Currently, this illegal novelty is gaining hundreds of upvotes within the r/discordapp subreddit. The copy seems to be a compressed 720p encode. This example used discord.nfp.is. Note the ones listed here are the ones that I saw become extremely popular. There may be lesser-known links that have been spread around privately or just did not cause enough noise for me to notice. Some less popular examples I've noticed, featuring more illicit copyrighted content distribution: Top Gun Maverick (2022) The SpongeBob trilogy (2005/2015/2020) Spider-Man: Across the Spider-Verse (2023)","s":"Virality","u":"/blog/embedding-the-un-embeddable","h":"#virality","p":35},{"i":61,"t":"The ability to embed unusually large videos on Discord has enabled both positive and negative use cases. On the one hand, it allows high-quality content to be shared easily among friends. However, it has also facilitated mass copyright infringement by empowering virtually anyone with a Discord account to freely spread pirated movies. While this is fascinating from a technical perspective, embedding techniques like these tread a fine ethical line. As with anything, it is important to be mindful of how our actions affect others, and I should remind everyone that content creators deserve to be compensated for their work. As users, we should support them by accessing their content via legitimate platforms. It is hard to say how long this exploit will continue to be usable. Instead of enabling piracy, which may make Discord more likely to patch this exploit if they see it as a serious threat, let's use these capabilities responsibly to share our own creations, gaming highlights, and other media which we can share legally. Given some thoughtfulness, perhaps we can find a fair balance between respecting copyright law and appeasing Discord's sensibilities while allowing some creative flexibility on the platform. Thank you for reading this blog post; I hope you learned something!","s":"Closing","u":"/blog/embedding-the-un-embeddable","h":"#closing","p":35},{"i":63,"t":"A big part of understanding any multimedia codec technology is knowing the application for such technology. For images, a big use case is web delivery. Compared to other multimedia, images are incredibly popular on the Web & knowing how to serve them properly can be a massive boon to your website's traffic as well as less of a headache for users on slower connections or who are under bandwidth constraints. The most disappointing part is that images are often poorly done on the web; all too frequently will you run into a site serving massive photographic PNGs for no reason, or photography sites serving photographs fresh out of the editing software with no thought put into their final delivery. A little effort, patience, & knowledge will go a long way toward improving the user experience for individuals using your site, & this article will illustrate some of the basics. caution These instructions are for photographic images; other kinds of images, like non-photographic, artwork, pixel art, etc. should likely be handled differently. danger Many images won't load properly unless your browser supports JXL, AVIF, & proper ICCv2 color management. 
This is for demonstration purposes only & shouldn't represent an actual common website experience. If you're curious anyway, the following browsers can display the contents of this page perfectly: Thorium | Linux, macOS, Windows, Android Waterfox | Linux, macOS, Windows Mercury | Linux, Windows","s":"Reducing Image Load Online","u":"/blog/site-optimization","h":"","p":62},{"i":65,"t":"First, we'll illustrate what not to do, which is fortunately not incredibly difficult to avoid. Taking an image straight out of your editing software at a massive size will often bloat the size & resolution to something that isn't generally usable for a website regardless of the codec you're using & its quality per bit. It can be argued there are specific use cases that demand incredible resolution & fidelity coexist on the Web, but we won't be covering those here. Here's an example of a bloated image: exported straight from Darktable at JPEG q90, with no scaling 2.2 MB","s":"Fire & Forget","u":"/blog/site-optimization","h":"#fire--forget","p":62},{"i":67,"t":"The easiest way to have a large improvement without doing much work is to simply resize the image before serving it. Even if you exported a lossy JPEG, resizing should remove a lot of artifacts. The way to gauge a worst case for an image's size on a site is to inspect the image element's width & height, which should give us an estimate of how large we should make our image. Any larger than this value is unreasonable since we're overfilling the element's size for no reason & the image is being scaled down anyway. Inspect Element in Firefox. The Mac used to take this screenshot has a relatively high display resolution of 2560x1664. Because Macs scale things differently, we're probably going to want to double the horizontal resolution here. The width is the most important value here, so our new image is going to be exported with a width of 1699 pixels. This new image, encoded at JPEG q90 with cjpegli, looks like this: Obviously, there's lost fidelity compared to the original, but considering this is so much smaller, it is worth the trade-off for many. It is also worth noting we are using an improved JPEG encoder in the form of cjpegli, although that is secondary to the resize. If it doesn't look as good as you want it to, you can always scale the resolution up a bit, though currently, it looks plenty passable for its size. 2.2 MB -> 233 kB","s":"Massive Improvement","u":"/blog/site-optimization","h":"#massive-improvement","p":62},{"i":69,"t":"A bonus tip is to add the loading=\"lazy\" attribute to your picture tag to allow the image to load only when scrolled to by a user. This doesn't save bandwidth, but it improves the user experience by loading images further down the page only when necessary. An example would be the same picture markup as before with loading=\"lazy\" added to the img element inside it.","s":"Lazy Loading","u":"/blog/site-optimization","h":"#lazy-loading","p":62},{"i":71,"t":"If you desire further improvement, it may be time to consider using a newer codec like AVIF or JPEG-XL. These options will compress far more effectively than JPEG, with the only trade-off being browser support. We're not going to consider WebP or HEIC, since WebP is not competitive enough with JPEG for photographic imagery (often being worse) & HEIC has been superseded by AVIF - which sees greater support anyhow - & is not royalty free, effectively preventing widespread Web adoption forever. 
Again, we're just considering lossy compression for photographic images; it is a different story with WebP elsewhere, as it performs well on non-photographic content & is almost always better than PNG for 8-bit lossless compression. So, we are left with JXL & AVIF for now.","s":"New Codecs","u":"/blog/site-optimization","h":"#new-codecs","p":62},{"i":73,"t":"AVIF sees widespread support, but JPEG-XL isn't quite there yet with Web support as Google continues to push AVIF (it is debatable if it ever will be outside the Apple ecosystem). Even with AVIF, adoption isn't remotely close to JPEG, so it is worth providing a fallback. This can look like the following example: Here is a JXL falling back to an AVIF falling back to a WebP falling back to a JPEG. Pretty intense to have this many fallbacks unless you're really after the ultimate compression ratio, but it is certainly an option. AVIF & JPEG alone will probably be enough for most.","s":"Fallbacks","u":"/blog/site-optimization","h":"#fallbacks","p":62},{"i":75,"t":"Let's look at how our image examples compare to the original with our new codec selection. We'll be aiming for high visual fidelity, so around the same quality as our initial JPEG encoded with cjpegli (which scores ~83.01 with the SSIMULACRA2 visual fidelity metric). 137.0 kB JPEG-XL image, encoded with cjxl lossless.png out.jxl -d 1.49 -e 9. Score: ~83.04 3.06s user time 124.8 kB AVIF image, encoded with avifenc -c aom -s 4 -j 8 -d 10 -y 444 --min 1 --max 63 -a end-usage=q -a cq-level=16 -a tune=ssim lossless.png out.avif. Score: ~83.03 7.54s user time JXL also supports lossless transcoding of JPEG images. This means every pixel is identical, the image just has a smaller filesize than the original JPEG; if you can use JXL, you can transcode existing JPEGs losslessly on your site & save some bandwidth that way. The JPEG transcode below gives a higher SSIMULACRA2 score than the original for some reason, but I'll chalk that up to a decoding inconsistency between how the ssimulacra2 program decodes JPEG & JXL. Either way, the scores are fairly close. 189.4 kB JPEG-XL image from JPEG, encoded with cjxl input.jpg input-recomp.jxl -d 0.0 -e 9 --brotli_effort=11. Score: ~84.92 (???) 0.67s user time The final trick we can use, while not a new codec at all, still increases quality per bit. Encoding an XYB JPEG with cjpegli encodes with the perceptual XYB colorspace using an ICC profile to modify the original JPEG colors, avoiding JPEG's normal YCbCr which isn't perceptually optimized for the human visual system. Using XYB, we can afford identical quality with less bitrate than normal JPEG. This has universal compatibility, but not every application understands how to handle the XYB color profile (although color-managed modern browsers should be fine). 208.3 kB XYB JPEG, encoded with cjpegli lossless.png out.jpg --xyb -d 1.155. Score: ~83.04 0.10s user time In this particular instance, AVIF seems to be the overall winner. This isn't always the case due to JXL's superiority at higher fidelity & with more detailed images, but according to SSIMULACRA2, AVIF has the best quality per bit with this image. You can use your own eyes to further clarify your choice, though. 
It is worth mentioning that as these were encoded from a 16-bit source PNG, the JXL image is the only one that maintains the full original bit depth, & AVIF isn't fast to encode.","s":"Compression Efficacy","u":"/blog/site-optimization","h":"#compression-efficacy","p":62},{"i":77,"t":"Displaying an image that is too large for a viewport is a waste of bandwidth, & displaying an image that's too small for the viewport leaves fidelity to be desired. Luckily, we have the Responsive Image Linter that can help us figure out which image sizes we should be using. In our fire & forget example, we see that we are serving an image that is far too large. We already know that, but now we can see that given various viewport sizes we could be serving images that have respective widths of 270px, 958px, 1350px, 1660px, & 1916px to optimize for delivery to a variety of different devices. Here's how we'd write that in HTML (see the sketch following this section): It is worth noting that this example above & the example below aren't perfect implementations of a responsive image given the conditions of this site, but the general concept still applies. Some things to note: srcset = the images available to your browser to serve, & their respective widths sizes = the conditions given to the browser explaining under which conditions it should serve which image (min-width: XXXpx) YYYpx = Given the viewport is at least XXX wide, serve an image of YYY horizontal resolution. The browser will pick an image from the srcset that matches CSS pixels * display scaling. calc(100vw - 24px) = Usually preceded by a (min-width) condition. Specifies a value the browser should calculate on its own to pick the closest option from the srcset. Let's say we have (min-width: 997px) calc(75vw - 257px). This means given the viewport is at least 997px wide, calculate 0.75 * the current viewport resolution - 257 to find the closest image in the srcset to fit the number of pixels specified. That's all! Massive thanks to Auto-Rez Media Technologies for the inspiration behind this article & explicit permission to use their Reduce Your Page's Image Load blog post when writing this entry. I have confirmed with their leadership that this wiki entry can be safely licensed under CC BY-SA 4.0.","s":"Responsive Images","u":"/blog/site-optimization","h":"#responsive-images","p":62},{"i":79,"t":"AV1 for Dummies is a comprehensive, legible guide on how to get started with AV1 at any experience level. Whether you're on Windows using your first video encoding program, or a seasoned Linux user looking to optimize your encoding pipeline, this guide has you covered.","s":"AV1 for Dummies","u":"/blog/av1-for-dummies","h":"","p":78},{"i":81,"t":"AV1 is a royalty-free video codec developed by the Alliance for Open Media. It is designed to replace VP9 and presently competes with H.266. AV1 is known for its high compression efficiency, which the marketing will have you believe reduces file sizes by up to 50% compared to H.264 and up to 30% compared to H.265 across the board. It is supported by several major browsers and is widely used across many streaming services and video platforms.","s":"Introduction","u":"/blog/av1-for-dummies","h":"#introduction","p":78},{"i":83,"t":"Before we dive in, it is important to understand why you may want to use AV1 instead of other codecs. The reality is that AV1 is not better than H.264/5 in every single scenario; video encoding is a complicated field, and the best codec for you will depend on your specific needs.
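Here's a sketch of the responsive markup described in the Responsive Images section above, using the widths suggested by the Responsive Image Linter (270, 958, 1350, 1660, & 1916 px) and the example sizes conditions from the text. The file names are hypothetical, and the breakpoints would need adjusting to a real site's layout.

```html
<img
  src="photo-958.jpg"
  srcset="photo-270.jpg 270w, photo-958.jpg 958w, photo-1350.jpg 1350w,
          photo-1660.jpg 1660w, photo-1916.jpg 1916w"
  sizes="(min-width: 997px) calc(75vw - 257px), calc(100vw - 24px)"
  alt="A photo" />
```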
AV1 excels in: Low to medium-high fidelity encoding Higher resolution encoding Encoding content with very little grain or noise Slow, non-realtime contexts (e.g. offline encoding) The enumeration above still consists of broad strokes, but the point is to understand that AV1 is not a silver bullet. It will not automatically make your videos smaller while preserving your desired quality. To make things more difficult, the x264 & x265 encoders are very mature, while AV1 encoding efforts designed to meet the extremely complicated needs of the human eye are still in their infancy. This guide focuses almost entirely on tools great for offline, non-realtime encoding for various kinds of content. Streaming is an altogether different application of AV1, and is only covered sparsely here.","s":"Why AV1?","u":"/blog/av1-for-dummies","h":"#why-av1","p":78},{"i":85,"t":"Due to a lot of misunderstandings about codecs and compression, there are a lot of common misconceptions that are held regarding video encoding. We'll start by outlining some bad practices: Don't encode the same video multiple times. This is a common mistake made by people new to video encoding. Every time you encode a video, you lose additional quality due to generation loss. This is because video codecs are lossy, and every time you encode a video, you lose more information. This is why it is important to keep the original video file if you frequently re-encode it. Don't blindly copy settings from others without understanding them. What works for one person's content and workflow may not work for yours. Even the default settings on many encoders are not optimal for most content. Don't assume that higher bitrate equates to better quality. Inefficient encoding can waste bits without improving visual quality, and efficient encoding can make lower bitrate video look drastically better than higher bitrate video using the same codec. Don't assume all encoders/presets/settings/implementations are created equal. Even given two encoding frameworks that use the same underlying encoder, you may achieve different results given encoder version mismatches or subtly different settings used under the hood. Don't use unnecessarily slow presets/speeds unless you have a specific need and ample time. While slower presets improve encoding efficiency most of the time, the quality gains reach a point of diminishing returns beyond a certain point. Use the slowest preset you can tolerate, not the slowest preset available. Don't blindly trust metric scores. It is unfortunate how trusted VMAF is considering how infrequently it correlates with visual fidelity in practice now that it has become so popular. Even the beloved SSIMULACRA2 is not a perfect one-to-one with the human eye. Now, let's move on to some good practices: Experiment with different settings and compare the results. Consider your content type when choosing encoding settings. Film, animation, and sports all have different characteristics that benefit from distinct approaches. Try to use CRF for offline encoding, as opposed to CBR or VBR. While the latter two are effective for precisely targeting a particular bitrate, CRF is more effective at targeting a specific quality level efficiently. Always use 10-bit color, even with an 8-bit source. AV1's internal workings are much more suited to 10-bit color, and you are almost always guaranteed quality improvements with zero compatibility penalty as 10-bit color is part of AV1's baseline profile. 
Consider using grain synthesis for grainy content, as AV1 can struggle with preserving film grain efficiently. Keep your encoding software up-to-date; the encoding world moves quickly.","s":"Do's & Don'ts","u":"/blog/av1-for-dummies","h":"#dos--donts","p":78},{"i":87,"t":"Since the last guide was written, several versatile tools have become available for AV1 encoding. Below, we've assembled some tables of popular encoding tools and their features.","s":"Tools","u":"/blog/av1-for-dummies","h":"#tools","p":78},{"i":89,"t":"Here's the key: Encoder(s): The AV1 encoder(s) the software uses or is able to use. This is between SVT-AV1, aomenc, rav1e, SVT-AV1-PSY, and various aomenc forks. We'll get into this more later. Ease of Use: How intuitive the software is, especially for beginners. Complexity: How flexible the software can be for advanced users. Efficiency: How \"good\" the underlying encoder(s) are. This is more subjective, but tools with bad defaults, misleading options, or slow release cycles won't score well here. Name Platform(s) Encoder(s) Ease of Use Complexity Efficiency Aviator Linux SVT-AV1-PSY βββββ βββββ βββββ NMKODER Windows Any βββββ βββββ βββββ Av1ation Station Any Any βββββ βββββ βββββ StaxRip Windows Any βββββ βββββ βββββ NEAV1E Windows Any βββββ βββββ βββββ Handbrake Any SVT-AV1 βββββ βββββ βββββ FastFlix Any Any βββββ βββββ βββββ rAV1ator Linux rav1e βββββ βββββ βββββ Autocompressor Web SVT-AV1 βββββ βββββ βββββ Our top picks are: For beginners: Aviator. If you are on Linux and you want zero hassle whatsoever, Aviator is simple but highly effective. As a Flatpak application, it ships a bundled SVT-AV1-PSY binary that is up-to-date, compiled efficiently, and paired with extensively tested defaults. It is hard to go wrong with Aviator even as an advanced user, despite its simplicity. For advanced users: Av1ation Station. Av1ation Station is thoughtfully designed to be your one-stop shop for video encoding. Even though it is brand new, it has proven itself to be a capable and reliable solution for advanced video encoders that have a lot of settings to tweak and encodes to keep track of. For Windows users: StaxRip. StaxRip is a long-standing, well-maintained, and highly flexible video encoding tool that supports a plethora of encoders and formats. It comes bundled with SVT-AV1-PSY and is a great choice for Windows users who want a powerful and versatile tool for video encoding.","s":"GUI","u":"/blog/av1-for-dummies","h":"#gui","p":78},{"i":91,"t":"In the terminal, advanced users can explore a much greater degree of complexity than many GUIs allow. That being said, ease of use is still a consideration when managing complex command-line encoding workflows. Let's begin with the key: We're omitting the \"Platform(s)\" column here, as this section targets Linux users. As a Windows user, WSL has you covered - as a Mac user, you're likely to experience parity with Linux (most of the time). Name Framework(s) Ease of Use Complexity Features Scene Detection SvtAv1EncApp None βββββ βββββ βββββ No FFmpeg Itself βββββ βββββ βββββ No Av1an FFmpeg βββββ βββββ βββββ Yes rAV1ator CLI Av1an βββββ βββββ βββββ Yes alabamaEncoder FFmpeg βββββ βββββ βββββ Yes Our top picks are: For beginners: rAV1ator CLI. rAV1ator CLI essentially walks you through the process of writing Av1an commands. It can install binaries for you, save your previous commands, and detect scenes via Av1an.
This makes it a great choice for beginners who want to learn the ropes of AV1 encoding without diving into the deep end headfirst. For advanced users, it makes rapidly writing, testing, & cataloging Av1an encoding commands much simpler. For advanced users: alabamaEncoder. alabamaEncoder is a powerful and flexible tool that allows you to encode video with FFmpeg and AV1. It is highly configurable and supports a mind-bending array of powerful features that would be hard to find elsewhere. While the tool is still new, it is already a great choice for advanced users who want to push the boundaries of what is possible to incorporate into an encoding workflow.","s":"CLI","u":"/blog/av1-for-dummies","h":"#cli","p":78},{"i":93,"t":"In conclusion, the best AV1 tool for you will depend on your needs and experience level. If you are a beginner, Aviator is a great choice for Linux users, while StaxRip is a good option for Windows users. For advanced users, Av1ation Station is a powerful and versatile tool that can handle numerous encoding tasks. If you prefer the command line, rAV1ator CLI is a great choice for beginners, while alabamaEncoder is a powerful tool for advanced users. No matter which tool you choose, it is hard to go wrong in the modern AV1 encoding landscape when it comes to your utility of choice.","s":"Conclusion","u":"/blog/av1-for-dummies","h":"#conclusion","p":78},{"i":95,"t":"The world of AV1 encoding is diverse and complex, with several open-source encoders available, each bringing its own set of strengths, weaknesses, and unique features to the table. In this section, we'll dive deep into the characteristics of four major AV1 encoders: SVT-AV1, rav1e, aomenc (libaom), and SVT-AV1-PSY Understanding these encoders is crucial for making informed decisions about what best suits your specific encoding needs.","s":"Encoders","u":"/blog/av1-for-dummies","h":"#encoders","p":78},{"i":97,"t":"SVT-AV1 (Scalable Video Technology for AV1) is an AV1 encoder library and application developed by Intel, Netflix, and others. It has gained significant popularity in the encoding community due to its impressive balance of speed, quality, and scalability. Links: Wiki page: SVT-AV1 Git repository: https://gitlab.com/AOMediaCodec/SVT-AV1 Documentation: https://gitlab.com/AOMediaCodec/SVT-AV1/-/blob/master/Docs/README.md Performance & Scalability SVT-AV1 is renowned for its encoding speed, particularly at higher speed presets. It leverages parallel processing, making it exceptionally efficient on multi-core systems. Fun fact: SVT-AV1's parallel processing is lossless, so it doesn't compromise quality for speed. Quality-to-Speed Ratio SVT-AV1 strikes an impressive balance between encoding speed and output quality. At faster presets, it usually outperforms other encoders in quality per unit of encoding time. While it may not achieve the absolute highest quality per bit possible, its quality is generally considered impressive for its speed. Flexibility SVT-AV1 offers a wide range of encoding options and presets, allowing fine-tuned control over the encoding process. It provides 14 presets (0-13), with 0 being the slowest and highest quality, and 13 being the fastest but lowest quality. Advanced options allow users to adjust parameters like hierarchical levels, intra-refresh type, and tuning modes. Continuous Development SVT-AV1 receives frequent updates and optimizations, with new releases often coming alongside big changes. 
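To ground the CLI comparison above, here's a rough sketch of the kind of Av1an command rAV1ator CLI helps assemble. All values are illustrative rather than recommendations:

```bash
# Chunked AV1 encode with scene detection handled by Av1an:
# SVT-AV1 at preset 6 & CRF 30, with a 10-bit pixel format per the
# "always use 10-bit color" advice earlier in this guide.
av1an -i input.mkv \
      -e svt-av1 \
      -v "--preset 6 --crf 30" \
      --pix-format yuv420p10le \
      -o output.mkv
```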
The open-source nature of the project encourages community contributions and rapid feature development. SVT-AV1 is an excellent choice for a wide range of encoding scenarios. It's particularly well-suited for: High-volume encoding operations where speed is crucial Live or near-live encoding of high-resolution content Scenarios where a balance between quality and encoding speed is required Users with multi-core systems who want to leverage their hardware efficiently Some downsides include: Higher memory usage compared to other encoders The developers assess quality via its performance on traditional legacy metrics, which harms its perceptual fidelity ceiling.","s":"SVT-AV1","u":"/blog/av1-for-dummies","h":"#svt-av1","p":78},{"i":99,"t":"rav1e is an AV1 encoder written in Rust & Assembly. Developed by the open-source community alongside Xiph, it brings a unique approach to AV1 encoding with its focus on safety and correctness. Links: Wiki page: rav1e Git repository: https://github.com/xiph/rav1e Documentation: https://github.com/xiph/rav1e/tree/master/doc#readme Safety & Reliability Being written in Rust, rav1e emphasizes memory safety and thread safety. This focus on safety translates to a more stable and reliable encoding process, with reduced risks of crashes or undefined behavior. High Fidelity At high fidelity targets - an area where AV1 usually lacks - rav1e is a strong contender compared to other encoders. It excels in preserving fine details and textures, making it a good choice for high-fidelity encoding. Quality While not typically matching aomenc or SVT-AV1 in pure compression efficiency, rav1e can produce high-quality output videos. It often achieves a good balance between quality and encoding time, especially at medium-speed settings. Perceptually Driven rav1e's development is driven by visual fidelity, without relying heavily on metrics. This focus on perceptual quality leads to a stronger foundation for future potential improvements in visual quality, as well as making the encoder very easy to use as it does not require excessive tweaking. rav1e is well-suited for: Projects where stability is paramount Users who prioritize a community-driven, open-source development approach Encoding tasks where a balance between quality and speed is needed, but the absolute fastest speeds are not required Some limitations of rav1e include: Lagging development compared to other encoders Slower encoding speeds compared to SVT-AV1 at similar quality & size Fewer advanced options compared to other encoders","s":"rav1e","u":"/blog/av1-for-dummies","h":"#rav1e","p":78},{"i":101,"t":"aomenc, based on the libaom library, is the reference encoder for AV1. Developed by the Alliance for Open Media (AOM), it is the benchmark for AV1 encoding quality and compliance. Links: Wiki page: aomenc Git repository: https://aomedia.googlesource.com/aom/ Encoding Quality aomenc is widely regarded as the gold standard for AV1 encoding quality. It often achieves high compression efficiency among AV1 encoders, especially at slower speed settings. The encoder squeezes out nearly every last bit of efficiency from the AV1 codec, making it ideal for archival purposes or when quality per bit is critical. Encoding Speed aomenc is generally the slowest among major AV1 encoders. It offers 13 CPU speed levels (0-12), but even at its fastest settings, it's typically slower than other encoders at their slower settings. The slow speed is often considered a trade-off for its high compression efficiency. 
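As a point of reference for the rav1e section above, a minimal invocation might look like the sketch below. The values are illustrative; note that rav1e targets quality with a 0-255 quantizer scale rather than CRF:

```bash
# Quality-targeted rav1e encode from a y4m source.
rav1e input.y4m --speed 6 --quantizer 80 --output output.ivf
```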
Extensive Options As the reference implementation, aomenc offers the most comprehensive encoding options. It provides fine-grained control over nearly every aspect of the AV1 encoding process. Advanced users can tweak many parameters to optimize for specific content types or encoding scenarios. Flexibility Being the reference encoder, aomenc produces highly standards-compliant AV1 bitstreams that take advantage of the full arsenal of AV1 features. It supports 4:2:0 and 4:4:4 chroma subsampling, 8- to 12-bit color depth, and various other advanced features that more specialized encoders like SVT-AV1 do not support. aomenc is ideal for: Scenarios where achieving the highest possible quality is the primary goal Archival encoding where compression efficiency is crucial Research and development in video compression Encoding projects where encoding time is not a significant constraint Some drawbacks of aomenc include: Unresponsive development driven by legacy metrics, leading to slower adoption of new techniques and ignoring improvements communicated by people outside the Google development team Cripplingly difficult to use for beginners, with a culture of cargo-culting settings Slow encoding speeds compared to other AV1 encoders, which has less of an impact on the quality of the output than it used to compared to maturing encoders like SVT-AV1","s":"aomenc (libaom)","u":"/blog/av1-for-dummies","h":"#aomenc-libaom","p":78},{"i":103,"t":"SVT-AV1-PSY is a community fork of the SVT-AV1 encoder focused on psychovisual optimizations to enhance perceived visual quality. It aims at closing the distance between SVT-AV1's high speeds and the perceptual quality of aomenc's slow brute force approach. Links: Wiki page: SVT-AV1-PSY Git repository: https://github.com/gianni-rosato/svt-av1-psy Documentation: https://github.com/gianni-rosato/svt-av1-psy/blob/master/Docs/PSY-Development.md Perceptual Optimizations SVT-AV1-PSY introduces various psychovisual enhancements to improve the perceived quality of encoded video. These optimizations often result in output that looks better to the human eye, even if it might not always score as well in objective metrics. Additional Features Introduces new options like variance boost, which can help maintain detail in high-contrast scenes. Offers alternative curve options for more nuanced control over the encoding process. Extends the CRF (Constant Rate Factor) range to 70 (from 63 in mainline SVT-AV1), allowing for extremely low-bitrate encodes. Introduces additional tuning options, including a new \"SSIM with Subjective Quality Tuning\" mode that can improve perceived quality. Visual Fidelity Focus Aims to produce more visually pleasing results, sometimes at the expense of metric performance. Includes options like sharpness adjustment and adaptive film grain synthesis which can significantly impact the visual characteristics of the output. Features modified defaults driven by perceptual quality considerations. Extended HDR Support Includes built-in support for Dolby Vision & HDR10+ encoding. This makes it particularly useful for encoding HDR content without requiring additional post-processing steps or external tools. Performance Based on SVT-AV1, it retains the performance characteristics of its parent encoder. Adds super slow presets (-2 and -3) for research purposes and extremely high-quality encoding. These additional presets can be useful for creating reference encodes or applications where encoding time is not a concern. 
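Tying the aomenc section above together, a hedged sketch of a typical two-pass, constant-quality invocation might look like this (flag values are illustrative, not tuned recommendations):

```bash
# Two-pass constant-quality aomenc encode at 10-bit from a y4m source
# (assumes a libaom build with high bit depth support).
aomenc input.y4m --passes=2 --cpu-used=4 \
  --end-usage=q --cq-level=24 --bit-depth=10 \
  -o output.ivf
```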
SVT-AV1-PSY is particularly well-suited for: Encoding scenarios where subjective visual quality is prioritized over pure metric performance HDR content encoding in Dolby Vision or HDR10+ Users who want fine-grained control over psychovisual aspects of encoding Projects that require a balance between the speed of SVT-AV1 and enhanced visual quality Encoding challenging content with complex textures or high-contrast scenes Some drawbacks are: Everything that applies to SVT-AV1, including the lack of support for 4:4:4 chroma subsampling and 12-bit color depth that are useful in specific scenarios","s":"SVT-AV1-PSY","u":"/blog/av1-for-dummies","h":"#svt-av1-psy","p":78},{"i":105,"t":"While SVT-AV1 is known for being fast, aomenc is renowned for its high-quality output, and rav1e is recognized for its safety and reliability, each encoder has strengths and weaknesses. The best encoder for you will depend on your specific needs and priorities. As this guide is focused on offline encoding, SVT-AV1-PSY combines aomenc's traditional perceptual strength with SVT-AV1's speed. Like rav1e, it is easy to use due to strong default settings that prevent cargo culting, and overall it can be considered the best of all three worlds. It is actively developed by a team of responsive community members, and it is a great choice for most users who want a balance between quality per bit, speed, and ease of use. The \"best\" encoder often depends on your use case, content type, and target audience. It's always worth experimenting with different encoders and settings to find the optimal balance for your needs. Many advanced users even employ multiple encoders in their workflows, choosing the most appropriate tool for each specific task or content type.","s":"Conclusion","u":"/blog/av1-for-dummies","h":"#conclusion-1","p":78},{"i":107,"t":"The AV1 ecosystem has grown to the point where a single comprehensive guide cannot effectively cover the entire extent of the available tools and techniques for AV1 encoding across every use case. If you want more detail about a particular tool, where to acquire it, or how to compile an encoder, you can find that information throughout the various wiki entries linked on this page. Reading can get you far, but it is natural to have questions. Please don't hesitate to connect with the team behind the Codec Wiki and many of these tools via our AV1 for Dummies Discord server. We're happy to help you with anything you need, and your questions and feedback help the wiki grow and improve. We hope you enjoy your journey into AV1 encoding, and we wish you the best of luck in your encoding endeavors!","s":"Final Conclusion","u":"/blog/av1-for-dummies","h":"#final-conclusion","p":78},{"i":109,"t":"AAC, or Advanced Audio Coding, is an umbrella for a number of different codecs. When people refer to AAC, they are often referring to the commonly used AAC-LC profile developed as part of the original AAC standard in 1997 (although there is a distinction between this version of AAC-LC, called MPEG-2 AAC, & MPEG-4 AAC which is newer). However, there are a number of other variants that have been created over time. 
These include: AAC-LC (low-complexity AAC) AAC-LD (low delay AAC) AAC-ELD (enhanced low delay AAC) HE-AAC (high efficiency AAC, uses Spectral Band Replication) HE-AACv2 (high efficiency AAC v2, uses Spectral Band Replication + Parametric Stereo) xHE-AAC (extended high efficiency AAC (kinda), uses USAC (Unified Speech & Audio Coding)) AAC is even used as a Bluetooth audio codec for encoding audio streams & sending them to a Bluetooth audio device. Encoding & even decoding some of the above formats can prove to be difficult, so it is worth exploring each codec individually.","s":"AAC","u":"/docs/audio/AAC","h":"","p":108},{"i":111,"t":"Let's explore each codec individually.","s":"Format Breakdown","u":"/docs/audio/AAC","h":"#format-breakdown","p":108},{"i":113,"t":"While AAC-LC was introduced alongside two higher complexity profiles (AAC Main & AAC-SSR), AAC-LC has seen much more widespread adoption through various distribution mediums for video (often paired with AVC video) & audio alike. AAC-LC is ubiquitous within the Apple ecosystem & was used on YouTube before their switch to primarily using Opus (& is still in use there). Encoding AAC-LC can be done with relative ease, as it is accessible through nearly every encoder in the \"Encoders\" section.","s":"AAC-LC","u":"/docs/audio/AAC","h":"#aac-lc","p":108},{"i":115,"t":"AAC-LD & AAC-ELD are both designed to transmit audio in instances where latency is very important. Both are far more efficient than previous low-latency audio coding offerings. Over AAC-LD, AAC-ELD offers better audio quality through Spectral Band Replication, lower latency, & a greater quality range with a lower bitrate minimum & higher maximum.","s":"AAC-LD & AAC-ELD","u":"/docs/audio/AAC","h":"#aac-ld--aac-eld","p":108},{"i":117,"t":"High Efficiency AAC introduces Spectral Band Replication (SBR) to the AAC specification for the purpose of higher quality audio at lower bitrates. SBR is an encoding technique where the lower frequencies of a signal are encoded alongside compact helper data describing the higher frequencies, allowing the decoder to effectively reconstruct the higher frequencies from the lower ones plus that helper data. This feature alone allows HE-AAC to be much more efficient than AAC-LC at lower bitrates; the gap closes substantially at higher bitrates, however.","s":"HE-AAC","u":"/docs/audio/AAC","h":"#he-aac","p":108},{"i":119,"t":"High Efficiency AAC v2 introduces Parametric Stereo (PS), which further increases audio quality with an emphasis on lower bitrates. PS uses a mono signal downmixed from a stereo input, alongside information about the spatial properties of that input, to allow the decoder to reconstruct the left & right channels from the mono signal & this spatial data. HE-AACv2 combines PS with SBR for greater efficiency gains over HE-AAC & AAC-LC, although again the gap closes at higher bitrates.","s":"HE-AACv2","u":"/docs/audio/AAC","h":"#he-aacv2","p":108},{"i":121,"t":"Extended High Efficiency AAC is actually slightly different from xHE-AAC.
While Extended High Efficiency AAC specifically includes all of HE-AACv2's coding techniques as well as compression techniques from the USAC specification, the Extended High Efficiency AAC profile was designed in such a manner that an encoder built for the format can remain perfectly backwards compatible with past variations of AAC. xHE-AAC is a codec that combines the Extended High Efficiency AAC profile with further USAC coding techniques, specifically within the MPEG-D DRC Loudness Control Profile. For all intents and purposes, when someone refers to \"USAC audio,\" they probably mean xHE-AAC. xHE-AAC further extends performance at lower bitrates compared to past variants of AAC. USAC's specialty & purpose was to create an audio codec that didn't compromise music performance for speech & vice versa, but rather automatically tuned its coding technique selection to perform the best on any given source. xHE-AAC is not widely supported. On a Mac, playback is only possible through QuickTime Player & other utilities which use CoreAudio for decoding. Windows 11 & Android support xHE-AAC natively, but Windows 10 & Linux will not allow you to play it back without some grief. The most effective way currently is to use an AAC plugin with foobar2000, although this plugin is 32-bit only, so you'll need 32-bit foobar2000. This works on Linux under WINE.","s":"xHE-AAC","u":"/docs/audio/AAC","h":"#xhe-aac","p":108},{"i":123,"t":"There are a number of ways to encode each format, which I'll outline below.","s":"Encoders","u":"/docs/audio/AAC","h":"#encoders","p":108},{"i":125,"t":"Fraunhofer FDK AAC is a high quality, open-source AAC encoder by Fraunhofer IIS. It was introduced with the release of Android 4.1 and has since been forked to a separate repository. Issues regarding the legality of its distribution have arisen since the Free Software Foundation declared the license incompatible with the GPL. The license does not grant the user rights to the patented technologies used in the source code, and therefore restricts its use. However, this topic is still debated, as Debian does not consider FDK AAC free software while Red Hat does. A similar tool, FhG-AAC, performs comparably to FDK-AAC but with greater precision, as it uses floating-point operations. It can be used through the proprietary Windows media player Winamp. FDK-AAC can encode the following formats: AAC-LC AAC-LD AAC-ELD HE-AAC HE-AACv2","s":"Fraunhofer FDK AAC","u":"/docs/audio/AAC","h":"#fraunhofer-fdk-aac","p":108},{"i":127,"t":"Apple's proprietary Core Audio Toolbox encoder is a popular, high-quality choice for encoding AAC. This encoder can be used in macOS via Apple's own afconvert utility, or in FFmpeg by specifying aac_at as the audio encoder. Windows users can also take advantage of Core Audio by using the free qaac command-line utility, but they will need to either have iTunes installed or extract the libraries from its installer using the makeportable script. Core Audio can encode the following formats: AAC-LC, HE-AAC, HE-AACv2* *HE-AACv2 encoding is only supported on macOS.","s":"Core Audio","u":"/docs/audio/AAC","h":"#core-audio","p":108},{"i":129,"t":"FFmpeg's native AAC encoder is known for being completely free & open source, licensed under the GNU General Public License (or LGPL, depending on how FFmpeg is configured). If you want a completely FOSS solution for encoding AAC, FFmpeg AAC is one of the only places you'll find this.
It is not known for being particularly feature-rich or high quality & doesn't support every feature of the AAC variants it can encode, but it gets the job done in most scenarios. FFmpeg's AAC encoder can be called with a simple -c:a aac parameter, but there are more granular options available (a basic invocation is sketched after this entry). By default, the profile is aac_low, which encodes MPEG-4 AAC-LC. The mpeg2_aac_low profile encodes simpler, lower-quality MPEG-2 AAC from 1997, while aac_main encodes the more obscure main AAC profile from the 1997 MPEG-2 specification. Finally, aac_ltp encodes the AAC Long Term Prediction profile introduced in MPEG-4. Using FFmpeg AAC, we can encode the following formats: AAC-LC","s":"FFmpeg AAC","u":"/docs/audio/AAC","h":"#ffmpeg-aac","p":108},{"i":131,"t":"FAAC is an old AAC encoder whose name means Freeware Advanced Audio Coder. It is one of the lower quality options, & isn't recommended for general use. It only supports AAC-LC.","s":"FAAC","u":"/docs/audio/AAC","h":"#faac","p":108},{"i":133,"t":"The discontinued Nero AAC audio coder had its last release in 2010. The developer of FAAC worked on Nero AAC, & the Nero AAC encoder & decoder are proprietary. Nero AAC was known to produce decent quality output while it was maintained, although development has since stalled. Nero AAC can encode the following formats: AAC-LC, HE-AAC, HE-AACv2","s":"Nero AAC","u":"/docs/audio/AAC","h":"#nero-aac","p":108},{"i":135,"t":"Exhale is the only encoder on this list capable of encoding xHE-AAC audio. While it does not excel at this task compared to proprietary competition, there isn't really a straightforward way to encode xHE-AAC outside of using exhale on most platforms right now, making it the only xHE-AAC encoder many can realistically consider. It can encode Extended HE-AAC audio with or without eSBR, although encoding using eSBR produces higher quality results. exhale can encode the following formats: xHE-AAC","s":"Exhale","u":"/docs/audio/AAC","h":"#exhale","p":108},{"i":137,"t":"Alternatives to AAC include Opus, Vorbis, MP3, & FLAC, among others. Vorbis & MP3 are considered to be worse, although Vorbis has its moments & is entirely royalty-free unlike AAC. FLAC is a lossless audio codec with an entirely different use case. It is important to reinforce that lossy audio codecs are not replacements for lossless, as good as they might get. Opus is competitive with HE-AACv2 & xHE-AAC more so than the older, worse variants, & is widely supported as well as royalty-free. The open source reference encoder opusenc is an excellent implementation of the format & the tooling is much easier to use compared to AAC's frankly nightmarish assortment of various options with numerous individual downsides. That said, xHE-AAC is generally better than Opus, especially at lower bitrates, a fact that merits consideration.","s":"Conclusion","u":"/docs/audio/AAC","h":"#conclusion","p":108},{"i":139,"t":"Around a month ago, Codec Wiki celebrated its very first anniversary as it officially hit one year old (wow, that's a very long time!). First and foremost, I apologize for this delayed celebration post; I have been very busy for the past month, y'know, real life things.
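Picking up the FFmpeg AAC entry above, a minimal native-encoder invocation might look like the following; the bitrate and file names are illustrative:

```bash
# Encode to MPEG-4 AAC-LC (the default aac_low profile) at 192 kb/s.
ffmpeg -i input.wav -c:a aac -b:a 192k -profile:a aac_low output.m4a
```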
Second of all, I would like to say THANK YOU to everyone who helped build this wiki from the ground up and wrote the entries; you know who you are!","s":"Codec Wiki: One Year Later","u":"/blog/codec-wiki-one-year-later","h":"","p":138},{"i":141,"t":"Thanks to everyone's efforts, over the course of 12 months Codec Wiki has grown into the (almost) go-to documentation and guide for newcomers just starting out in the quirky and wild world of multimedia encoding. So here are some pretty graphs to look at our progress so far!","s":"Stats","u":"/blog/codec-wiki-one-year-later","h":"#stats","p":138},{"i":143,"t":"As you may know, we run our own Plausible Analytics instance, which is a self-hosted, open-source, privacy-respecting analytics platform. It is currently hosted in Singapore and available for everyone to view here. 7.7K unique total visitors for a whole year may not be much, but it's a great start! And let's not forget those who use uBlock Origin or similar to block the analytics script; the real visitor count could be two or three times higher!","s":"Plausible","u":"/blog/codec-wiki-one-year-later","h":"#plausible","p":138},{"i":145,"t":"For those who are unfamiliar, Google Search Console is used for viewing, well... Google search data for your website. This data is unfortunately private; only the owner and those invited can see it, with no setting available for public view. Probably for the better. And for our top 10 search queries in the last 28 days: With pretty much zero competition in this niche of a market, it skyrocketed Codec Wiki to the top of search results for everything multimedia related
.","s":"Google Search Console","u":"/blog/codec-wiki-one-year-later","h":"#google-search-console","p":138},{"i":147,"t":"Around a year ago, I started work on an unofficial \"wiki\" for all things encoding. It initially started as a half-serious joke, because I was annoyed that there was no good documentation available anywhere on the internet on the real application and usage of encoders, answering questions such as \"Should I use tune SSIM in x encoder?\". The search results you'll be presented with are one or more of these: Doesn't exist. Mentioned in the FFmpeg mailing list but nothing else. PDF research papers on IEEE and similar. Outdated information on Doom9 or Multimedia Wiki. \"The documentation are in the code blocks\". Link rot. Abandoned, or mostly focused on other topics. Focused on a specific type of storytelling medium. Actual good explanation in an old forgotten site (rare). Some or maybe even most of these results still appear to this day. I was frustrated with how scarce information is on the open internet, combined with the fact that most, if not all, of it exists only in non-search-engine-reachable private chat communities such as Discord, where you'll have to join and scroll through older conversations. These are prone to data erasure, as Discord can nuke these servers for whatever reason they please, and years of \"documentation\" are lost in an instant. It is simply not a good platform for a plethora of valuable information. That's why I took the initiative.","s":"A Brief History","u":"/blog/codec-wiki-one-year-later","h":"#a-brief-history","p":138},{"i":149,"t":"There was also a lot of distrust and pushback when this project first began, particularly from those more experienced with encoding. This is understandable, because everyone who attempted it before burned out and never finished. But Codec Wiki stood the test of time with constant updates through the active collaboration of everyone involved; nowadays those voices have mostly subsided, or even joined the project themselves. And I am grateful for that.","s":"Bumps in the Road","u":"/blog/codec-wiki-one-year-later","h":"#bumps-in-the-road","p":138},{"i":151,"t":"Once again, THANK YOU VERY MUCH to those who are involved in this project and for turning the once pipe dream of one-stop-shop, user-friendly documentation for encoding into something real. Let's continue documenting the most crucial yet invisible driving force of internet traffic, page by page. Together we are strong; divided we burn out. \"There are times when a critic truly risks something, and that is the discovery and defense of the new. The world is often unkind to new talent, new creations. The new needs friends.\" – Anton Ego","s":"Closing Statement","u":"/blog/codec-wiki-one-year-later","h":"#closing-statement","p":138},{"i":153,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"ALAC","u":"/docs/audio/ALAC","h":"","p":152},{"i":155,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. Dolby Digital is a family of both lossless and lossy audio compression algorithms and technologies.","s":"Dolby Digital","u":"/docs/audio/Dolby","h":"","p":154},{"i":158,"t":"Originally known as Dolby Digital, AC-3 was first released in 1991 to provide digital 5.1 sound in cinemas from 35mm film reels.
AC-3 is notable for being the first audio codec to make use of the \"Modified Discrete Cosine Transform\" algorithm. The codec has seen widespread use and adoption, due to its prevalence in DVDs, TV, and Blu-rays as a surround codec. Dolby Digital Surround EX Like Dolby's earlier Pro Logic technology, Dolby Digital Surround EX matrixes a sixth, centre back surround channel into the left and right surround channels of a 5.1 stream, allowing for a 6.1 mix to be unfolded when played on a 6.1 or 7.1 system with EX decoding. This technology is fully backwards compatible with existing AC-3 decoders, producing the standard 5.1 stream. Surround EX was first introduced in 1999 with the release of \"Star Wars: Episode I – The Phantom Menace\".","s":"AC-3","u":"/docs/audio/Dolby","h":"#ac-3","p":154},{"i":160,"t":"Often referred to as \"Dolby Digital Plus\", E-AC-3 is the successor to Dolby's earlier AC-3 codec, featuring support for higher bitrates (6,144 kbps vs 640 kbps), more channels (15 vs 5), and additional coding tools allowing for more efficient encoding. E-AC-3 can be found in the short-lived HD-DVD format, Blu-ray discs, and as the main surround codec for most streaming services, particularly if Dolby Atmos is used. Contrary to popular belief, E-AC-3 is not backwards compatible with AC-3; rather, Dolby mandates that all E-AC-3 decoders can also decode standard AC-3 content. As E-AC-3 is an optional codec on Blu-ray, all discs encoded with E-AC-3 encode the first 5.1 channels as AC-3, with the additional rear channels/Atmos content being encoded as E-AC-3.","s":"E-AC-3","u":"/docs/audio/Dolby","h":"#e-ac-3","p":154},{"i":162,"t":"Dolby's TrueHD is a lossless multi-channel audio codec based on Meridian's Lossless Packing (MLP) codec, although the two aren't compatible with each other. TrueHD is mainly used on Blu-ray and supports Dolby Atmos's spatial audio data. The TrueHD specification supports up to 16 audio channels (although the Blu-ray specification limits this to 7.1) with a sample rate of 192 kHz and a bit depth of 24 bits. As TrueHD is an optional codec on Blu-ray, each TrueHD stream includes a backup AC-3 stream encoded alongside it for compatibility purposes. Since 2010, Dolby TrueHD has seen a decline in usage in favour of DTS-HD Master Audio on Blu-ray discs, but has seen a slight resurgence as the codec used for Dolby Atmos audio; DTS-HD MA is still more common on non-Atmos titles.","s":"TrueHD","u":"/docs/audio/Dolby","h":"#truehd","p":154},{"i":164,"t":"To be added.","s":"AC-4","u":"/docs/audio/Dolby","h":"#ac-4","p":154},{"i":166,"t":"To be added.","s":"Atmos","u":"/docs/audio/Dolby","h":"#atmos","p":154},{"i":168,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! FLAC (Free Lossless Audio Codec) is an open-source lossless audio codec with widespread support & compatibility, released in 2001. It represents the most efficient lossless audio format in common use today. FLAC audio is commonly stored either in its native container with a .flac extension or in an Ogg container with a .ogg extension. It can less commonly be used within a Matroska container (.mkv or .mka) for muxing with a video stream. caution It is not recommended to transcode a lossily encoded file to FLAC, as the file size will grow tremendously while any quality loss from lossy encoding will remain.
FLAC is best if you need to preserve existing lossless audio.","s":"FLAC","u":"/docs/audio/FLAC","h":"","p":167},{"i":170,"t":"FLAC is supported by the majority of web browsers and media players in common use as of 2024.","s":"Software support","u":"/docs/audio/FLAC","h":"#software-support","p":167},{"i":172,"t":"ffmpeg -i example.wav -c:a flac example.flac","s":"WAV to FLAC using FFmpeg:","u":"/docs/audio/FLAC","h":"#wav-to-flac-using-ffmpeg","p":167},{"i":174,"t":"You can include an argument of a number 0-8 to specify the compression effort, 0 being fastest and 8 having the highest compression. flac example.wav -8 -o example.flac","s":"WAV to FLAC using FLAC command-line tool:","u":"/docs/audio/FLAC","h":"#wav-to-flac-using-flac-command-line-tool","p":167},{"i":176,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. Digital audio is the representation of sound recorded in, or converted into, digital form. To understand digital audio, it's crucial to grasp some fundamental concepts, including sampling, Nyquist Frequency, and the Nyquist-Shannon Sampling Theorem.","s":"Introduction to Lossy & Lossless Audio Compression","u":"/docs/audio/intro","h":"","p":175},{"i":178,"t":"Sampling is the process of converting a continuous, analog audio signal into a discrete digital signal by measuring the amplitude of the audio signal at uniform intervals. The frequency of this measurement is known as the sampling rate, typically measured in samples per second, or Hertz (Hz). For example, audio CDs use a sampling rate of 44,100 Hz, which means the audio signal is sampled 44,100 times every second. To accurately represent a wave, you need at least two measurements per cycle; one to capture the peak of the wave, and one to capture the trough. If you sample less than twice per cycle, you can't distinguish between different frequencies; this is where the Nyquist frequency comes from. Named after Harry Nyquist, the Nyquist frequency is half of the sampling rate of a discrete signal processing system. For a given sampling rate, the Nyquist frequency represents the highest frequency that can be accurately sampled without introducing errors such as aliasing. For example, with a sampling rate of 48,000 Hz, the Nyquist frequency is 24,000 Hz. Capturing frequencies above the Nyquist frequency for a given system can bring about aliasing artifacts. Aliasing occurs where high-frequency components appear as lower frequencies in the sampled signal, distorting the information. The Nyquist-Shannon sampling theorem states that to avoid aliasing, the sampling rate must be at least twice the highest frequency present in the signal. This theorem is crucial for ensuring that the digital representation of the audio signal retains all the information from the original analog signal without distortion.","s":"Sampling & the Nyquist Frequency","u":"/docs/audio/intro","h":"#sampling--the-nyquist-frequency","p":175},{"i":180,"t":"The main benefit of lossless compression is the preservation of audio quality, making it ideal for professional audio production, archiving, and situations where high fidelity is required. However, lossless files are significantly larger than their lossy counterparts, which can be a drawback for storage and (especially) streaming. Lossless Compression If you would like to dive more deeply into the topic of lossless compression, you can check out the Lossless Compression entry in the Introduction section of the wiki. 
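Circling back to the sampling section above, the two relations it describes can be written compactly, plugging in the CD-audio figures from the text:

```latex
f_N = \frac{f_s}{2}, \qquad f_s \ge 2 f_{\max}
% e.g. CD audio: f_s = 44100 Hz, so f_N = 22050 Hz; frequencies up to
% 22.05 kHz can be represented without aliasing.
```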
FLAC, WavPack, & ALAC are examples of popular lossless audio codecs that you are likely to encounter in the wild.","s":"Lossless Audio Compression","u":"/docs/audio/intro","h":"#lossless-audio-compression","p":175},{"i":182,"t":"The primary advantage of lossy compression is the significant reduction in file size, making it ideal for streaming, portable devices, and situations where storage space is limited. However, the trade-off is a potential loss in audio quality, which may be noticeable in critical listening environments. Lossy Compression If you would like to dive more deeply into the topic of lossy compression, you can check out the Lossy Compression entry in the Introduction section of the wiki. MP3, AAC, Vorbis, & Opus are some examples of popular lossy audio codecs that you are likely to encounter in the wild.","s":"Lossy Audio Compression","u":"/docs/audio/intro","h":"#lossy-audio-compression","p":175},{"i":184,"t":"Understanding these baseline principles of digital audio processing is essential for informed reading when it comes to the rest of the Audio section of this wiki. We hope this page helped you grasp some of the fundamental concepts that underpin digital audio processing.","s":"Conclusion","u":"/docs/audio/intro","h":"#conclusion","p":175},{"i":186,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! MP3, formally known as MPEG-1 Audio Layer III or MPEG-2 Audio Layer III, is a coding format for digital audio. It was developed largely by the Fraunhofer Society in Germany under the lead of Karlheinz Brandenburg, with support from other researchers in other countries. MP3 is defined in two ISO/IEC specification families: MPEG-1: 11172-3 and MPEG-2: 13818-3. It uses lossy compression, which often allows for large reductions in file size compared to uncompressed audio. Lossy MP3 compression works by reducing (or approximating) the accuracy of certain components of sound that could be considered (by some psychoacoustic analysis) to be beyond the hearing capabilities of most humans, storing only the coefficients corresponding to the more salient frequency bands. Compared to CD-quality digital audio, MP3 compression can commonly achieve a 75 to 95% reduction in size. For example, an MP3 encoded at a constant bit rate of 128 kbit/s would result in a file approximately 9% of the size of the original CD audio. MP3 audio is considered transparent at 320kb/s. It is still very common to see MP3 files in the wild today, despite the fact that the format was finalized in 1993 (with modifications in 1995 to support lower sample rates and bit rates). This is due to the fact that MP3 could be considered the first widely adopted audio format that allowed high quality audio to be compressed to a relatively small file size. Compared to more modern formats like Opus and AAC, MP3 may not seem as impressive, but it is still widely supported by many devices and pieces of software, and it has certainly left modern codecs a powerful legacy to live up to.","s":"MP3","u":"/docs/audio/MP3","h":"","p":185},{"i":188,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. Opus is an open-source audio codec that has largely replaced Vorbis as the standard open audio codec. It is the recommended codec for usage in WebM video containers in tandem with the VP9 or AV1 video codecs.
Opus is known for its incredible coding efficiency and unique multi-channel optimizations. Stereo Opus audio reaches transparency (psychoacoustically lossless audio quality) at 128kb/s, compared to AAC's generally agreed upon 256kb/s and MP3's 320kb/s. Transparency varies based on the type of content & the encoding implementation used, especially for codecs other than Opus, and the values provided above may be debated to a degree. Opus is described on opus-codec.org as a \"totally open, royalty-free, highly versatile audio codec. Opus is unmatched for interactive speech and music transmission over the Internet, but is also intended for storage and streaming applications. It is standardized by the Internet Engineering Task Force (IETF) as RFC 6716 which incorporated technology from Skype's SILK codec and Xiph.Org's CELT codec.\" Opus supports the following features: Bitrates from 6 kb/s to 510 kb/s (with a maximum of around 255 kb/s per channel on non-stereo layouts) Sampling rates from 8 kHz (narrowband) to 48 kHz (fullband) Frame sizes from 2.5 ms to 60 ms Support for both constant bitrate (CBR) and variable bitrate (VBR) Audio bandwidth from narrowband to fullband Support for speech and music Support for mono and stereo Support for up to 255 channels (multistream frames) Dynamically adjustable bitrate, audio bandwidth, and frame size Good loss robustness and packet loss concealment (PLC) Floating point and fixed-point implementation via opus-codec.org and wiki.hydrogenaud.io.","s":"Opus","u":"/docs/audio/Opus","h":"","p":187},{"i":190,"t":"Opus is a hybrid audio codec, composed of two codecs as mentioned above: Skype's SILK codec for voice & Xiph.Org's CELT codec. Opus's initial name, Harmony, may have come from the \"harmony\" of these two codecs and the word's musical connotation.","s":"Format Breakdown","u":"/docs/audio/Opus","h":"#format-breakdown","p":187},{"i":192,"t":"SILK, initially from Skype, was designed to be used for voice calls on Microsoft products like Skype. The first stable release of the codec was in 2009, and since then it has been freely licensed under the BSD 2-Clause license, which has allowed for its adoption into Opus. The version of SILK used in Opus is substantially modified from - and not compatible with - the standalone SILK codec previously described here. SILK is optimized for speech, and so has limited sample rates as follows: Narrowband: 3-4000 Hz Mediumband: 3-6000 Hz Wideband: 3-8000 Hz SILK's latency is 10 to 60 ms based on the desired framesize, + 5 ms lookahead to estimate noise shaping + (potentially) 1.5 ms sampling rate conversion overhead if the input audio needs to be resampled.","s":"SILK","u":"/docs/audio/Opus","h":"#silk","p":187},{"i":194,"t":"Much like SILK, CELT is under the BSD 2-Clause license. The preview release came out in 2011. CELT stands for \"Constrained-Energy Lapped Transform\" and was designed to be the true successor to Vorbis, even being dubbed \"Vorbis II\" during its initial development as part of Xiph.Org's \"Ghost\" project in 2005. CELT was designed to be a full-band general purpose codec without a particular specialization for a certain kind of audio, making it distinctly different from Xiph's Speex codec & more similar to Vorbis. It is computationally simple relative to competing codec technologies like AAC & even Vorbis, enabling extremely low latency that is competitive with AAC-LD.
CELT can work with the following sample rates: Narrowband: 3-4000hz Mediumband: 3-6000hz Wideband: 3-8000hz SuperWideband: 3-12000hz Fullband: 3-20000hz","s":"CELT","u":"/docs/audio/Opus","h":"#celt","p":187},{"i":197,"t":"Opus's reference encoder is opusenc, which is known for its fantastic performance and versatility. It is licensed under the BSD 3-clause license as part of the reference libopus library. There are a myriad of options that may be used to encode with opusenc, but the utility is considered to have sane encoding defaults for local storage & playback. The best options will be outlined below. Usage: opusenc [options] input_file output_file.opus --bitrate #.### Sets the overall target bitrate in kbit/s. Most encoders use bits per second, meaning you have to specify \"128K\" for 128kbit/s for example. Opus doesn't follow this, so you'd just have to type \"128\" though keep in mind using efficient VBR encoding means the final bitrate may be different than the target. Opus supports bitrates from 6 kb/s to 510 kb/s. --vbr Tells the encoder to encode using a variable bit rate, allocating more or less bits when necessary to preserve overall fidelity per bit. This is the best option for local storage & playback, and is enabled by default. --cvbr Tells the encoder that it is allowed to vary the bitrate like with VBR, but it must constrain the maximum bitrate at any given moment to the value provided. --hard-cbr Tells the encoder to use a constant bitrate the whole time. --music & --speech Forces the AI content-detector built into opusenc to treat the input as either speech or music. The bitrate range where this is relevant is around 12-40kb/s. --comp # Sets the encoder complexity to a value from 0 to 10, 0 being the least complex & 10 being the most. The default is 10. --framesize # Sets the maximum encoder frame size in milliseconds. Lowering this is useful for improving latency at the expense of audio quality per bit. It is worth noting that 40 & 60ms framesizes are just multiple 20ms frames stitched together via opusenc's default behavior, and are not considered useful as they just lower the encoder's adaptability which can worsen both latency & coding efficiency. The default value is 20. --expect-loss # Percentage value for expected packet loss. Not useful for local encoding & playback, but useful for real-time applications. Default value is 0. --downmix-mono Downmixes multiple channels into a single channel. --downmix-stereo Downmixes multiple channels into two channels, left & right, given more than two channels are provided to the encoder. --no-phase-inv Disables phase inversion. Helpful when downmixing stereo to mono, although this is the default behavior in that scenario since libopus 1.3. Slightly decreases stereo audio quality. --max-delay # Sets maximum container delay in milliseconds, from 0-1000. Default is 1000. Looking at the default values for the encoder flags, opusenc almost always follows the best practices for every default value. This makes it very easy to use, and it is as simple as plugging in a source of some kind and using only the most basic commands to encode with opus. An example opusenc command: opusenc \"input.wav\" \"output.opus\" --bitrate 96 FFmpeg using libopus: ffmpeg -i \"input.flac\" -c:a libopus -b:a 128K \"output.ogg\" If you'd like to learn more about opusenc & its recommended default behavior, read this article on Opus Recommended Settings. 
Existing bug in ffmpeg Due to a bug in ffmpeg (#5718), ffmpeg won't automatically remap 5.1(side) to 5.1 when using libopus. To remap the channel layout explicitly, try this: ffmpeg -i \"input.flac\" -c:a libopus -af aformat=channel_layouts=5.1 \"output.ogg\" tip You can handle arbitrary audio stream mappings with this: -af aformat=channel_layouts=7.1|5.1|stereo -mapping_family 1","s":"Opusenc","u":"/docs/audio/Opus","h":"#opusenc","p":187},{"i":199,"t":"FFopus is an experimental native Opus encoder in FFmpeg. It is not widely regarded as providing any decent uplift in coding efficiency compared to libopus, and is usually considered worse; its only merit is being able to handle 5.1(side) streams while libopus in FFmpeg cannot. It only implements the CELT part of the Opus codec. FFopus usage: ffmpeg -i \"input.wma\" -c:a opus -b:a 128K -strict -2 \"output.opus\"","s":"FFopus","u":"/docs/audio/Opus","h":"#ffopus","p":187},{"i":201,"t":"VAC, or Value Added Codec, is a libopus encoder that uses SoX to resample inputs & supports output to .ogg rather than exclusively .opus. Better resampling theoretically leads to better coding efficiency, but vac-enc hasn't been thoroughly tested. Encoding a 16-bit signed little endian pcm_s16le WAV to 128kbit/s Opus in an Ogg container: vac-enc input.wav output.ogg 128","s":"vac-enc","u":"/docs/audio/Opus","h":"#vac-enc","p":187},{"i":203,"t":"There are many aspects which determine how the color information for a video is stored and how it is rendered. As technology has improved, new standards have developed, and with new technologies such as HDR, standards continue to evolve. However, the result is that it can be confusing to know which color settings to use for a given video. Some properties, such as the color format, must be set. However, properties such as color range, primaries, matrix coefficients, and transfer function are optional. It is always best practice to set these when you are encoding a video (a brief FFmpeg tagging sketch appears further below), because if they are not set, the player must make a guess as to what the correct settings are. If it guesses incorrectly, this can lead to the colors of the video being different from what was intended.","s":"Intro","u":"/docs/colorimetry/intro","h":"","p":202},{"i":205,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! Vorbis is an open-source audio codec that has seen great success in its usage by Spotify, among others. It is the default audio codec for Minecraft's sounds & music. It has largely been replaced by Opus.","s":"Vorbis","u":"/docs/audio/Vorbis","h":"","p":204},{"i":207,"t":"Matrix coefficients represent the multiplication matrix that is used when converting from YUV to RGB. As with primaries, the integer values are defined within universal specifications, and as such they will be the same across all encoding and playback tools. The following values are available:","s":"Matrix Coefficients","u":"/docs/colorimetry/matrix","h":"","p":206},{"i":209,"t":"Specifies that the identity matrix should be used, i.e. this data is already in an RGB-compatible colorspace. This matrix coefficient setting is used in the following standards: GBR (often referred to as RGB) YZX (often referred to as XYZ) IEC 61966-2-1 sRGB SMPTE ST 428-1 (2019)","s":"0: Identity","u":"/docs/colorimetry/matrix","h":"#0-identity","p":206},{"i":211,"t":"BT.709 is the standard used for modern high-definition video, and is a safe default assumption.
This matrix coefficient setting is used in the following standards: Rec. ITU-R BT.709-6 Rec. ITU-R BT.1361-0 conventional colour gamut system and extended colour gamut system (historical) IEC 61966-2-4 xvYCC709 SMPTE RP 177 (1993) Annex B","s":"1: BT.709","u":"/docs/colorimetry/matrix","h":"#1-bt709","p":206},{"i":213,"t":"This value indicates that no color matrix is set for the video, and the player must decide which value to use. mpv will use the following heuristics in this case: if width >= 1280 || height > 576 { \"BT.709\" } else { \"SMPTE 170M\" }","s":"2: Unspecified","u":"/docs/colorimetry/matrix","h":"#2-unspecified","p":206},{"i":215,"t":"BT.470M is a standard that was used in analog television systems in the United States.","s":"4: BT.470M","u":"/docs/colorimetry/matrix","h":"#4-bt470m","p":206},{"i":217,"t":"BT.470BG is a standard that was used for European (PAL) television systems and DVDs. This matrix coefficient setting is used in the following standards: Rec. ITU-R BT.470-6 System B, G (historical) Rec. ITU-R BT.601-7 625 Rec. ITU-R BT.1358-0 625 (historical) Rec. ITU-R BT.1700-0 625 PAL and 625 SECAM IEC 61966-2-1 sYCC IEC 61966-2-4 xvYCC601","s":"5: BT.470BG","u":"/docs/colorimetry/matrix","h":"#5-bt470bg","p":206},{"i":219,"t":"SMPTE 170M is a standard that was used for NTSC television systems and DVDs. Its matrix coefficients are equivalent to BT.470BG. This matrix coefficient setting is used in the following standards: Rec. ITU-R BT.601-7 525 Rec. ITU-R BT.1358-1 525 or 625 (historical) Rec. ITU-R BT.1700-0 NTSC SMPTE ST 170 (2004)","s":"6: SMPTE 170M","u":"/docs/colorimetry/matrix","h":"#6-smpte-170m","p":206},{"i":221,"t":"SMPTE 240M was an interim standard used during the early days of HDTV (1988-1998).","s":"7: SMPTE 240M","u":"/docs/colorimetry/matrix","h":"#7-smpte-240m","p":206},{"i":223,"t":"The YCoCg color model, also known as the YCgCo color model, is the color space formed from a simple transformation of an associated RGB color space into a luma value and two chroma values called chrominance green and chrominance orange.","s":"8: YCgCo","u":"/docs/colorimetry/matrix","h":"#8-ycgco","p":206},{"i":225,"t":"BT.2020 is a standard used for ultra-high-definition video, i.e. 4K and higher. It may be used with or without HDR, as HDR is defined by the transfer characteristics. If you do not know if you want non-constant or constant luminance, you probably want non-constant. This matrix coefficient setting is used in the following standards: Rec. ITU-R BT.2020-2 (non-constant luminance) Rec. ITU-R BT.2100-2 Y′CbCr","s":"9: BT.2020 Non-Constant Luminance","u":"/docs/colorimetry/matrix","h":"#9-bt2020-non-constant-luminance","p":206},{"i":227,"t":"This is a variant of BT.2020 with constant luminance values, represented using the YcCbcCrc colorspace. You probably want the non-constant luminance variant instead, unless you know you want this one.","s":"10: BT.2020 Constant Luminance","u":"/docs/colorimetry/matrix","h":"#10-bt2020-constant-luminance","p":206},{"i":229,"t":"SMPTE 2085 is a standard used with HDR signals in the XYZ colorspace.
I've never actually seen it used in the wild.","s":"11: SMPTE 2085","u":"/docs/colorimetry/matrix","h":"#11-smpte-2085","p":206},{"i":231,"t":"I'm not really sure when you would use this.","s":"12: Chromaticity-Derived Non-Constant Luminance","u":"/docs/colorimetry/matrix","h":"#12-chromaticity-derived-non-constant-luminance","p":206},{"i":233,"t":"I'm not really sure when you would use this.","s":"13: Chromaticity-Derived Constant Luminance","u":"/docs/colorimetry/matrix","h":"#13-chromaticity-derived-constant-luminance","p":206},{"i":235,"t":"ICtCp is an alternative colorspace developed by Dolby for use with HDR and wide-gamut video, because they love doing extra stuff like this. I've never actually seen it used in the wild.","s":"14: ICtCp","u":"/docs/colorimetry/matrix","h":"#14-ictcp","p":206},{"i":237,"t":"This section details the first of three settings that are important for retaining accurate color when encoding videos, those settings being primaries, color matrix, and transfer characteristics. Color primaries are used to indicate the correct coordinates for the red, blue, and green colors. There are historical reasons for why so many standards exist, and this guide will not go in depth into history lessons, but will explain what primaries are available and when to use each one. Note that for primaries, matrices, and transfer, you can view the values that are set on a video using a tool like MediaInfo. If there are no values set, the player will need to guess which values to use. A safe default assumption for most modern videos is BT.709, although this may vary depending on source and resolution for the video. It is strongly recommended to set the correct values when encoding. Each setting has at least one name and exactly one integer value representing it--most encoder software will accept one or more of the names, but some tooling such as Vapoursynth and MKVToolnix accepts the integer values instead. The integer values are defined within universal specifications, and as such they will be the same across all encoding and playback tools.","s":"Color Primaries","u":"/docs/colorimetry/primaries","h":"","p":236},{"i":239,"t":"BT.709 is the standard used for modern high-definition video, and is a safe default assumption. This color primary setting is used in the following standards: Rec. ITU-R BT.709-6 Rec. ITU-R BT.1361-0 conventional colour gamut system and extended colour gamut system (historical) IEC 61966-2-1 sRGB or sYCC IEC 61966-2-4 Society of Motion Picture and Television Engineers (SMPTE) RP 177 (1993) Annex B","s":"1: BT.709","u":"/docs/colorimetry/primaries","h":"#1-bt709","p":236},{"i":241,"t":"This value indicates that no color primary is set for the video, and the player must decide which value to use. mpv will use the following heuristics in this case: if matrix == \"BT.2020\" { \"BT.2020\" } else if matrix == \"BT.709\" { \"BT.709\" } else if width >= 1280 || height > 576 { \"BT.709\" } else if height == 576 { \"BT.470BG\" } else if height == 480 || height == 488 { \"SMPTE 170M\" } else { \"BT.709\" }","s":"2: Unspecified","u":"/docs/colorimetry/primaries","h":"#2-unspecified","p":236},{"i":243,"t":"BT.470M is a standard that was used in analog television systems in the United States. This color primary setting is used in the following standards: Rec.
ITU-R BT.470-6 System M (historical) United States National Television System Committee 1953 Recommendation for transmission standards for color television United States Federal Communications Commission (2003) Title 47 Code of Federal Regulations 73.682 (a) (20)","s":"4: BT.470M","u":"/docs/colorimetry/primaries","h":"#4-bt470m","p":236},{"i":245,"t":"BT.470BG is a standard that was used for European (PAL) television systems and DVDs. This color primary setting is used in the following standards: Rec. ITU-R BT.470-6 System B, G (historical) Rec. ITU-R BT.601-7 625 Rec. ITU-R BT.1358-0 625 (historical) Rec. ITU-R BT.1700-0 625 PAL and 625 SECAM","s":"5: BT.470BG","u":"/docs/colorimetry/primaries","h":"#5-bt470bg","p":236},{"i":247,"t":"SMPTE 170M is a standard that was used for NTSC television systems and DVDs. Rec. ITU-R BT.601-7 525 Rec. ITU-R BT.1358-1 525 or 625 (historical) Rec. ITU-R BT.1700-0 NTSC SMPTE ST 170 (2004)","s":"6: SMPTE 170M","u":"/docs/colorimetry/primaries","h":"#6-smpte-170m","p":236},{"i":249,"t":"SMPTE 240M was an interim standard used during the early days of HDTV (1988-1998). Its primaries are equivalent to SMPTE 170M.","s":"7: SMPTE 240M","u":"/docs/colorimetry/primaries","h":"#7-smpte-240m","p":236},{"i":251,"t":"This represents generic film using Illuminant C.","s":"8: Film","u":"/docs/colorimetry/primaries","h":"#8-film","p":236},{"i":253,"t":"BT.2020 is a standard used for ultra-high-definition video, i.e. 4K and higher. It may be used with or without HDR, as HDR is defined by the transfer characteristics. This color primary setting is used in the following standards: Rec. ITU-R BT.2020-2 Rec. ITU-R BT.2100-2","s":"9: BT.2020","u":"/docs/colorimetry/primaries","h":"#9-bt2020","p":236},{"i":255,"t":"SMPTE 428 is used for D-Cinema Distribution Masters, aka DCDM. This color primary setting is used in the following standards: SMPTE ST 428-1 (2019) (CIE 1931 XYZ as in ISO 11664-1)","s":"10: SMPTE 428","u":"/docs/colorimetry/primaries","h":"#10-smpte-428","p":236},{"i":257,"t":"DCI-P3 is a wide-gamut colorspace used alongside RGB. It is used internally by most HDR monitors on the market.","s":"11: DCI-P3","u":"/docs/colorimetry/primaries","h":"#11-dci-p3","p":236},{"i":259,"t":"Display-P3 is a variant of DCI-P3 developed by Apple because they wanted to be different.","s":"12: Display-P3","u":"/docs/colorimetry/primaries","h":"#12-display-p3","p":236},{"i":261,"t":"Nobody really knows what this is.","s":"22: EBU Tech 3213","u":"/docs/colorimetry/primaries","h":"#22-ebu-tech-3213","p":236},{"i":263,"t":"Range is a concept that describes the valid values for a pixel. Typically, RGB will use full range and YUV will use limited range. What does this mean? For 8-bit video, full range indicates that all values between 0-255 may be used to represent a color value. On the other hand, limited range indicates that only values between 16-235, or 16-240 for chroma, are valid, and any values outside that range will be clamped to fit in that range. These expand to equivalent ranges for high bit depth videos. Why is limited range a thing that exists? Essentially, it's due to historical reasons, but it's a convention that we are stuck with today. Even though full range may provide slightly better color accuracy, it is far less meaningful for high bit depth content, and even HDR Blu-rays use limited color range.
Therefore, it is recommended to follow existing conventions.","s":"Color Range","u":"/docs/colorimetry/range","h":"","p":262},{"i":265,"t":"Transfer characteristics, also known as transfer functions, represent the gamma function of a video--that is, how to convert from a gamma-compressed video to one that is in linear light. These are sometimes also called EOTF and OETF functions. As with primaries, the integer values are defined within universal specifications, and as such they will be the same across all encoding and playback tools. The following values are available:","s":"Transfer Characteristics","u":"/docs/colorimetry/transfer","h":"","p":264},{"i":267,"t":"BT.1886 is the standard used for most modern SDR video, and is a safe default assumption. This transfer function is used in the following standards: Rec. ITU-R BT.709-6 Rec. ITU-R BT.1361-0 conventional colour gamut system (historical)","s":"1: BT.1886","u":"/docs/colorimetry/transfer","h":"#1-bt1886","p":264},{"i":269,"t":"This value indicates that no transfer function is set for the video, and the player must decide which value to use. mpv will always assume BT.1886 in this case.","s":"2: Unspecified","u":"/docs/colorimetry/transfer","h":"#2-unspecified","p":264},{"i":271,"t":"BT.470M is a standard that was used in analog television systems in the United States. This transfer represents a power function with a gamma of 2.2. This transfer function is used in the following standards: Rec. ITU-R BT.470-6 System M (historical) United States National Television System Committee 1953 Recommendation for transmission standards for color television United States Federal Communications Commission (2003) Title 47 Code of Federal Regulations 73.682 (a) (20) Rec. ITU-R BT.1700-0 625 PAL and 625 SECAM","s":"4: BT.470M","u":"/docs/colorimetry/transfer","h":"#4-bt470m","p":264},{"i":273,"t":"BT.470BG is a standard that was used for European (PAL) television systems and DVDs. This transfer represents a power function with a gamma of 2.8.","s":"5: BT.470BG","u":"/docs/colorimetry/transfer","h":"#5-bt470bg","p":264},{"i":275,"t":"SMPTE 170M is a standard that was used for NTSC television systems and DVDs. Its transfer function is equivalent to BT.1886. This transfer function is used in the following standards: Rec. ITU-R BT.601-7 525 or 625 Rec. ITU-R BT.1358-1 525 or 625 (historical) Rec. ITU-R BT.1700-0 NTSC SMPTE ST 170 (2004)","s":"6: SMPTE 170M","u":"/docs/colorimetry/transfer","h":"#6-smpte-170m","p":264},{"i":277,"t":"SMPTE 240M was an interim standard used during the early days of HDTV (1988-1998).","s":"7: SMPTE 240M","u":"/docs/colorimetry/transfer","h":"#7-smpte-240m","p":264},{"i":279,"t":"This value indicates that the content is already in linear light.","s":"8: Linear","u":"/docs/colorimetry/transfer","h":"#8-linear","p":264},{"i":281,"t":"Indicates a logarithmic transfer function with a 100:1 range.","s":"9: Logarithmic 100","u":"/docs/colorimetry/transfer","h":"#9-logarithmic-100","p":264},{"i":283,"t":"Indicates a logarithmic transfer function with a (100 * sqrt(10)):1 range.","s":"10: Logarithmic 316","u":"/docs/colorimetry/transfer","h":"#10-logarithmic-316","p":264},{"i":285,"t":"Used in standard IEC 61966-2-4.
I have no idea what this actually is.","s":"11: XVYCC","u":"/docs/colorimetry/transfer","h":"#11-xvycc","p":264},{"i":287,"t":"This was intended to be a standard for \"future\" television systems, but it never really came into use.","s":"12: BT.1361E","u":"/docs/colorimetry/transfer","h":"#12-bt1361e","p":264},{"i":289,"t":"Represents the sRGB colorspace. This transfer function is used in the following standards: IEC 61966-2-1 sRGB (with MatrixCoefficients equal to 0) IEC 61966-2-1 sYCC (with MatrixCoefficients equal to 5)","s":"13: sRGB","u":"/docs/colorimetry/transfer","h":"#13-srgb","p":264},{"i":291,"t":"Typically used with ultra-high-definition 10-bit SDR video. Its transfer function is equivalent to BT.1886.","s":"14: BT.2020 10-bit","u":"/docs/colorimetry/transfer","h":"#14-bt2020-10-bit","p":264},{"i":293,"t":"Typically used with ultra-high-definition 12-bit SDR video. Its transfer function is equivalent to BT.1886.","s":"15: BT.2020 12-bit","u":"/docs/colorimetry/transfer","h":"#15-bt2020-12-bit","p":264},{"i":295,"t":"PQ is the most widely used transfer function for HDR content. It allows for a wider range of luminance to be represented than conventional transfer functions. This transfer function is used in the following standards: SMPTE ST 2084 (2014) for 10-, 12-, 14- and 16-bit systems Rec. ITU-R BT.2100-2 perceptual quantization (PQ) system","s":"16: PQ aka SMPTE 2084","u":"/docs/colorimetry/transfer","h":"#16-pq-aka-smpte-2084","p":264},{"i":297,"t":"SMPTE 428 is used for D-Cinema Distribution Masters, aka DCDM.","s":"17: SMPTE 428","u":"/docs/colorimetry/transfer","h":"#17-smpte-428","p":264},{"i":299,"t":"HLG is an alternative transfer function for HDR content used by some televisions. This transfer function is used in the following standards: ARIB STD-B67 (2015) Rec. ITU-R BT.2100-2 hybrid log-gamma (HLG) system","s":"18: HLG aka Hybrid Log-Gamma","u":"/docs/colorimetry/transfer","h":"#18-hlg-aka-hybrid-log-gamma","p":264},{"i":301,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! Speex is an open-source audio codec designed for speech. It has largely been replaced by Opus.","s":"Speex","u":"/docs/audio/Speex","h":"","p":300},{"i":303,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! 7-zip (7z) is a file format that supports several different data compression, encryption, & pre-processing algorithms. It was created by the 7-Zip archiver, which is free and open-source software for dealing with various data compression formats including formats similar to 7z like XZ. The 7-zip format has some noteworthy advantages over the popular ZIP format. The 7-zip utility can compress files to the 7z format \"30-70% better\" than to ZIP format despite having a highly efficient ZIP encoder. It mainly uses the LZMA & LZMA2 algorithms, which are more modern than DEFLATE and usually compress better. 7-zip can encrypt files with AES-256 using a user-provided password. AES-256 is more secure than the ZipCrypto encryption often used by ZIP. 7-zip can support files up to 16 exabytes in size, while traditional ZIP has a 4 GB limit (ZIP64, which is commonly used, does not suffer from this 4 GB limitation so this is less relevant now). 7-zip also supports various pre-processing filters, which can improve compression for certain types of data like executables and binaries.
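Given those advantages, here are a couple of hedged 7-Zip CLI invocations (using the 7zz binary discussed in this wiki's ZIP entry; the switches are standard 7-Zip options):

```bash
# Maximum-effort 7z archive using LZMA2 across 8 threads.
7zz a -t7z -mx9 -mmt8 "Output.7z" "Input"

# AES-256-encrypted 7z archive; the password is passed inline
# with -p (mind your shell history).
7zz a -t7z -mx9 -p"your_password" "Output.7z" "Input"
```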
However, 7-zip also has some drawbacks and limitations. 7-zip is not as widely supported as ZIP by other software and platforms. Some users may need to install additional programs or plugins to open or extract 7z files. Slower speed: 7-zip archives may take longer to compress or decompress compared to ZIP. This is somewhat mitigated by the 7-zip utility's effective parallelization when decoding, but that only reduces wall-clock time rather than total CPU time, meaning 7z is still likely to be more expensive to decompress than ZIP. 7-zip does not have any built-in mechanism to repair corrupted or damaged archives. Users may need to use third-party tools or backup copies to recover their data. 7z archives are supported natively by macOS & many Linux distributions.","s":"7-zip (7z)","u":"/docs/data/7z","h":"","p":302},{"i":305,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! WavPack is an open-source lossless audio codec supporting both lossless & lossy compression, with a unique hybrid compression mode that stores a lossy audio stream alongside a lossless correction file. Created by David Bryant in 1998, it gained a lot of software support, although not as much as FLAC. Compressed file sizes typically fall between FLAC and heavier state-of-the-art lossless audio compressors like TAK, OptimFROG, or SAC. Compared to FLAC, WavPack usually gives a lower bitrate at the expense of slightly more resource usage.","s":"WavPack","u":"/docs/audio/WavPack","h":"","p":304},{"i":307,"t":"WavPack is one of the most robust and feature-rich lossless audio codecs. Some notable features include: Hybrid mode Support for 1-32 bit integer/floating point audio streams Muxable into Matroska .mkv container Multichannel with up to 4096 channels APEv2/ID3v1 tagging format RIFF chunks support Multithreaded encoding/decoding Error detection using CRC32 checksums and optionally also MD5 hash of original audio data","s":"Features","u":"/docs/audio/WavPack","h":"#features","p":304},{"i":309,"t":"Hybrid Mode is not to be confused with hybrid codecs like Opus. WavPack uses the same algorithm for both lossy and lossless mode. When using lossy mode, the encoder transmits only the unary magnitude and the sign bit of Recursive Golomb encoded residuals. During decoding, those data points can be further enhanced if the correction file is provided. WavPack can produce 2 output files when using Hybrid Mode: the main .wv file with truncated (lossy) residuals, and a .wvc correction file containing the enhancement layer. When both files are provided to the decoder, it should be able to recreate the original audio data. Otherwise, if only the .wv file is available, the decoder will decode a lossy audio stream.","s":"Hybrid Mode","u":"/docs/audio/WavPack","h":"#hybrid-mode","p":304},{"i":311,"t":"Like in many lossless audio codecs, WavPack only encodes the prediction error value. In the default fast mode, prediction is just extrapolation of the previous two samples. More sophisticated predictors are used with higher encoding modes. Due to poor performance, unpredictability, and other problems with floating-point arithmetic in CPUs of its time, WavPack only uses integer arithmetic even when operating on IEEE float data. Nowadays, many of those issues have been addressed; however, integer-only arithmetic could still make porting WavPack to chips with no FPU support much easier.
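Before the format breakdown continues below, here is what the Hybrid Mode described above looks like in practice, as a hedged sketch built from the wavpack/wvunpack flags documented in this entry:

```bash
# Hybrid encode: produces out.wv (lossy, ~256 kbps) plus out.wvc
# (the correction file holding the enhancement layer).
wavpack -b256 -c input.wav -o out.wv

# With out.wvc alongside out.wv, decoding should restore the original
# audio losslessly; without it, a lossy stream is decoded.
wvunpack out.wv
```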
The encoding process consists of 3 main steps: Joint stereo processing - Converts the stereo channels to the standard difference and average, removing inter-channel correlations. Multipass decorrelation - Includes multiple prediction passes where the number of passes and predictor type depend on the selected encoding mode, removing intra-channel correlations between neighboring audio samples. Entropy coding the residuals with Recursive Golomb Coding - Instead of Rice Coding, the author proposed a new technique that combines Golomb and Elias gamma code to better address the nature of audio data.","s":"Format Breakdown","u":"/docs/audio/WavPack","h":"#format-breakdown","p":304},{"i":314,"t":"Default options wavpack input.wav -o out.wv Fast, lowest compression, md5 hash wavpack input.wav -f -m -o out.wv Very slow, highest compression, 8 threads wavpack input.wav -hh -x6 --threads=8 -o out.wv Lossy, slow, 240kbps wavpack input.wav -b240 -h -x3 -o out.lsy.wv Highest hybrid compression, very slow, 4bps wavpack input.wav -b4 -cc -hh -x6 -o out.hyb.wv wvunpack can be used to decode the resulting .wv files; however, most major media players like mpv or VLC already have (limited) WavPack support. Options: -f Faster encode/decode at the expense of larger file size -h Slower encode/decode with higher compression -hh Slowest encode/decode with highest compression -x0 Disable extra filters -x3 Try all predefined filters, slow, higher compression -x6 Generate custom filters, very slow, best compression -b240 Enable lossy mode, set bitrate to 240kbps (acceptable range is 24-9600 but it won't get lower than 2 bits per sample) -b4 Enable lossy mode, set bits per sample to 4 (acceptable range is 2-23.9) -c Enable hybrid mode (will produce .wv and .wvc file) -cc Enable and optimize for hybrid mode, might lower decoding speed and hurt quality -m Include MD5 hash of original audio data in the output file --threads=8 Use 8 threads (acceptable range is 1-12) For a more detailed description of all available options, see the manual.","s":"wavpack","u":"/docs/audio/WavPack","h":"#wavpack-1","p":304},{"i":316,"t":"FFmpeg has its own native WavPack encoder and decoder. It also used to support libwavpack via --enable-libwavpack; however, that was removed due to interface incompleteness. The native encoder is single-threaded and supports neither Lossy nor Hybrid Mode. It uses the -compression_level parameter to trade speed for compression ratio. Fastest, lowest compression ffmpeg -i input.wav -compression_level 0 out.wv Slowest, highest compression ffmpeg -i input.wav -compression_level 8 out.wv For all possible parameters, consult the FFmpeg documentation.","s":"FFmpeg","u":"/docs/audio/WavPack","h":"#ffmpeg","p":304},{"i":318,"t":"As of 2024, WavPack has been largely superseded by FLAC, which became the de facto standard for lossless audio on the Web and in hardware. The implementation of WavPack in media software is often incomplete. FFmpeg doesn't support Hybrid Mode, and other media players usually don't support it either. There are also issues with its support in the .mkv container. Without this feature, WavPack doesn't provide much benefit over the already widespread FLAC. The compressed file might be slightly smaller; however, music streaming companies tend to choose the well-standardized FLAC, which also has the benefit of DRM support in the .mp4 container (apparently a very important thing on the modern web). Even if Hybrid Mode had better software support, the minimum lossy setting is 2 bits per sample.
That translates to around 200 kbps for a stereo audio track, which is quite high. The quality of WavPack's lossy mode is also somewhat lacking compared to modern lossy codecs such as Opus or AAC, because it doesn't utilize any psychoacoustic model.","s":"Adoption issues","u":"/docs/audio/WavPack","h":"#adoption-issues","p":304},{"i":320,"t":"WavPack was one of the first compressed lossless audio codecs, preceding Monkey's Audio (2000), FLAC (2001), OptimFROG (2002) and ALAC (2004). OptimFROG Dualstream is a feature of the OptimFROG codec serving a similar purpose to WavPack Hybrid; however, it was introduced much later. Apparently, there are some devices with WavPack hardware support. References: WavPack Compression Techniques WavPack Technical Document Old WavPack File Format Specification Hydrogenaudio Wiki Multimedia.cx Wiki Wikipedia FFmpeg WavPack Source","s":"Notes","u":"/docs/audio/WavPack","h":"#notes","p":304},{"i":322,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! Brotli was released by Google in late 2013, & it is commonly used on the Web for content delivery. It is a core part of the .woff2 Web Open Font Format, allowing web fonts to be smaller when sent to users as part of a website. It is not very common to pass around .tar.br Brotli archives like you would with gzip or xz, so the lack of broad tooling support for such files is rarely an issue. Brotli is almost universally compatible across the Web, supported by as much as 96% of the World Wide Web's users. Brotli is based on LZ77 & Huffman coding, much like ZIP. It also uses context modeling to allow the use of multiple Huffman trees for the same alphabet in the same block; this essentially means that, based on the context of the data being compressed, it can be compressed more efficiently, especially if it contains multiple different kinds of data. Brotli was co-authored & partially developed by Jyrki Alakuijala, who also worked on JPEG-XL & the efficient JPEG encoder jpegli. JPEG-XL's metadata information is usually Brotli-compressed.","s":"Brotli","u":"/docs/data/brotli","h":"","p":321},{"i":324,"t":"Codec Wiki - community-maintained wiki for all things encoding.","s":"Contribution Guide","u":"/docs/contribution-guide","h":"","p":323},{"i":326,"t":"By contributing to the Codec Wiki, you are communicating that you have read & agreed to our Terms & Conditions, Privacy Policy, & Code of Conduct. Ensure your understanding of the material you're contributing is sufficient for it to be useful to the project. It is perfectly acceptable not to get everything right the first time, but always double-check your contributions for factual correctness. Our current priority is filling out the existing pages with content. Please assist in doing this, if possible, before considering adding new pages. If you're unsure the content in your entry is completely correct or you believe your entry needs review, please attach the following message at the top of your entry: Pending Review The content in this entry may not be entirely accurate, & is pending further review to assess the quality of the information. If you're aware your entry is too short or incomplete, please add the following message to the top of your entry: Under Maintenance The content in this entry is incomplete & is in the process of being completed.
If you've added a new page & you aren't sure what should go there (this isn't recommended while there are still so many empty pages to be filled), add the following message as your page entry: Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"Before You Contribute","u":"/docs/contribution-guide","h":"#before-you-contribute","p":323},{"i":328,"t":"If you'd like to join the \"AV1 for Dummies\" Discord server to communicate with other passionate contributors helping this project, please join using the widget below: Alternatively, we have a (soon to be) bridged Revolt server linked right here. Revolt is an open-source Discord alternative, which you can read more about on this page.","s":"Connect With Us","u":"/docs/contribution-guide","h":"#connect-with-us","p":323},{"i":330,"t":"Make sure to clone from & edit the main branch only, & push your final changes to the deployment branch according to the instructions below. Also be sure to use Node 18 LTS, as later versions tend to be troublesome. Don't forget to add unimportant files to the .gitignore before making any commits. Clone from the main branch to start to make a contribution: % git clone git@github.com:av1-community-contributors/av1-wiki.github.io.git -b main Test your changes locally before making a commit: % yarn % yarn start Push changes to the main branch: % git add . % git commit -m \"Commit Message\" % git push -u origin main Deploy to the deployment branch to make your changes live on the site: % GIT_USER= DEPLOYMENT_BRANCH=deployment yarn deploy Docusaurus Info","s":"Clone & Push Instructions","u":"/docs/contribution-guide","h":"#clone--push-instructions","p":323},{"i":332,"t":"This website is built using Docusaurus 3, a modern static website generator.","s":"Website","u":"/docs/contribution-guide","h":"#website","p":323},{"i":334,"t":"$ yarn","s":"Installation","u":"/docs/contribution-guide","h":"#installation","p":323},{"i":336,"t":"$ yarn start This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.","s":"Local Development","u":"/docs/contribution-guide","h":"#local-development","p":323},{"i":338,"t":"$ yarn build This command generates static content into the build directory and can be served using any static content hosting service.","s":"Build","u":"/docs/contribution-guide","h":"#build","p":323},{"i":340,"t":"See the initial instructions at the top.","s":"Deployment","u":"/docs/contribution-guide","h":"#deployment","p":323},{"i":342,"t":"To represent color values, a format is agreed upon. Color formats are made up of three things: the color model--which includes the order of the components and sometimes chroma subsampling--the bit depth, and whether it is a packed or a planar format. In some cases, endianness may be important.","s":"Color Formats","u":"/docs/colorimetry/format","h":"","p":341},{"i":344,"t":"A color model is a method of representing colors in a video or image using data. Different color models store color and brightness information in different ways. There are many different color models, but this section will cover the models most commonly used for images and video.","s":"Color Models","u":"/docs/colorimetry/format","h":"#color-models","p":341},{"i":346,"t":"RGB is probably the most well-known color model, and is primarily used in image encoding.
RGB consists of three color channels, Red, Green, and Blue, which are then combined to determine the final color of each pixel. Typically, RGB is the final model that a monitor or TV will use to display images, because the pixels on a screen are made up of red, green, and blue LEDs, although it is not commonly used for video encoding because other models can provide better compression.","s":"RGB","u":"/docs/colorimetry/format","h":"#rgb","p":341},{"i":348,"t":"YUV, also known as YCbCr, is the most widely used color model for video encoding. It consists of three components: Y aka Luma, which represents luminance or brightness, and two chroma planes, which represent color. Generally a video player will have to convert a YUV video into RGB before it can be rendered, but there are significant compression benefits to using YUV over RGB for video. The most notable reason to use YCbCr is an optimization called chroma subsampling. This means that the chroma components can be encoded at a lower resolution than the luma components, which results in a smaller output file. You can read more about chroma subsampling further below.","s":"YUV","u":"/docs/colorimetry/format","h":"#yuv","p":341},{"i":350,"t":"The order in which the components in a color model are arranged is simply represented by writing them out. For example, RGB for red first, then green, then blue, or BGR for blue, green, red.","s":"Component order","u":"/docs/colorimetry/format","h":"#component-order","p":341},{"i":352,"t":"A bit depth is how many bits are available to store the sample value. There are two main ways to specify the bit depth in a format name: bits per component. Here, RGB888 reads as RGB color model, with 8 bits for the red component, 8 bits for the green component, and 8 bits for the blue component and RGB565 reads as RGB color model, with 5 bits for the red component, 6 bits for the green component, and 5 bits for the blue component. bits per sample. Here, RGB24 reads as RGB color model, with 24 bits in total for the red, green, and blue components. This is ambiguous, because one does not know exactly how many bits are allocated to each component. RGB565, RGB556, and RGB655 (even though the latter ones do not make much sense as the eye is most sensitive to green light) all become RGB16.","s":"Bit depth","u":"/docs/colorimetry/format","h":"#bit-depth","p":341},{"i":354,"t":"Components can be stored either packed, where all components are interleaved (here, RGB): Sample number: 1 2 3 4 5 Data: RGB RGB RGB RGB RGB or stored separately for each component: Sample number: 1 2 3 4 5 Data: R R R R R... Data: G G G G G... Data: B B B B B... In planar formats, many operations can be easier to implement, as it is possible to implement the algorithm once and then operate on all planes. On the other hand, packed formats are simpler and often used in hardware.1","s":"Packed vs planar","u":"/docs/colorimetry/format","h":"#packed-vs-planar","p":341},{"i":356,"t":"Different computer architectures store numbers differently. For more information, visit the Wikipedia article on endianness. There are two main ways to store numbers with more than 8 bits (1 is the least significant byte and 4 is the most significant byte, here 4 bytes): Least significant byte first, little endian, 1234. This is what x86-family processors use. Most significant byte first, big endian, 4321. This is what PowerPC-family processors use. This can be important for color formats, as some computers might store it in their native endianness.
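To see these naming conventions (color model, bit depth, packing, endianness) in practice, you can list the pixel formats FFmpeg knows about:

```bash
# Lists every pixel format with its component count and bits per pixel;
# note the le/be suffixes marking little- and big-endian variants.
ffmpeg -hide_banner -pix_fmts
```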
VapourSynth doesn't seem to care about endianness, but FFmpeg does. For example, RGB565 might store its two bytes in 12 or 21 order, and if they are read in the wrong order, it will produce garbage.","s":"Endianness","u":"/docs/colorimetry/format","h":"#endianness","p":341},{"i":358,"t":"In Y'CbCr signals, there are three widely used variants of chroma subsampling: 4:2:0 which has half the vertical and horizontal chroma resolution 4:2:2 which has half the horizontal chroma resolution but full vertical resolution 4:4:4 which has full chroma resolution (no subsampling) 4:2:2 is not particularly useful over the other options, so this guide will focus on 4:2:0 and 4:4:4. 4:2:0 is the most commonly used format for videos. Nearly every DVD, Blu-ray, camera recording, etc. uses 4:2:0 subsampling. This is because, in the majority of cases, human eyes do not notice the reduction in chroma resolution. There is very little benefit to using 4:4:4 in the average case. However, there are some exceptions. The most notable is screen recordings. Things like text overlays, video game UI overlays, etc. have very fine, color-dependent detail that can be destroyed by chroma subsampling and result in an aliased look to the video. Therefore, it is recommended to use 4:4:4 subsampling when recording your screen, and 4:2:0 subsampling in most other cases.","s":"Chroma subsampling","u":"/docs/colorimetry/format","h":"#chroma-subsampling","p":341},{"i":360,"t":"VS name FFmpeg name Meaning GRAY8 gray8 Brightness only, 8 bits, packed GRAY16 gray16le, gray16be (the suffix specifies the endianness) Brightness only, 16 bits RGB888 rgb24 red, green, blue, 8 bits per component YUV420P8 yuv420p luma, chroma blue, chroma red, 8 bits per component, planar, 4:2:0 subsampling YUV422P8 yuv422p luma, chroma blue, chroma red, 8 bits per component, planar, 4:2:2 subsampling YUV444P8 yuv444p luma, chroma blue, chroma red, 8 bits per component, planar, no subsampling YUV420P10 yuv420p10le, yuv420p10be luma, chroma blue, chroma red, 10 bits per component, planar, 4:2:0 subsampling YUV422P10 yuv422p10le, yuv422p10be luma, chroma blue, chroma red, 10 bits per component, planar, 4:2:2 subsampling YUV444P10 yuv444p10le, yuv444p10be luma, chroma blue, chroma red, 10 bits per component, planar, no subsampling","s":"Common formats","u":"/docs/colorimetry/format","h":"#common-formats","p":341},{"i":362,"t":"Footnotes: YUV - VideoLAN Wiki ↩","s":"References","u":"/docs/colorimetry/format","h":"#references","p":341},{"i":364,"t":"YUV - VideoLAN Wiki ↩","s":"Footnotes","u":"/docs/colorimetry/format","h":"#footnote-label","p":341},{"i":366,"t":"Gzip is a DEFLATE implementation for use with individual files. It is popular on Unix-like systems such as Linux & macOS, and is often seen paired with tar to create .tar.gz archives. Formats like ZIP & PNG also use Deflate to different effect.","s":"gzip","u":"/docs/data/gzip","h":"","p":365},{"i":368,"t":"While ZIP is a multi-file format that can compress multiple files into a single compressed file, Gzip is a single-file format that compresses a single file into a single compressed file. Both use DEFLATE for compression. ZIP supports encryption, while Gzip does not. ZIP also stores more extensive header information.","s":"Format Breakdown","u":"/docs/data/gzip","h":"#format-breakdown","p":365},{"i":370,"t":"In order to properly understand the gzip format, we must first talk about ZIP. A lot of similar or identical information is covered in our ZIP entry.
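Looping back to the chroma subsampling recommendation above (4:4:4 for screen recordings, 4:2:0 for most other content), the subsampling of an encode is chosen via the pixel format. A hedged FFmpeg sketch:

```bash
# Screen recording with fine text: keep full chroma resolution (4:4:4).
ffmpeg -i "screen_capture.mkv" -c:v libx264 -pix_fmt yuv444p "out_444.mkv"

# Ordinary camera content: 4:2:0 is the usual choice.
ffmpeg -i "camera.mkv" -c:v libx264 -pix_fmt yuv420p "out_420.mkv"
```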
The ZIP format was developed by Phil Katz as an open format with an open specification, where his implementation PKZIP was shareware. A restricted ZIP format exists and is used in other filetypes such as Java .jar archives, a slew of Microsoft Office file formats, Office Document Format files (.odt, .ods, .odp), and EPUB files for e-readers. In around 1990, Info-ZIP came onto the scene. \"Info-ZIP's purpose is to provide free, portable, high-quality versions of the Zip and UnZip compressor-archiver utilities that are compatible with the DOS-based PKZIP by PKWARE, Inc.\" (https://infozip.sourceforge.net/). They did this successfully, leading to increased adoption of the ZIP format. In the early 1990s the gzip format was developed, derived from the Deflate code in the Info-ZIP utilities. It was designed to replace the Unix compress utility, which used the (at the time) patented LZW compression algorithm which threatened its free use. Though some specific implementations of Deflate were patented by Phil Katz, the format was not, so a Deflate implementation that did not infringe on any patents was written. As a compress replacement, the Unix gzip utility can decompress data that was compressed using compress. Gzip compresses quite a bit better than Unix compress due to its use of DEFLATE, and it has very fast decompression. It also adds a CRC-32 checksum as an integrity check for the archived data. The header format permits the storage of more information than the compress format allowed, such as the original file name & the file modification time. The popular tar utility, which creates an archive of files, has an option to compress directly to the .tar.gz format and is a very popular use case for gzip. Since the compression of a .tar can take advantage of redundancy across files, ZIP often compresses less effectively than the marriage of tar & gz. .tar.gz is the most common archive format in use on Unix due to its very high portability, but there are better compression methods available. Some of these include XZ, bzip2, brotli, 7-zip, & Zstandard.","s":"History","u":"/docs/data/gzip","h":"#history","p":365},{"i":373,"t":"Chances are, you have gzip already available on your system. You can encode gzip archives using the gzip command. Open a terminal window. Navigate to the directory where you want to create the gzip archive. Use the gzip command followed by the name of the file you want to compress. For example: gzip -7 myfile.txt This will create a compressed file called myfile.txt.gz in the current directory using compression level 7. Compression levels span from 1 through 9 (-1 .. -9; shortcuts are --fast for -1, --best for -9). If you want to compress multiple files at once, you can simply pass all of their names to gzip; each file is compressed individually. For example: gzip myfile1.txt myfile2.txt This will create compressed files called myfile1.txt.gz & myfile2.txt.gz in the current directory. If you want to compress a directory and all its contents, you can use the -r option followed by the name of the directory. For example: gzip -r mydirectory/ This will create compressed versions of each file in the specified directory. If you want to encode the gzip archive with a different extension, you can use the -S option followed by the suffix .suf. For example: gzip -S .suf myfile.txt This will create a gzip-compressed file called myfile.txt.suf in the current directory.
Also, you can use other options like -v for verbose mode, -f to force overwriting & compress links, -l for listing the contents of compressed files, and -d for decompressing files. You can find more information about the gzip command & its options by running man gzip in a terminal.","s":"Linux & macOS","u":"/docs/data/gzip","h":"#linux--macos","p":365},{"i":375,"t":"To be filled. References: Mark Adler is an American software engineer best known for his work in the field of data compression as the author of the Adler-32 checksum function, and a co-author of the zlib compression library and gzip. He has contributed to Info-ZIP, and has participated in developing the Portable Network Graphics (PNG) image format. Much of this post is based on his writing in this StackOverflow answer","s":"Windows","u":"/docs/data/gzip","h":"#windows","p":365},{"i":377,"t":"Pending Review The content in this entry may not be entirely accurate, & is pending further review to assess the quality of the information. ZIP is an archive file format that supports lossless data compression. A ZIP file may contain one or more files or directories that may have been compressed using any one of a number of different algorithms present in the ZIP specification. The most common algorithm used in ZIP is DEFLATE, which is also used in gzip & PNG. Deflate acts as a combination of LZ77 lossless coding & Huffman coding, where it can first use LZ77 to find patterns in the data & reduce redundancy. This is followed by using Huffman coding to assign smaller bit values to patterns found more frequently by LZ77. Additionally, files in a ZIP archive are compressed individually so it is possible to extract existing files or add new ones without applying compression or decompression to the entire archive. ZIP is noteworthy for its nearly universal compatibility. \"Traditional ZIP\" (compression method 8 in the ZIP specification) limits the size of compressed archives to 4 GB, though most ZIP compressors use Deflate64(tm) (compression method 9 in the ZIP specification) to bypass this limitation. ZIP is competitive with gzip and has been superseded many times by formats & algorithms such as bzip2, XZ, 7-zip, brotli (to a degree), and Zstandard.","s":"ZIP","u":"/docs/data/zip","h":"","p":376},{"i":379,"t":"DEFLATE is an LZ77-based compressor that finds repeated sequences of bytes in the input data and replaces them with shorter references to previous occurrences. It also uses Huffman coding to encode the symbols with variable-length codes based on how frequently they occur. DEFLATE has two modes for each block of compressed data, specified as either \"static\" or \"dynamic\" Huffman compressed blocks. In static mode, the Huffman codes are fixed and predefined. In dynamic mode, the Huffman codes are generated dynamically & transmitted along with the compressed data. ZIP files have a specific structure that consists of four main file header components: local file headers, central directory file headers, end of central directory record, and data descriptors. The local file headers store information about each compressed file, such as its name, size, CRC-32 checksum, compression method, and optional extra fields. The central directory file headers store similar information as the local file headers, but also include the offset of each local file header in the ZIP file. The end of central directory record marks the end of the ZIP file and contains information about the number and size of the central directory file headers.
The data descriptors are optional and store additional information about the compressed data, such as its CRC-32 checksum, uncompressed size, & compressed size. ZIP files can also support other compression methods, such as Deflate64(tm), BZIP2, LZMA, & Zstandard. These methods are not widely supported by most ZIP utilities and may cause compatibility issues. ZIP files can also contain uncompressed data. The format also supports encryption to protect the data from unauthorized access. There are two types of encryption supported by ZIP: traditional ZipCrypto encryption and strong encryption. ZipCrypto encryption is considered insecure, while stronger encryption in ZIP uses more resilient algorithms, albeit spread across a number of standards. Because of this, strong encryption is not standardized and may cause compatibility issues. ZIP files can reduce the size of files and folders by more efficiently representing redundant data. They can also combine multiple files and folders into a single archive that can be easily transferred or stored. You will not find a more popular format than ZIP for general data compression purposes like these. ZIP files can also preserve the metadata of the original files, such as their names, paths, dates, and attributes. However, ZIP files also have some limitations and concerns. For example, traditional ZIP files (that aren't ZIP64) have a maximum size of 4 gigabytes for each compressed file and 65,535 entries for each archive. Most ZIP implementations do not support symbolic links or hard links within the archive. Additionally, ZIP can be encoded in a number of different ways. Apple has a default \"Compress\" option in Finder that compresses selected files into a ZIP file, and many Linux desktops offer GUI functionality for creating ZIP files easily as well. It is common to compress to ZIP on Windows using the 7-zip data compression & decompression utility (not to be confused with the 7-zip compression format, though the two are related).","s":"Format Breakdown","u":"/docs/data/zip","h":"#format-breakdown","p":376},{"i":381,"t":"The ZIP format was developed by Phil Katz as an open format with an open specification, where his implementation, PKZIP, was shareware. A restricted ZIP format exists and is used in other filetypes such as Java .jar archives, a slew of Microsoft Office file formats, Office Document Format files (.odt, .ods, .odp), and EPUB files for e-readers. In around 1990, Info-ZIP came onto the scene. \"Info-ZIP's purpose is to provide free, portable, high-quality versions of the Zip and UnZip compressor-archiver utilities that are compatible with the DOS-based PKZIP by PKWARE, Inc.\" (https://infozip.sourceforge.net/). They did this successfully, leading to increased adoption of the ZIP format. In the early 1990s the gzip format was developed, derived from the Deflate code in the Info-ZIP utilities. It was designed to replace the Unix compress utility, which used the (at the time) patented LZW compression algorithm which threatened its free use. Though some specific implementations of Deflate were patented by Phil Katz, the format was not, so a Deflate implementation that did not infringe on any patents was written. Unlike .tar, .zip has a central directory at the end, which provides a list of the contents. That and the separate compression provides random access to the individual entries in a .zip file. A .tar file would have to be decompressed and scanned from start to end in order to build a directory.
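This random access is easy to see with the standard unzip utility (a sketch; unzip ships with most Unix-like systems):

```bash
# Read the central directory without decompressing anything.
unzip -l "archive.zip"

# Extract a single entry directly, no full-archive scan required.
unzip "archive.zip" "path/inside/archive.txt"
```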
The popular tar utility, which creates an archive of files, has an option to compress directly to the .tar.gz format and is a very popular use case for gzip. Since the compression of a .tar can take advantage of redundancy across files, ZIP often compresses less effectively than the marriage of tar & gz. .tar.gz is the most common archive format in use on Unix due to its very high portability, but there are better compression methods available. Some of these include XZ, bzip2, brotli, 7-zip, & Zstandard. In this case, the benefit of ZIP is that, because it compresses files separately and builds a central directory at the end of the archive that lists the contents, it provides random access to the individual entries in a .zip file, whereas a .tar file would have to be decompressed and scanned from start to end to build a directory.","s":"History","u":"/docs/data/zip","h":"#history","p":376},{"i":383,"t":"Linux & macOS To encode to a ZIP file most efficiently on Linux or macOS, it is worth using the 7-zip implementation of DEFLATE for ZIP compression. The 7zip website's homepage claims: \"For ZIP and GZIP formats, 7-Zip provides a compression ratio that is 2-10 % better than the ratio provided by PKZip and WinZip.\" You can use the highly flexible 7-zip CLI utility through binaries available on 7-zip's Download page. Here are some direct download links: Linux x86_64 | macOS Universal Once you've downloaded the utility, remember whether you are choosing to use the 7zz binary or the static 7zzs binary. Commands that run using 7zz should run using 7zzs as well, so please replace 7zz in our examples as appropriate if you choose not to use it. Additionally, please copy your choice of binary to your /usr/local/bin if you want to be able to use it everywhere. To encode a ZIP file at the lowest effort setting using one thread: 7zz a -bso0 -tzip -mmt1 -mx1 \"Output.zip\" \"Input\" To encode a ZIP file at the highest effort setting using eight threads: 7zz a -bso0 -tzip -mmt8 -mx9 \"Output.zip\" \"Input\" Windows To be filled.","s":"Encoding","u":"/docs/data/zip","h":"#encoding","p":376},{"i":385,"t":"The only real benefit of using ZIP over more modern formats currently is compatibility. It may be viable when compared to 7z & XZ due to a reduction in complexity that improves encode & decode speed, but Zstandard is incredibly performant in both of these areas and generally outperforms ZIP. When it comes to content delivery on the Web, Brotli has been adopted across all modern web browsers and offers a better alternative to older compression technologies used on the Web that resemble ZIP.","s":"Conclusion","u":"/docs/data/zip","h":"#conclusion","p":376},{"i":387,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! bzip2 is an open-source file compression format and utility. Its efficiency is slightly better than ZIP, but worse than LZMA-based formats like XZ and 7z. bzip2 cannot compress multiple files at once; you should collate files together into a tarball to compress multiple files using bzip2.","s":"bzip2","u":"/docs/data/bzip2","h":"","p":386},{"i":389,"t":"Pending Review The content in this entry may not be entirely accurate, & is pending further review to assess the quality of the information. AMF for AMD GPUs allows applications to take advantage of the dedicated video encoding & decoding hardware present in AMD GPUs.
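Following up on the bzip2 entry above, where multiple files must go through tar first, a minimal sketch:

```bash
# Create a bzip2-compressed tarball (-j selects bzip2)...
tar -cjf "files.tar.bz2" directory/

# ...and extract it again.
tar -xjf "files.tar.bz2"
```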
The AMD Advanced Media Framework (AMF) is a low-level API developed by AMD that enables developers to leverage hardware-accelerated video encoding & decoding on AMD GPUs. By utilizing specialized hardware on the GPU's media block, video encoding and decoding tasks can be offloaded from the CPU, resulting in drastic speed & efficiency increases. AMF provides multimedia processing functionality to applications, and competes with Nvidia's NVENC & Intel's QSV for similar functionality. AMF provides support for various video codecs, including H.264, H.265, VP9, and more recently AV1 on the latest supported GPUs. The GPU's encoding capabilities are especially useful for compressing video content in real-time, where speed is of greater importance than coding efficiency. Hardware-accelerated video encoding using AMF usually improves encoding performance significantly, at the cost of compression efficiency, compared to software-based encoding solutions. When encoding must run much faster than real time, such as at 60-200 fps, it usually allows for higher-quality output at lower bitrates than software encoders operating at comparable speeds. This is particularly beneficial for applications that require real-time encoding, such as live streaming, video conferencing, and game recording. However, slower software encoding solutions almost always offer improvements in fidelity per bit compared to hardware encoding. For offline re-encoding & storage, software encoding is generally preferred. AMF in particular is not known for having strong compression efficiency, as it is hampered by AMD's comparatively weak media blocks, which are usually outperformed by other hardware implementations from Nvidia, Intel, & Apple. AMF is designed to integrate seamlessly with popular media frameworks and libraries, such as FFmpeg and GStreamer. These frameworks often include AMF support, allowing developers to easily incorporate hardware-accelerated encoding into their applications without the need for low-level API programming. AMF is compatible with a wide range of AMD GPUs, including both discrete and integrated graphics solutions. It supports various operating systems, including Windows and Linux, making it accessible to developers across different platforms.","s":"AMF","u":"/docs/encoders_hw/amf","h":"","p":388},{"i":391,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! tar, or Tape ARchive, is an archiving format and utility first developed for Version 7 Unix in 1977. Its original purpose was to collate files into one file that could be stored on tape. Similarly, today it is used to bring many files together into a \"tarball\", which can be compressed with any general data compression algorithm.","s":"tar","u":"/docs/data/tar","h":"","p":390},{"i":393,"t":"note This guide has been written for GNU tar on Linux; however, it should be applicable to BSD tar, macOS tar, and the tar command in PowerShell on Windows.","s":"Usage","u":"/docs/data/tar","h":"#usage","p":390},{"i":395,"t":"tar -cf {archive name} {files listed here} You can use tar to compress your archive, for example into a .tar.gz or .tar.xz archive. To do this, you can either use a flag such as -z, -j, or -J (gzip, bzip2, xz), or you can use -a ('automatic'), which allows tar to infer what algorithm you want from the file extension, such as archive.tar.xz for an xz-compressed tarball.
GNU tar can use these compression algorithms: gzip (.gz), bzip2 (.bz2), xz (.xz), lzip (.lz), lzma (.lzma), lzop (.lzo), zstd (.zst)","s":"Create a tar archive","u":"/docs/data/tar","h":"#create-a-tar-archive","p":390},{"i":397,"t":"tar -xf {tarball}.tar -C {directory to extract to} tar can extract from its supported compressed formats, such as archive.tar.xz, automatically, with no extra flags.","s":"Extract a tar archive","u":"/docs/data/tar","h":"#extract-a-tar-archive","p":390},{"i":399,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! Zstandard is a compression algorithm developed by Facebook known for its extremely fast decompression speeds. It was released in early 2015 and is used in a variety of different contexts. It was designed to perform similarly to older Deflate-based compression algorithms like ZIP or gzip while being faster overall. In practice, it is said to compress similarly to pure LZMA (part of XZ & 7-zip) while being much faster. While .tar.zst archives aren't as popular as .tar.xz or .tar.gz, Zstandard is already a very popular tool for compression in the world of open-source software. It has been integrated into both the FreeBSD kernel & the Linux kernel and is available as a filesystem compression method for the btrfs, squashfs, bcachefs, & OpenZFS filesystems. Filesystem compression refers to a compression scheme that transparently compresses files stored on a filesystem at all times, leading to an overall reduction in storage used across the filesystem. The command line zstd utility can compress to Zstandard at compression levels 1 through 19 by default. The upper bound is raised to 22 when passing the --ultra flag. All Arch Linux packages are compressed at zstd level 20, allowing Arch packages to be decompressed 14 times faster compared to XZ at the cost of an average 0.8% filesize increase across all packages. It is popular in the game emulation scene as well, as many game file formats for emulating console games support zstd compression. The ZIP file format standard actually supports Zstandard as compression method 93 since version 6.3.8, published in 2020. Content encoding using zstd has been supported in Chromium since version 118 behind an experimental flag, meaning it might compete with Brotli on the web in the future. Apple's LZFSE algorithm is purportedly similar to Zstandard compression level 6. Zstandard has the potential to effectively compete with nearly every modern compression method available across most modern use cases. In certain scenarios, if it takes off as a content delivery format, it could replace Brotli if the benefits of super-fast & super-light decode improve the responsiveness of web pages & are worth sacrificing a bit of compression ratio. When using the much higher effort settings, it often outcompetes Brotli for the archive size as well. In the future, .tar.zst could replace 7-zip, ZIP, or other archiving formats, making speedy decode a reality on systems featuring varying levels of compute horsepower.","s":"Zstandard","u":"/docs/data/zstd","h":"","p":398},{"i":401,"t":"note This guide has been written for the zstd command-line utility; however, GUI archivers such as PeaZip and 7-Zip have growing support for zstd.","s":"Usage","u":"/docs/data/zstd","h":"#usage","p":398},{"i":403,"t":"Like many other compressing utilities, in order to compress multiple files, one should probably archive them with tar.
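For example, creating the tarball first and then compressing it (a sketch; the generic zstd syntax follows below):

```bash
# Bundle the directory, then compress the tarball at level 19
# using every available core (-T0 = auto thread count).
tar -cf "files.tar" directory/
zstd -19 -T0 "files.tar" -o "files.tar.zst"
```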
zstd -# {file} -o {file}.zst -# is a number representing the desired compression level, for example -3 or -6. By default you can specify 1-19. By also passing --ultra, you can go up to compression level 22.","s":"Compress a file","u":"/docs/data/zstd","h":"#compress-a-file","p":398},{"i":405,"t":"zstd -d {file}.zst -o file","s":"Decompress a file","u":"/docs/data/zstd","h":"#decompress-a-file","p":398},{"i":407,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! XZ is a data compression format and utility based on the Lempel-Ziv-Markov chain algorithm (LZMA). The XZ format itself is an improvement on LZMA, allowing for preprocessing filters similar to 7-zip to increase the resulting archive's compression ratio. XZ can only compress one file at a time, so making a tar archive of the files you'd like to compress (if there are multiple) is necessary when using XZ. XZ is more widely supported when compared to other data compression formats, seeing support across iOS, macOS, and many Linux distributions by default. To decompress & compress XZ on Windows, you will likely need the 7-Zip archive utility.","s":"XZ","u":"/docs/data/xz","h":"","p":406},{"i":409,"t":"This usage is for the xz utility on Linux, but is applicable to other platforms where xz can be used. It should be noted that xz's default behavior is to delete the original file after it has completed the relevant compression or decompression operation, but this can be prevented with the -k flag below. An arbitrary number of files may be passed to xz and it will individually complete the specified operation on each given file.","s":"Usage","u":"/docs/data/xz","h":"#usage","p":406},{"i":411,"t":"xz {file} This will result in a file named {file}.xz being created in the current working directory. A more advanced variant is listed here: xz -# --extreme -M 800MiB -T 2 -k {file} -# is a number between 0 and 9 specifying speed presets, 0 being the fastest and 9 the slowest. --extreme is an option allowing xz to use more time than the standard preset level. -M {size} is an option restricting the memory usage of xz either as a percentage of system memory or an absolute amount. -T {threads} is an option restricting the number of threads used by xz. -k prevents xz from deleting the input file.","s":"Compression","u":"/docs/data/xz","h":"#compression","p":406},{"i":413,"t":"xz -d {file}.xz This decompresses the xz archive to its original file. -M {size} is an option restricting the memory usage of xz either as a percentage of system memory or an absolute amount. -T {threads} is an option restricting the number of threads used by xz. -k prevents xz from deleting the input file.","s":"Decompression","u":"/docs/data/xz","h":"#decompression","p":406},{"i":415,"t":"Pending Review The content in this entry may not be entirely accurate, & is pending further review to assess the quality of the information. QSV (QuickSync Video) is Intel's hardware video encoding/decoding platform integrated into many of their modern CPUs with integrated graphics processors (iGPUs) & their Arc graphics cards. It allows applications to offload video encoding, decoding, and processing tasks to the dedicated media engines on Intel's multimedia hardware, often providing significant performance gains compared to CPU-based software encoding.
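As an illustration (a sketch, assuming an FFmpeg build with QSV support), ffmpeg -i input.mkv -c:v hevc_qsv -preset slow -global_quality 24 output.mp4 performs a constant-quality hardware encode.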
QSV competes with similar frameworks like Nvidia's NVENC & AMD's AMF (on Intel-based Macs, it also competed with Apple's VideoToolbox under macOS prior to the transition to Apple Silicon). The key purpose of QSV is to accelerate video encoding, decoding, and processing workloads by leveraging specialized fixed-function hardware present in Intel's graphics processors. This dedicated hardware is distinct from the general-purpose compute units, and is designed specifically for multimedia tasks. QSV aims to deliver high encoding/decoding performance while operating efficiently. QSV supports a wide range of video codecs including H.264, H.265, VP9, and more recently AV1 on Intel's latest discrete & integrated GPUs. A major advantage of QSV is that it is ubiquitous on most modern Intel CPUs with integrated graphics, making hardware-accelerated video encoding accessible across a wide range of systems. Applications can easily leverage QSV acceleration through APIs like Intel Media SDK, VA-API, or via integration with popular multimedia frameworks like FFmpeg, GStreamer, & others. Hardware-accelerated video encoding with QSV usually improves encoding speed significantly at the cost of compression efficiency compared to software-based encoding solutions. When encoding must run much faster than real time, such as at 60-200 fps, it usually allows for higher-quality output at lower bitrates than software encoding running at the same speed. This is particularly beneficial for applications that require real-time encoding, such as live streaming, video conferencing, and game recording. QSV is not designed for highly efficient offline file encoding, where quality is prioritized over speed. It is worth noting, however, that QSV is almost always better than AMF from AMD & competitive with NVENC from Nvidia in terms of compression efficiency.","s":"QSV","u":"/docs/encoders_hw/qsv","h":"","p":414},{"i":417,"t":"Android's MediaCodec framework is a part of the platform's multimedia stack that provides access to low-level media encoder & decoder components. It is similar to VideoToolbox on Apple devices. Hardware acceleration with MediaCodec is used for processing audio, video, and compressed data. One of the key features of the MediaCodec framework is its support for automatic media transcoding within the operating system. Introduced in Android 12, media transcoding features of the operating system allow devices to use more modern, storage-efficient media formats for video capture while maintaining compatibility with apps. For devices with compatible media transcoding enabled, Android can automatically convert videos recorded in formats such as H.265 when the videos are opened by an app that doesn't support the format. This allows apps to function even when videos are captured in newer formats on the device.","s":"Mediacodec","u":"/docs/encoders_hw/mediacodec","h":"","p":416},{"i":419,"t":"In order to view your device's supported hardware and software encoders exposed by the MediaCodec framework, it is advised to download the open source Codec Info application. Once you know how to properly interact with your device's hardware encoders, FFmpeg will help you transcode videos easily from the command line.","s":"Usage","u":"/docs/encoders_hw/mediacodec","h":"#usage","p":416},{"i":421,"t":"Testing for this piece was done on the Google Pixel 8, which features the Tensor G3 SoC. It is Exynos-based, so H.264, H.265 (HEVC), and VP9 hardware acceleration for encoding are provided by the Exynos media block.
AV1 encoding and decoding are available on the Tensor G3, provided by a custom Google multimedia block. The Exynos hardware implementation for encoding H.264 and H.265 does not support CQ (Constant Quality) encoding, so a target bitrate must be provided for either CBR (Constant Bitrate) or VBR (Variable Bitrate) encoding. Google's AV1 implementation is in the same situation. Some example MediaCodec encoding commands with FFmpeg: H.264 encoding (VBR, target bitrate 4000K, 250-frame GOP size) ffmpeg -i input.mkv -c:v h264_mediacodec -codec_name c2.exynos.h264.encoder -bitrate_mode 1 -b:v 4000K -g 250 output.mp4 H.265 encoding (VBR, target bitrate 4000K, 250-frame GOP size) ffmpeg -i input.mkv -c:v hevc_mediacodec -codec_name c2.exynos.hevc.encoder -bitrate_mode 1 -b:v 4000K -g 250 output.mp4 VP9 encoding produces video that is severely distorted relative to the bitrate, and AV1 encoding produces broken files without metadata. VP9 encoding (VBR, target bitrate 9000K, 250-frame GOP size) ffmpeg -i input.mkv -c:v vp9_mediacodec -codec_name c2.exynos.vp9.encoder -bitrate_mode 1 -b:v 9000K -g 250 output.mkv AV1 encoding (VBR, target bitrate 8000K, 250-frame GOP size) ffmpeg -i input.mkv -c:v av1_mediacodec -codec_name c2.google.av1.encoder -bitrate_mode 1 -b:v 8000K -g 250 output.mp4 Just run ffmpeg -help encoder=hevc_mediacodec or ffmpeg -help encoder=h264_mediacodec for more info on how to use your MediaCodec encoders. You can choose a value for -codec_name based on what is shown in the Codec Info app. Sources (1) MediaCodec | Android Developers. https://developer.android.com/reference/android/media/MediaCodec. (2) Media | Android Open Source Project. https://source.android.com/docs/core/media.","s":"FFmpeg","u":"/docs/encoders_hw/mediacodec","h":"#ffmpeg","p":416},{"i":423,"t":"Pending Review The content in this entry may not be entirely accurate, & is pending further review to assess the quality of the information. NVENC is a dedicated hardware video encoding engine integrated into NVIDIA's graphics processors. It allows applications to leverage dedicated multimedia encoding hardware to accelerate video encoding tasks, significantly improving performance when compared to CPU-based software encoding. It competes with similar frameworks like Intel's QSV & AMD's AMF. The primary purpose of NVENC is to offload the computationally intensive video encoding workloads from the CPU to the dedicated multimedia hardware on the GPU, thereby freeing up CPU resources for other tasks. This is particularly beneficial in scenarios where fast video encoding is required, such as screen recording, streaming, & video conferencing. NVENC supports a range of popular video codecs, including H.264, H.265, and more recently AV1 on their latest GPUs. It provides hardware-accelerated encoding capabilities for these codecs, typically achieving real-time or faster than real-time encoding performance, depending on resolution, bitrate, and hardware capability. While NVENC excels in encoding speed, it generally sacrifices some compression efficiency compared to modern high-quality CPU-based software encoders at slower presets. NVENC is designed to be easily integrated into various multimedia frameworks and applications. It is supported by popular tools like FFmpeg, OBS Studio, and others, allowing developers to seamlessly leverage GPU-accelerated encoding without the need for low-level programming.
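For instance (a sketch, assuming an FFmpeg build with NVENC enabled), ffmpeg -i input.mkv -c:v hevc_nvenc -preset p7 -rc vbr -cq 24 -b:v 0 output.mp4 performs a constant-quality hardware encode at the slowest, highest-quality preset.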
When compared to AMD's AMF and Intel's QSV, NVENC is known for its high encoding performance, low latency, and broad compatibility with NVIDIA GPUs across different platforms. It is particularly popular among game streamers, content creators, and video professionals who require fast encoding speeds for their workflows. In terms of video compression efficiency, NVENC & QSV trade blows while AMF is generally left behind.","s":"NVENC","u":"/docs/encoders_hw/nvenc","h":"","p":422},{"i":425,"t":"Aurora1 AV1 is a proprietary and paid software AV1 encoder developed by Visionular. Although Visionular provides a contact form to request a free trial, not much is known about this encoder beyond the company's own cherry-picked claims that it is supposedly \"better\" than public FOSS encoders.","s":"Aurora1 AV1","u":"/docs/encoders/Aurora1","h":"","p":424},{"i":427,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. Community Fork This entry is about a fork of aomenc called aom-psy101. If you'd like to learn about the mainline aomenc encoder before reading, visit our aomenc wiki entry. Mainline aomenc is unfortunately not perfect. It suffers from bad defaults, a heavy focus on the perceptually flawed PSNR metric, misleading settings, and other issues. Fortunately, there are a couple of forks developed by the encoding community that were created to combat aomenc's underlying issues. aom-av1-psy No longer maintained as of 13th January 2023 aom-av1-lavish No longer maintained as of 4th June 2024 aom-psy101 aom-av1ador These forks fix up the poor decisions made by the original AOM devs and most importantly introduce new parameters and tunes to help fine-tune the encoder even more. aom-psy101 is a fork of aomenc that aims to improve the encoding quality and speed of AV1. It is developed by damian101, a talented AV1 community developer.","s":"aom-psy101","u":"/docs/encoders/aom-psy101","h":"","p":426},{"i":429,"t":"aomenc is available in FFmpeg via libaom-av1; check if you have it by running ffmpeg -h encoder=libaom-av1. You can input non-FFmpeg standard aomenc parameters via -aom-params. Mainline aomenc Unless you compile FFmpeg yourself with aom-psy101, you will be using the mainline aomenc. Compiling FFmpeg from source against the libaom libraries provided by aom-psy101 is the only way to use the fork with FFmpeg.","s":"FFmpeg","u":"/docs/encoders/aom-psy101","h":"#ffmpeg","p":426},{"i":431,"t":"Linux & macOS Windows Clone the psy101 repo: Clone the psy101 repo git clone https://gitlab.com/damian101/aom-psy101 cd aom-psy101 && mkdir aom_build && cd aom_build Configure compilation. The following flags are set to ensure the aomenc binary is built for optimal performance: Set CMake flags cmake .. -DBUILD_SHARED_LIBS=0 -DENABLE_DOCS=0 -DCONFIG_TUNE_BUTTERAUGLI=0 -DCONFIG_TUNE_VMAF=0 -DCONFIG_AV1_DECODER=0 -DENABLE_TESTS=0 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-flto -pipe -march=native\" -DCMAKE_C_FLAGS=\"-flto -pipe -march=native\" Compile: Compile make -j$(nproc) Install to your system. This may require elevated privileges: Install make install MSYS2 is the best option for building in Windows, as it provides a Unix-like environment for compilation. Make sure you have downloaded & installed MSYS2 from the MSYS2 website before beginning the build process.
Close any MSYS2 Console that you have open, start the Clang64 console & install the required dependencies: pacman -S git perl mingw-w64-clang-x86_64-clang mingw-w64-clang-x86_64-ninja mingw-w64-clang-x86_64-cmake mingw-w64-clang-x86_64-nasm Clone the psy101 repo: Clone the psy101 repo git clone https://gitlab.com/damian101/aom-psy101 cd aom-psy101 && mkdir aom_build && cd aom_build Configure compilation. The following flags are set to ensure the aomenc binary is built for optimal performance: Set CMake flags LDFLAGS=-static cmake .. -DBUILD_SHARED_LIBS=0 -DENABLE_DOCS=0 -DCONFIG_TUNE_BUTTERAUGLI=0 -DCONFIG_TUNE_VMAF=0 -DCONFIG_AV1_DECODER=0 -DENABLE_TESTS=0 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-flto -pipe -march=native\" -DCMAKE_C_FLAGS=\"-flto -pipe -march=native\" Compile: Compile ninja The resulting binary will be available within the home folder of the location where you installed MSYS2 (usually C:). Navigate there, and then to the aom-psy101\\aom_build folder; the binary should be there.","s":"Installation","u":"/docs/encoders/aom-psy101","h":"#installation","p":426},{"i":433,"t":"AVM (AOM Video Model) is the reference implementation for a future codec from the Alliance for Open Media, the organization behind AV1. The codebase is under the Clear BSD license and currently only produces av01 bitstreams. The AVM codec is currently in development and is not yet ready for production use. Not much has been documented or tested.","s":"AVM","u":"/docs/encoders/AVM","h":"","p":432},{"i":435,"t":"Some things about the new encoding implementation can be confirmed via the codebase, but none of those changes are final until the codec is standardized and officially released. Some rumors about the codec include: The name of the codec is going to be AV2, superseding AV1 The codec will be based on AV1, with certain backwards compatibility features available Hardware decoding implementations could be implemented at no cost (no royalties) by utilizing GPU shaders and existing AV1 decoding hardware. AOM's strategy will be to release codecs \"mid-cycle\" relative to ISO/ITU's release schedule, meaning it is likely that \"AV2\" will compete with VVC, not ECM. A quantizer scale of 0-255 will be standard. AVM tries to address some issues with high-fidelity AV1 encoding by introducing a better denoiser to mitigate mosquito noise.","s":"Rumors","u":"/docs/encoders/AVM","h":"#rumors","p":432},{"i":438,"t":"AVM is available in the Arch User Repository (AUR) as avm and avm-git.","s":"Arch Linux","u":"/docs/encoders/AVM","h":"#arch-linux","p":432},{"i":440,"t":"Since this encoder is under heavy development, there are no pre-built binaries provided, so you will need to compile yourself. Windows users are recommended to compile via MinGW-W64, which comes with MSYS2. caution Compilation requires CMake, Nasm, and Perl. git clone https://gitlab.com/AOMediaCodec/avm.git cd avm/build cmake .. -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=0 make -j$(nproc) Since this is a huge project, compiling will take a while depending on your CPU. The resulting binary will be called aomenc, the same name as libaom's AV1 encoder binary. It will be available in the same folder (build), or you can run make install on Linux to install (may need elevated permissions).","s":"Compiling","u":"/docs/encoders/AVM","h":"#compiling","p":432},{"i":442,"t":"tip To convert cq-level in aomenc and crf in SVT-AV1 to AVM's QP values, multiply by 4.
For example, --cq-level 20 equals --qp=80. Simple Y4M input with QP 65, and ivf output: aomenc --qp=65 -o output.ivf input.y4m Preset level 6 (higher is faster), QP 65, Y4M input: aomenc --qp=65 --cpu-used=6 -o output.ivf input.y4m FFmpeg piping: ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | aomenc --qp=65 --cpu-used=6 -o output.ivf -","s":"Usage","u":"/docs/encoders/AVM","h":"#usage","p":432},{"i":444,"t":"You will need the aomdec binary you also compiled to be able to play your encoded video, as no publicly available video player can currently decode AVM bitstreams.","s":"aomdec","u":"/docs/encoders/AVM","h":"#aomdec","p":432},{"i":446,"t":"Community Fork This entry is about a fork of aomenc called aom-av1-lavish. If you'd like to learn about the mainline aomenc encoder before reading, visit our aomenc wiki entry. Mainline aomenc is unfortunately not perfect. It suffers from bad defaults, a heavy focus on the perceptually flawed PSNR metric, misleading settings, and other issues. Fortunately, there are a couple of forks developed by the encoding community that were created to combat aomenc's underlying issues. No Longer Maintained aom-av1-lavish is no longer maintained as of 4th June 2024. The information in this entry is unaffected by this, but the fact that the project is no longer maintained should be taken into consideration as a user. aom-av1-psy No longer maintained as of 13th January 2023 aom-av1-lavish No longer maintained as of 4th June 2024 aom-psy101 aom-av1ador These forks fix up the poor decisions made by the original AOM devs and most importantly introduce new parameters and tunes to help fine-tune the encoder even more. aom-av1-lavish is a fork of aomenc that aims to improve the encoding quality and speed of AV1. It is developed by Clybius, a talented AV1 community developer also well-known for working on SVT-AV1-PSY.","s":"aom-av1-lavish","u":"/docs/encoders/aom-av1-lavish","h":"","p":445},{"i":448,"t":"aomenc is available in FFmpeg via libaom-av1; check if you have it by running ffmpeg -h encoder=libaom-av1. You can input non-FFmpeg standard aomenc parameters via -aom-params. Mainline aomenc Unless you compile FFmpeg yourself with aom-av1-lavish, you will be using the mainline aomenc. Compiling FFmpeg from source against the libaom libraries provided by aom-av1-lavish is the only way to use the fork with FFmpeg.","s":"FFmpeg","u":"/docs/encoders/aom-av1-lavish","h":"#ffmpeg","p":445},{"i":450,"t":"Linux macOS Windows A precompiled AVX2-optimized binary of aom-av1-lavish can be installed for x86_64 Linux via rAV1ator CLI, though it is always recommended to build from source; the steps below cover compiling the fork yourself. CMake, Perl, GNU Make, and nasm (assuming x64; if x86, use yasm) will be needed for compilation. Clone the aom-av1-lavish repo Endless_Merging branch, cd and create build folder git clone https://github.com/Clybius/aom-av1-lavish -b Endless_Merging cd aom-av1-lavish && mkdir -p aom_build && cd aom_build CMake configuration cmake .. -DBUILD_SHARED_LIBS=0 -DENABLE_DOCS=0 -DCONFIG_TUNE_BUTTERAUGLI=0 -DCONFIG_TUNE_VMAF=0 -DCONFIG_AV1_DECODER=0 -DENABLE_TESTS=0 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-flto -O3 -march=native\" -DCMAKE_C_FLAGS=\"-flto -O3 -march=native -pipe -fno-plt\" -DCMAKE_LD_FLAGS=\"-flto -O3 -march=native\" The CMake config above will statically build aomenc while disabling docs (which require Doxygen), extra tunes, tests, and decoders, while also applying native CPU optimizations to help speed up the encoder.
Compile the encoder make -j$(nproc) The resulting binary will be in the same folder you are in (aom_build). Or, optionally, you can install it to your system, which may need elevated permissions. make install macOS is very similar to Linux. Note that some commands may have to be run with sudo, which I won't explicitly include for security reasons. Homebrew Installing the Homebrew package manager is a well documented process at this point: /bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\" Installing mainline libaom is as simple as running the following: brew update && brew upgrade brew install aom FFmpeg can also be installed via brew. Building From Source If you want aom-av1-lavish instead of mainline, you'll have to compile from source. Things are very similar to Linux, with a few oddities: macOS sometimes doesn't have a /usr/local/bin by default. You can fix this by doing mkdir /usr/local/bin. Homebrew installs everything in its own directory structure. If you're building things from source that rely on libraries from libvmaf, libjxl, etc., make sure to copy them from /opt/homebrew/lib to /usr/local/lib. Finding them is a matter of ls | grep \"keyword\" & copying what looks reasonable to be associated with the tool you're using. Building most things from source will have instructions for *nix which work for both macOS & Linux. Even if it says Linux, there's a good chance it'll work on macOS as well, & it is always worth trying Linux build instructions on Mac. aom-av1-lavish requires some additional steps, though. If you want to make the most out of your hardware & eke out every last drop of quality, it may be worth building aom-av1-lavish from source. The first step is to clone it from the Endless_Merging branch, which contains all of the latest lavish improvements: git clone https://github.com/Clybius/aom-av1-lavish -b Endless_Merging cd aom-av1-lavish Now, you need to make some manual changes to the source code until this commit is merged to fix build errors. Add the line #include \"aq_variance.h\" at line 19 in av1/encoder/encodeframe_utils.c Comment out line 2546 in av1/encoder/speed_features.c. This line is const int qindex_thresh_cdef_sf_s1_s3_l2[2] = { 92, 48 }; & becomes // const int qindex_thresh_cdef_sf_s1_s3_l2[2] = { 92, 48 };. Now you can continue to build according to the Linux instructions below. Obviously you'll need cmake, which you can install with homebrew along with any other tools. While still in the aom-av1-lavish directory: mkdir -p aom_build && cd aom_build cmake .. -DBUILD_SHARED_LIBS=0 -DENABLE_DOCS=0 -DCONFIG_TUNE_BUTTERAUGLI=0 -DCONFIG_TUNE_VMAF=0 -DCONFIG_AV1_DECODER=0 -DENABLE_TESTS=0 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-flto -O3 -march=native\" -DCMAKE_C_FLAGS=\"-flto -O3 -march=native -pipe -fno-plt\" -DCMAKE_LD_FLAGS=\"-flto -O3 -march=native\" make -j$(nproc) Install to your system (this may need to be run as root; if it doesn't work properly, you can always copy the binary into /usr/local/bin manually): make install Now you can run aomenc --help | grep \"AOMedia\" -C 3 to see if the lavish build is installed. If you're still seeing the mainline output, you may need to copy the aomenc executable to /opt/local/bin, /usr/local/bin, & /opt/homebrew/bin if you already installed mainline aomenc.
Running the version info command again, the correct output should look something like this: % aomenc --help | grep AOMedia -C 3 Included encoders: av1 - AOMedia Project AV1 Encoder Psy v3.6.0 (default) Use --codec to switch to a non-default encoder. Notice how it says AOMedia Project AV1 Encoder Psy instead of AOMedia Project AV1 Encoder. You should be all set after this to start using aom-av1-lavish. You can download the pre-built versions, which can be found below (Current as of Sept 6, 2023): https://autumn.revolt.chat/attachments/download/-2EiZW1edcT9anApFZ1PJBEber-pJ6z02NiQBjbr28 Join the AV1 Discord server and head to #community-builds for updated versions, or you can opt to compile it yourself with the instructions below. The Compiling Route: Full credits to u/Turbulent-Bend-7416 on Reddit for this post on how to compile aomenc. This guide requires MSYS2, specifically MinGW-W64. Install it if you haven't yet. First, install the required dependencies: pacman -S cmake git perl yasm nasm python3 doxygen mingw-w64-x86_64-gcc mingw-w64-x86_64-cmake base-devel Now, clone the aom-av1-lavish repo in the Endless_Merging branch and create the folders: git clone https://github.com/Clybius/aom-av1-lavish -b Endless_Merging cd aom-av1-lavish && mkdir -p aom_build && cd aom_build Then we can start compiling with some build optimizations for your CPU: cmake .. -DBUILD_SHARED_LIBS=0 -DENABLE_DOCS=0 -DCONFIG_TUNE_BUTTERAUGLI=0 -DCONFIG_TUNE_VMAF=0 -DCONFIG_AV1_DECODER=0 -DENABLE_TESTS=0 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-flto -O3 -march=native\" -DCMAKE_C_FLAGS=\"-flto -O3 -march=native -pipe -fno-plt\" -DCMAKE_LD_FLAGS=\"-flto -O3 -march=native\" make -j$(nproc) The resulting binary will be available within the home folder of the location where you installed MSYS2 (usually C:). Navigate there, and then to the aom-av1-lavish folder; the binary should be there. Built files should be in the \"Debug\" folder. Don't share binaries compiled with native CPU optimizations unless the person you're sharing with has the same CPU architecture, as native builds may use instructions unsupported on other machines, leading to incorrect behavior or crashes.","s":"Installation","u":"/docs/encoders/aom-av1-lavish","h":"#installation","p":445},{"i":453,"t":"info aomenc was designed around 2-pass encoding to take full advantage of its efficiency features, which include better rate control and additional encoding tools. So always use 2 passes when encoding.
Simple Y4M input with CQ 32, 1 pass, and raw ivf bitstream output aomenc --end-usage=q --cq-level=32 --bit-depth=10 --passes=1 --ivf -o output.ivf input.y4m Pipe from FFmpeg ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | aomenc - --end-usage=q --cq-level=32 --bit-depth=10 --passes=1 --ivf -o output.ivf Pipe from FFmpeg, 2-pass, pass 1 ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | aomenc - --end-usage=q --cq-level=32 --bit-depth=10 --passes=2 --pass=1 --fpf-log=aom-pass.log --ivf -o output.ivf Pipe from FFmpeg, 2-pass, pass 2 ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | aomenc - --end-usage=q --cq-level=32 --bit-depth=10 --passes=2 --pass=2 --fpf-log=aom-pass.log --ivf -o output.ivf","s":"AV1 Encoding","u":"/docs/encoders/aom-av1-lavish","h":"#av1-encoding","p":445},{"i":455,"t":"Using aomenc through avifenc is widely considered to be the best way to encode AVIF images, as SVT-AV1 only supports 4:2:0 chroma subsampling, rav1e isn't fast enough for still images, & the libaom team have put more effort into intra coding than the teams responsible for producing the other prominent open source AV1 encoders. A sample command for encoding AVIF looks like this: avifenc -c aom -s 4 -j 8 -d 10 -y 444 --min 1 --max 63 -a end-usage=q -a cq-level=16 -a tune=ssim [input] output.avif Where: -c aom is the encoder -s 4 is the speed. Speeds 4 & below offer the best compression quality at the expense of longer encode times. -j 8 is the number of threads the encoder is allowed to use. Increasing this past 12 will sometimes hurt encode times, as AVIF encoding via aomenc doesn't parallelize perfectly. Test using a speed benchmark to verify which value works best for you. -d 10 is the bit depth. Specifying a value below 10 isn't recommended, as it will hurt coding efficiency even with an 8-bit source image. -y 444 is the chroma subsampling mode. 4:4:4 chroma subsampling tends to provide better compression than 4:2:0 with AVIF, though on some images 4:2:0 chroma subsampling might be the better choice. cq-level=16 is how you specify quality. Lower values correspond to higher quality & filesize, while higher values mean a smaller, lower-quality output is desired. This is preceded by -a because it is an aomenc option, not an avifenc one. tune=ssim is how the encoder handles RDO (rate-distortion optimization). This may be redundant with the default aomenc parameters, but specifying it doesn't hurt, to avoid an unintended change if a default is modified sometime in the future.","s":"AVIF Encoding","u":"/docs/encoders/aom-av1-lavish","h":"#avif-encoding","p":445},{"i":457,"t":"aomenc unfortunately lacks the ability to take advantage of multiple threads, so a tool like Av1an will be needed for parallelization. The parameters shown will be biased towards Av1an and aom-av1-lavish usage, so if you plan on using standalone aomenc then adjust as needed. Here are some recommended parameters: --bit-depth=10 --cpu-used=4 --end-usage=q --cq-level=24 --threads=2 --tile-columns=0 --tile-rows=0 --lag-in-frames=64 --tune-content=psy --tune=ssim --enable-keyframe-filtering=1 --disable-kf --kf-max-dist=9999 --enable-qm=1 --deltaq-mode=0 --aq-mode=0 --quant-b-adapt=1 --enable-fwd-kf=0 --arnr-strength=1 --sb-size=dynamic --enable-dnl-denoising=0 --denoise-noise-level=8 Now let's break it down. --bit-depth=10 We're using 10-bit because the extra internal precision makes the video smaller and reduces banding.
--cpu-used=4 This is the preset, which ranges from 0-9. You can go to 3 if you want more efficiency, 2 if you have a lot of time, 4 is the sweet spot, and 6 if you want speed. Don't go above 6 (worst efficiency) or down to 0 (it would take WEEKS to finish). --end-usage=q --cq-level=24 This specifies that we are going to use a CRF-like quality mode similar to the x264/x265 encoders, in this case at level 24. --tile-columns=0 --tile-rows=0 These are the tile options, where the encoder splits the video into tiles to encode faster. See the image below (Yellow lines): Tile usage Do NOT use tiles for 1080p and below; use 1 tile-columns at 1440p (2K), and 2 tile-columns with 1 tile-rows for 2160p (4K). If you would like an easy way to calculate the necessary number of tiles for your video, you can use the AV1 Encoding Calculator online or run this local tile calculator. --lag-in-frames=64 Similar to x264/x265 rc-lookahead. Sets a number of frames to look ahead for frametype and ratecontrol, allowing for better compression decision making. Setting to a value greater than 64 is generally not considered useful. --aq-mode=0 Adaptive quantization mode, a mostly debatable area nowadays. 0 is better most of the time, but some say 1 is also good. --tune-content=psy --tune=ssim As the name suggests, these are tunes that affect the video output, for better or for worse. info Do not use tune-content=psy if you encode live action above cq-level=30. info If you use any of the VMAF tunes, you need to specify --vmaf-model-path= to point to where you put the VMAF models. --enable-keyframe-filtering=1 We're setting it to 1 for compatibility reasons; 2 is more efficient, but there are seeking issues and FFmpeg can't take it as input. --sb-size=dynamic Allows the encoder to use 128x128 block partitioning besides 64x64, which gives an efficiency boost. --deltaq-mode=0 Set to 0 because it has been tested to be more perceptually efficient. --arnr-strength=1 Controls how strong the filtering (smoothing) will be; it has always been a hot topic. Most agree on the default of 4. Others think 1 is good for 3D Pixar CGI-like and 2D animation and 4 for live action content, with a higher value for lower bitrate encodes. --disable-kf --enable-fwd-kf=0 We're disabling keyframes because Av1an already did scene detection, so we won't have to. Plus, it speeds things up. --kf-max-dist=9999 Maximum keyframe interval; we're setting it at the highest possible value since Av1an's scene detection keyframe interval is already 240 by default. --enable-chroma-deltaq=1 --enable-qm=1 --quant-b-adapt=1 Parameters that give you a free efficiency boost; set them and forget about them. --enable-dnl-denoising=0 Disables the encoder's built-in denoising technique when grain synthesis is enabled; you can optionally set it to 1 when you have a pretty noisy video, since it works quite well (NLMeans is the denoiser used). --denoise-noise-level=8 AV1 grain synthesis, a technique where the encoder puts fake grain in so the video looks more natural, potentially hiding video artifacts (grain is hard to encode and explodes bitrate usage because of its randomness). Don't attempt to use it at high values (>12), since it creates noticeable grain patterns.
info You can use photon noise tables as an alternative via --film-grain-table, which is also conveniently available in Av1an as --photon-noise=X","s":"Recommendations","u":"/docs/encoders/aom-av1-lavish","h":"#recommendations","p":445},{"i":459,"t":"Use --butteraugli-resize-factor=2 if you use any of the butteraugli-based tunes (lavish, butteraugli) to speed them up without much loss, and --butteraugli-intensity-target=250 to match the content light level. Use --arnr-maxframes to set the max reference frames that will be used to filter the encode; higher values would make the video blurrier at high fidelity but look better at lower bitrates.","s":"Tips & Tricks","u":"/docs/encoders/aom-av1-lavish","h":"#tips--tricks","p":445},{"i":461,"t":"aomenc, AOM-AV1, or just libaom is a command line application for encoding AV1 written in C and Assembly developed by AOMedia, which is also the reference encoder for AV1.","s":"aomenc","u":"/docs/encoders/aomenc","h":"","p":460},{"i":463,"t":"aomenc is available in FFmpeg via libaom-av1; check if you have it by running ffmpeg -h encoder=libaom-av1. You can input non-FFmpeg standard aomenc parameters via -aom-params.","s":"FFmpeg","u":"/docs/encoders/aomenc","h":"#ffmpeg","p":460},{"i":465,"t":"aomenc supports the following color spaces: Format Chroma Subsampling Supported Bit Depth(s) YUV420P 4:2:0 8-bit YUV422P 4:2:2 8-bit YUV444P 4:4:4 8-bit GBRP - 8-bit GRAY8 - 8-bit YUV420P10LE 4:2:0 10-bit YUV422P10LE 4:2:2 10-bit YUV444P10LE 4:4:4 10-bit GBRP10LE - 10-bit GRAY10LE - 10-bit YUV420P12LE 4:2:0 12-bit YUV422P12LE 4:2:2 12-bit YUV444P12LE 4:4:4 12-bit GBRP12LE - 12-bit GRAY12LE - 12-bit","s":"Supported Color Space","u":"/docs/encoders/aomenc","h":"#supported-color-space","p":460},{"i":467,"t":"Linux & macOS Windows Clone the mainline aom repo: Clone the aom repo git clone https://aomedia.googlesource.com/aom cd aom && mkdir aom_build && cd aom_build Configure compilation. The following flags are set to ensure the aomenc binary is built for optimal performance: Set CMake flags cmake .. -DBUILD_SHARED_LIBS=0 -DENABLE_DOCS=0 -DCONFIG_TUNE_BUTTERAUGLI=0 -DCONFIG_TUNE_VMAF=0 -DCONFIG_AV1_DECODER=0 -DENABLE_TESTS=0 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-flto -O3 -march=native\" -DCMAKE_C_FLAGS=\"-flto -O3 -march=native -pipe -fno-plt\" -DCMAKE_LD_FLAGS=\"-flto -O3 -march=native\" Compile: Compile make -j$(nproc) Install to your system. This may require elevated privileges: Install make install MSYS2 is the best option for building in Windows, as it provides a Unix-like environment for compilation. Make sure you have downloaded & installed MSYS2 from the MSYS2 website before beginning the build process. Start the UCRT64 console & install the required dependencies: pacman -S cmake git perl yasm nasm python3 doxygen mingw-w64-x86_64-gcc mingw-w64-x86_64-cmake base-devel Clone the mainline aom repo: Clone the aom repo git clone https://aomedia.googlesource.com/aom cd aom && mkdir aom_build && cd aom_build Configure compilation. The following flags are set to ensure the aomenc binary is built for optimal performance: Set CMake flags cmake ..
-DBUILD_SHARED_LIBS=0 -DENABLE_DOCS=0 -DCONFIG_TUNE_BUTTERAUGLI=0 -DCONFIG_TUNE_VMAF=0 -DCONFIG_AV1_DECODER=0 -DENABLE_TESTS=0 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-flto -O3 -march=native\" -DCMAKE_C_FLAGS=\"-flto -O3 -march=native -pipe -fno-plt\" -DCMAKE_LD_FLAGS=\"-flto -O3 -march=native\" Compile: Compile make -j$(nproc) The resulting binary will be available within the home folder of the location where you installed MSYS2 (usually C:). Navigate there, and then to the aom folder; the binary should be there. Built files should be in the \"Debug\" folder. Sharing Native Binaries Avoid sharing binaries compiled with native CPU optimizations unless the person you're sharing with has the same CPU architecture, as this can lead to incorrect encoder behavior.","s":"Installation","u":"/docs/encoders/aomenc","h":"#installation","p":460},{"i":470,"t":"2-Pass Encoding aomenc was designed around 2-pass encoding to take full advantage of its efficiency features, which include better rate control and additional encoding tools. So always specify the encoder to use 2 passes when encoding. Simple Y4M input with CQ 32, 1 pass, and raw ivf bitstream output aomenc --end-usage=q --cq-level=32 --bit-depth=10 --passes=1 --ivf -o output.ivf input.y4m Pipe from FFmpeg ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | aomenc - --end-usage=q --cq-level=32 --bit-depth=10 --passes=1 --ivf -o output.ivf Pipe from FFmpeg, 2-pass, pass 1 ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | aomenc - --end-usage=q --cq-level=32 --bit-depth=10 --passes=2 --pass=1 --fpf-log=aom-pass.log --ivf -o output.ivf Pipe from FFmpeg, 2-pass, pass 2 ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | aomenc - --end-usage=q --cq-level=32 --bit-depth=10 --passes=2 --pass=2 --fpf-log=aom-pass.log --ivf -o output.ivf","s":"AV1 Encoding","u":"/docs/encoders/aomenc","h":"#av1-encoding","p":460},{"i":472,"t":"Using aomenc through avifenc is widely considered to be the best way to encode AVIF images, as SVT-AV1 only supports 4:2:0 chroma subsampling, rav1e isn't fast enough for still images, & the libaom team have put more effort into intra coding than the teams responsible for producing the other prominent open source AV1 encoders. A sample command for encoding AVIF looks like this: avifenc -c aom -s 4 -j 8 -d 10 -y 444 --min 1 --max 63 -a end-usage=q -a cq-level=16 -a tune=ssim [input] output.avif Where: -c aom is the encoder -s 4 is the speed. Speeds 4 & below offer the best compression quality at the expense of longer encode times. -j 8 is the number of threads the encoder is allowed to use. Increasing this past 12 will sometimes hurt encode times, as AVIF encoding via aomenc doesn't parallelize perfectly. Test using a speed benchmark to verify which value works best for you. -d 10 is the bit depth. Specifying a value below 10 isn't recommended, as it will hurt coding efficiency even with an 8-bit source image. -y 444 is the chroma subsampling mode. 4:4:4 chroma subsampling tends to provide better compression than 4:2:0 with AVIF, though on some images 4:2:0 chroma subsampling might be the better choice. cq-level=16 is how you specify quality. Lower values correspond to higher quality & filesize, while higher values mean a smaller, lower-quality output is desired. This is preceded by -a because it is an aomenc option, not an avifenc one. tune=ssim is how the encoder handles RDO (rate-distortion optimization).
This may be redundant with the default aomenc parameters, but specifying it doesn't hurt, to avoid an unintended change if a default is modified sometime in the future.","s":"AVIF Encoding","u":"/docs/encoders/aomenc","h":"#avif-encoding","p":460},{"i":474,"t":"aomenc is largely lacking in its ability to take advantage of multiple threads, so a tool like Av1an should be utilized for effective parallelization. The parameters shown will be biased towards Av1an and aom-av1-lavish usage, so if you plan on using standalone aomenc please adjust as needed. Here are some recommended parameters: --bit-depth=10 --cpu-used=4 --end-usage=q --cq-level=24 --threads=2 --tile-columns=0 --tile-rows=0 --lag-in-frames=64 --tune=ssim --enable-keyframe-filtering=1 --disable-kf --kf-max-dist=9999 --enable-qm=1 --deltaq-mode=0 --aq-mode=0 --enable-fwd-kf=0 --arnr-strength=1 --sb-size=dynamic --enable-dnl-denoising=0 --denoise-noise-level=8 Now let's break it down. --bit-depth=10 We're using 10-bit because the extra internal precision makes the video smaller and reduces banding. --cpu-used=4 This is the preset, which ranges from 0-9. You can go to 3 if you want more efficiency, 2 if you have a lot of time, 4 is the sweet spot, and 6 if you want speed. Don't go above 6 (worst efficiency) or down to 0 (it would take WEEKS to finish). --end-usage=q --cq-level=24 This specifies that we are going to use a CRF-like quality mode similar to the x264/x265 encoders, in this case at level 24. --tile-columns=0 --tile-rows=0 These are the tile options, where the encoder splits the video into tiles to encode faster. See the image below (Yellow lines): Tile usage Do NOT use tiles for 1080p and below; use 1 tile-columns at 1440p (2K), and 2 tile-columns with 1 tile-rows for 2160p (4K). If you would like an easy way to calculate the necessary number of tiles for your video, you can use the AV1 Encoding Calculator online or run this local tile calculator. --lag-in-frames=64 Similar to x264/x265 rc-lookahead. Sets a number of frames to look ahead for frametype and ratecontrol, allowing for better compression decision making. Setting to a value greater than 64 is generally not considered useful. --aq-mode=0 Adaptive quantization mode, a mostly debatable area nowadays. 0 is better most of the time, but some say 1 is also good. --enable-keyframe-filtering=1 We're setting it to 1 for compatibility reasons; 2 is more efficient, but there are seeking issues and FFmpeg can't take it as input. --sb-size=dynamic Allows the encoder to use 128x128 block partitioning besides 64x64, which gives an efficiency boost. --deltaq-mode=0 This value has been tested to be more perceptually efficient. --arnr-strength=1 Controls how strong the filtering (smoothing) will be; it has always been a hot topic. Most agree on the default of 4. Others think 1 is good for 3D Pixar CGI-like and 2D animation and 4 for live action content, with a higher value for lower bitrate encodes. --disable-kf --enable-fwd-kf=0 We're disabling keyframes because Av1an already did scene detection, so we won't have to. Plus, it speeds things up. --kf-max-dist=9999 Maximum keyframe interval; we're setting it at the highest possible value since Av1an's scene detection keyframe interval is already 240 by default. --enable-chroma-deltaq=1 --enable-qm=1 Parameters that give you a free efficiency boost, discovered via testing.
--enable-dnl-denoising=0 Disables the encoder's built-in denoising technique when grain synthesis is enabled; you can optionally set it to 1 when you have a pretty noisy video, since it works quite well (NLMeans is the denoiser used). --denoise-noise-level=8 AV1 grain synthesis, a technique where the encoder puts fake grain in so the video looks more natural, potentially hiding video artifacts (grain is hard to encode and explodes bitrate usage because of its randomness). Don't attempt to use it at high values (>12), since it creates noticeable grain patterns. info You can use photon noise tables as an alternative via --film-grain-table, which is also conveniently available in Av1an as --photon-noise=X","s":"Recommendations","u":"/docs/encoders/aomenc","h":"#recommendations","p":460},{"i":476,"t":"Mainline aomenc is unfortunately not perfect. It suffers from bad defaults, a heavy focus on the perceptually flawed PSNR metric, misleading settings, and other issues. Fortunately, there are a couple of forks developed by the encoding community that were created to combat aomenc's underlying issues. aom-av1-psy No longer maintained as of 13th January 2023 aom-av1-lavish No longer maintained as of 4th June 2024 aom-psy101 aom-av1ador These forks fix up the poor decisions made by the original AOM devs and most importantly introduce new parameters and tunes to help fine-tune the encoder even more.","s":"Community Forks","u":"/docs/encoders/aomenc","h":"#community-forks","p":460},{"i":478,"t":"Pending Review The content in this entry may not be entirely accurate, & is pending further review to assess the quality of the information. HM is the original H.265 reference encoder, predating alternatives like x265. In the modern day, it joins other MPEG reference encoders such as JM & VTM in their reputations for being highly niche offerings that are used rarely due to their usage complexity & speed disadvantages. x264 is more efficient than JM. HM is capable of producing higher quality streams than highly tuned x265, albeit at excruciatingly slow speeds. This is only a theoretical advantage, though, as HM is incapable of placing keyframes automatically with scene detection & would need a chunking tool reminiscent of Av1an to do this. For videos containing few enough frames that keyframe placement isn't a concern, HM is better in practice than x265 at the expense of a massive dropoff in speed. HM doesn't have any threading capabilities & is much slower than even x265 placebo.","s":"HM","u":"/docs/encoders/HM","h":"","p":477},{"i":480,"t":"These build instructions are valid for Linux & macOS. git clone https://vcgit.hhi.fraunhofer.de/jvet/HM cd HM/ mkdir build && cd build cmake .. -DCMAKE_BUILD_TYPE=Release make -j$(nproc) The binary TAppEncoderStatic or TAppEncoder can be found within the cloned directories, & can be copied to /usr/local/bin for encoding. Decoding & other functions of the reference codec implementation aren't covered in this entry.","s":"Installation","u":"/docs/encoders/HM","h":"#installation","p":477},{"i":482,"t":"Here is a sample command: TAppEncoderStatic -i input.yuv -b out.265 -c ~/HM/cfg/encoder_randomaccess_main10.cfg -wdt 1280 -hgt 720 -fr 50 -f 500 -q 27 -xPS 0 Make sure to use only YUV input when encoding with HM. Each parameter does the following: -i input.yuv -b out.265 Specifies a raw YUV input file & an output raw H.265 bitstream. To mux into an MP4 container, it is recommended that you use mp4box instead of muxing with FFmpeg.
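For example (a sketch, assuming GPAC's MP4Box is installed), MP4Box -add out.265 output.mp4 muxes the raw bitstream into an MP4 container.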
-c [path/to/config] Specifies the desired path to your HM configuration file. This makes it easier to encode without having to manually specify a plethora of settings. -wdt 1280 -hgt 720 Sets the input & output width & height. -fr 50 -f 500 Sets the framerate (FPS) & the number of frames to encode. In this case, we are encoding 500 frames of a video that is to be played back at 50fps. -q 27 Sets a quality target (QP) for the encoder. -xPS 0 Zero clue what this does. If someone has an idea, please contribute!","s":"Usage","u":"/docs/encoders/HM","h":"#usage","p":477},{"i":484,"t":"Kvazaar is an open-source H.265 / HEVC software encoder written in C, developed by Ultra Video Group and licensed under BSD 3-clause. uvg266 (developed by the same group) uses Kvazaar as a base for encoding to the VVC codec. x265 is generally regarded as having better performance while producing better quality video streams.","s":"Kvazaar","u":"/docs/encoders/Kvazaar","h":"","p":483},{"i":486,"t":"Kvazaar is available in FFmpeg via libkvazaar; to check if you have it, run ffmpeg -h encoder=libkvazaar. You can input non-FFmpeg standard Kvazaar parameters via -kvazaar-params. You may need to download \"full\" FFmpeg builds, as most of the time this encoder is not included.","s":"FFmpeg","u":"/docs/encoders/Kvazaar","h":"#ffmpeg","p":483},{"i":488,"t":"Kvazaar supports the following color spaces: Format Chroma Subsampling Supported Bit Depth(s) YUV420P 4:2:0 8-bit YUV420P10LE 4:2:0 10-bit* *10-bit support requires a flag to be set during compilation with CMake.","s":"Supported Color Space","u":"/docs/encoders/Kvazaar","h":"#supported-color-space","p":483},{"i":490,"t":"Linux & macOS Windows For Arch Linux, Kvazaar is available as kvazaar. It is also available in the Arch User Repository (AUR) as kvazaar-git. Ultra Video Group does not ship any pre-built binaries of their encoders except for their AppVeyor CI, but AppVeyor deletes build artifacts after a month, so most of the time you'll have to compile Kvazaar yourself. Here are the instructions to do so: Autotools Compilation requires GNU Automake, Autoconf, Libtool, and M4. Install them via your package manager. Clone the repository and its submodules: git clone --recursive https://github.com/ultravideo/kvazaar.git cd kvazaar ./autogen.sh ./configure make -j$(nproc) Binaries will be available in src, or you can run make install on Linux to install (may need elevated permissions). CMake (10-bit support) You will need to use CMake to specify a flag to be able to encode 10-bit with the encoder; by default Kvazaar ships with only 8-bit. git clone --recursive https://github.com/ultravideo/kvazaar.git cd kvazaar/build cmake .. -DCMAKE_C_FLAGS=\"-DKVZ_BIT_DEPTH=10\" # optional 10-bit flag make -j$(nproc) Be aware that encoding 10-bit HEVC with Kvazaar is significantly slower, as the developers only prioritized SIMD optimizations for 8-bit encoding. Be aware that this implementation can be buggy in general. Windows users are recommended to compile via MinGW-W64, which comes with MSYS2. Please be advised that using Clang to compile in this situation is heavily recommended, as AVX2 optimizations are disabled under GCC due to a known GCC issue from 2012 (exclusive to MinGW environments). To do this, run CC=clang ./configure during the autoconf step. MSYS2 Make sure you have downloaded & installed MSYS2 from the MSYS2 website before beginning the build process.
Start the UCRT64 console & install the required dependencies with the pacman package manager. Resume the build process as you would on a Unix-like system. See the \"Linux & macOS\" tab for more information.","s":"Installation","u":"/docs/encoders/Kvazaar","h":"#installation","p":483},{"i":492,"t":"Compilation requires GNU Automake, Autoconf, Libtool, and M4. Install them via your package manager. Clone the repository and its submodules: git clone --recursive https://github.com/ultravideo/kvazaar.git cd kvazaar ./autogen.sh ./configure make -j$(nproc) Binaries will be available in src, or you can run make install on Linux to install (may need elevated permissions).","s":"Autotools","u":"/docs/encoders/Kvazaar","h":"#autotools","p":483},{"i":494,"t":"You will need to use CMake to specify a flag to be able to encode 10-bit with the encoder; by default Kvazaar ships with only 8-bit. git clone --recursive https://github.com/ultravideo/kvazaar.git cd kvazaar/build cmake .. -DCMAKE_C_FLAGS=\"-DKVZ_BIT_DEPTH=10\" # optional 10-bit flag make -j$(nproc) Be aware that encoding 10-bit HEVC with Kvazaar is significantly slower, as the developers only prioritized SIMD optimizations for 8-bit encoding. Be aware that this implementation can be buggy in general.","s":"CMake (10-bit support)","u":"/docs/encoders/Kvazaar","h":"#cmake-10-bit-support","p":483},{"i":496,"t":"Make sure you have downloaded & installed MSYS2 from the MSYS2 website before beginning the build process. Start the UCRT64 console & install the required dependencies with the pacman package manager. Resume the build process as you would on a Unix-like system. See the \"Linux & macOS\" tab for more information.","s":"MSYS2","u":"/docs/encoders/Kvazaar","h":"#msys2","p":483},{"i":498,"t":"Here are some examples of how to use Kvazaar on its own: Simple Y4M input with QP 20 and raw 265 bitstream output kvazaar -i input.y4m --input-file-format y4m --qp 20 -o output.265 Preset slow, QP 20, Y4M input kvazaar -i input.y4m --input-file-format y4m --qp 20 --preset slow -o output.265 The command below still uses the kvazaar binary, but reads from a YUV4MPEG pipe instead of a file. This is useful for piping FFmpeg output to Kvazaar. FFmpeg piping ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | kvazaar -i - --input-file-format y4m --qp 20 --preset slow -o output.265","s":"Usage","u":"/docs/encoders/Kvazaar","h":"#usage","p":483},{"i":500,"t":"rav1e is an open source command line application for encoding AV1 written in Assembly & Rust, co-developed by Xiph.org and Mozilla and licensed under the BSD 2-Clause license.","s":"rav1e","u":"/docs/encoders/rav1e","h":"","p":499},{"i":502,"t":"rav1e is available in FFmpeg via librav1e; to check if you have it, run ffmpeg -h encoder=librav1e. You can input non-FFmpeg standard rav1e parameters via -rav1e-params.","s":"FFmpeg","u":"/docs/encoders/rav1e","h":"#ffmpeg","p":499},{"i":504,"t":"rav1e supports the following color spaces: Format Chroma Subsampling Supported Bit Depth(s) YUV420P 4:2:0 8-bit YUVJ420P 4:2:0 8-bit (Full range) YUV422P 4:2:2 8-bit YUVJ422P 4:2:2 8-bit (Full range) YUV444P 4:4:4 8-bit YUVJ444P 4:4:4 8-bit (Full range) YUV420P10LE 4:2:0 10-bit YUV422P10LE 4:2:2 10-bit YUV444P10LE 4:4:4 10-bit YUV420P12LE 4:2:0 12-bit YUV422P12LE 4:2:2 12-bit YUV444P12LE 4:4:4 12-bit","s":"Supported Color Space","u":"/docs/encoders/rav1e","h":"#supported-color-space","p":499},{"i":506,"t":"Linux & macOS Windows Official pre-built rav1e binaries can be found on the releases page in rav1e's Github repository.
Unofficially, the rAV1ator CLI command line tool can automatically download and install rav1e to /usr/local/bin. rav1e can also be installed with Cargo by running cargo install rav1e. Stable Release For stability & a proper version number, please reset the source to the correct release commit. On the releases page, click the icon to the right of the release tag & copy the commit hash from the URL bar. Then, in the cloned rav1e directory, run git reset --hard [commit hash] Here are instructions for resetting to release 0.7.1 (latest as of 19 Feb 2024) and building. Omit the git reset command if you have a specific reason to use the latest git instead of an official tagged release. git clone https://github.com/xiph/rav1e.git cd rav1e git reset --hard a8d05d0c43826a465b60dbadd0ab7f1327d75371 RUSTFLAGS=\"-C target-cpu=native\" cargo build --release When done, the binary can be found in target/release. You can then copy the binary wherever you desire it to go, such as by running cp target/release/rav1e /usr/local/bin. Patched Installation with HDR10+ support rav1e currently has an unmerged pull request by quietvoid, the person behind hdr10plus_tool and dovi_tool. The PR adds a new parameter called --hdr10plus-json for HDR10+ JSON dynamic metadata input. To merge it locally, do the following: git clone https://github.com/xiph/rav1e.git cd rav1e git reset --hard [release commit] git fetch origin pull/3000/head:HDR10+ Now the patch should be applied, and you may build as usual. If you would not like to build from source, official pre-built rav1e binaries can be found on the releases page in rav1e's Github repository. Stable Release For stability & a proper version number, please reset the source to the correct release commit. On the releases page, click the icon to the right of the release tag & copy the commit hash from the URL bar. Then, in the cloned rav1e directory, run git reset --hard [commit hash] Here are instructions for resetting to release 0.7.1 (latest as of 19 Feb 2024) and building. Omit the git reset command if you have a specific reason to use the latest git instead of an official tagged release. git clone https://github.com/xiph/rav1e.git cd rav1e git reset --hard a8d05d0c43826a465b60dbadd0ab7f1327d75371 set RUSTFLAGS=-C target-cpu=native cargo build --release When done, the binary can be found in target/release","s":"Installation","u":"/docs/encoders/rav1e","h":"#installation","p":499},{"i":508,"t":"rav1e currently has an unmerged pull request by quietvoid, the person behind hdr10plus_tool and dovi_tool. The PR adds a new parameter called --hdr10plus-json for HDR10+ JSON dynamic metadata input. To merge it locally, do the following: git clone https://github.com/xiph/rav1e.git cd rav1e git reset --hard [release commit] git fetch origin pull/3000/head:HDR10+ Now the patch should be applied, and you may build as usual.","s":"Patched Installation with HDR10+ support","u":"/docs/encoders/rav1e","h":"#patched-installation-with-hdr10-support","p":499},{"i":510,"t":"For AV1 encoding, rav1e has very sane defaults. It is very hard to go wrong with parameters if you modify as few as possible. tip To convert cq-level in aomenc and crf in SVT-AV1 to rav1e's quantizer values, multiply by 4. For example, --cq-level 20 equals --quantizer 80.
Basic usage rav1e -i input.y4m -o output.ivf --quantizer 60 --photon-noise 8 Basic usage with FFmpeg piping, 10-bit input ffmpeg -i input.mkv -pix_fmt yuv420p10le -strict -2 -f yuv4mpegpipe - | rav1e - -o output.ivf --quantizer 80 --photon-noise 8 Basic usage with FFmpeg piping, 10-bit input and assuming 4K ffmpeg -i input.mkv -pix_fmt yuv420p10le -strict -2 -f yuv4mpegpipe - | rav1e - -o output.ivf --quantizer 68 --tile-columns 2 --tile-rows 1 --photon-noise 8","s":"Usage","u":"/docs/encoders/rav1e","h":"#usage","p":499},{"i":512,"t":"Use 2x1 tiles or tile-columns 2 and tile-rows 1 for 4K (2160p) encoding; this will help with both encoding and decoding speeds.","s":"Tips & Tricks","u":"/docs/encoders/rav1e","h":"#tips--tricks","p":499},{"i":514,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! ZPAQ is a lossless data compression algorithm that combines several techniques to achieve high compression ratios. It was developed by Matt Mahoney. ZPAQ uses a multitude of different compression algorithms to try to achieve the best size-to-compression-time ratio possible while producing the smallest possible archives without much concern given to decompression performance. The official ZPAQ website describes it as designed for \"realistic backups that have a lot of duplicate files and a lot of already compressed files.\" ZPAQ is also considered an \"incremental journaling archiver,\" meaning you can add files to an existing archive based on whether they have changed. This reduces the time needed to wait for a new backup to finish, if that is your use case. Since ZPAQ is so focused on compression ratio, this kind of feature may reduce the burden imposed by long compression times in practical use cases where it makes sense. Windows & macOS do not handle ZPAQ archives properly by default, and it is unlikely many Linux distros do either.","s":"ZPAQ","u":"/docs/data/zpaq","h":"","p":513},{"i":516,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. SVT-VP9 is a VP9 encoder developed by Intel. Like its siblings in the SVT encoder family, it scales very well on multicore processors by default. The reference encoder beats it in rate control flexibility and supports 10-bit color, but SVT-VP9 is much faster out of the box.","s":"SVT-VP9","u":"/docs/encoders/SVT-VP9","h":"","p":515},{"i":518,"t":"There are patches bundled in the SVT-VP9 source code for an FFmpeg plugin that adds the libsvt_vp9 encoder. One must recompile FFmpeg with the plugin patch applied to take advantage of it. (An easy way to do so on Windows is using media-autobuild_suite.) Operation is not too different from the SVT-AV1 FFmpeg integration. Your commands will generally look like this: ffmpeg -i video.mp4 -c:v libsvt_vp9 -qp 38 -tune ssim -preset 7 -g 255 video_vp9.webm Parameters Parameter Description -qp Quantizer value, higher = lower quality. Range is 1..51 in current patches, but -qmin/-qmax can be set as high as 69 for extreme low bitrates. -preset Speed preset. Range is 0..9, with 9 being fastest and default. -tune Quality metric. Can be \"vq\" (default), \"ssim\" or \"vmaf\". -g Size of the Group of Pictures. Range is -2..255, with -1 = no intraframe updates ever, -2 = \"auto\". We recommend you set it as high as possible for encode efficiency. -rc Rate control mode. Can be \"cqp\" (Constant Quantizer, default), \"vbr\" (Variable Bitrate) or \"cbr\" (Constant Bitrate). 
(Consider using vpxenc's two-pass mode if you really need to match a target bitrate.) -level Encoder level. Range is 1..6. Generally better not to set it. -socket Index of the CPU socket to use. By default it's -1, which uses \"all available processors\".","s":"FFmpeg","u":"/docs/encoders/SVT-VP9","h":"#ffmpeg","p":515},{"i":520,"t":"SVT-VP9 only supports 8-bit yuv420p.","s":"Supported Color Space","u":"/docs/encoders/SVT-VP9","h":"#supported-color-space","p":515},{"i":523,"t":"To be filled. If you believe you can help, see our Contribution Guide.","s":"Standalone","u":"/docs/encoders/SVT-VP9","h":"#standalone","p":515},{"i":525,"t":"Community Fork This entry is about a fork of SVT-AV1 called SVT-AV1-PSY. If you'd like to learn about the mainline SVT-AV1 encoder before reading, visit our SVT-AV1 wiki entry. SVT-AV1-PSY is a project that aims to enhance the Scalable Video Technology for AV1 Encoder with perceptual enhancements for psychovisually optimal AV1 encoding. The ultimate goal is to create the best encoding implementation for perceptual quality with AV1. The development of this project involves a collaborative effort from a team of dedicated developers and contributors who are committed to improving the perceptual quality of AV1 encoding. The SVT-AV1-PSY project is maintained by Gianni Rosato, Julio Barba, & Clybius, as well as a number of community contributors including BlueSwordM, the maintainer of the SVT-AV1-PSY AUR package & the original author of aom-av1-psy. The development process involves community testing and optimization to ensure that the encoder and decoder deliver optimal performance. The team uses a variety of tools and methodologies to analyze and improve the performance of the encoder and decoder, including subjective analyses. SSIMULACRA2 and XPSNR are used extensively for metrics testing, and the team is committed to improving the overall quality and performance of the encoder using these two metrics as general guidelines and benchmarks. However, the stated goal is not to improve metric scores but to improve the overall perceptual quality of the encoder; naturally, changes are often made to SVT-AV1-PSY that end up degrading metric performance in favor of perceptual fidelity per bit. SVT-AV1-PSY is a superset of SVT-AV1, meaning any valid SVT-AV1 command will work with SVT-AV1-PSY given the modified defaults do not conflict with the settings provided. SVT-AV1-PSY is used by default in Aviator and can be used in rAV1ator CLI by using the pre-compiled binaries available with the tool or by building a binary yourself. SVT-AV1-PSY contributors are not in any way affiliated with the Alliance for Open Media or any upstream SVT-AV1 project contributors who have not also contributed to the SVT-AV1-PSY project.","s":"SVT-AV1-PSY","u":"/docs/encoders/SVT-AV1-PSY","h":"","p":524},{"i":527,"t":"SVT-AV1-PSY features additions outside of mainline SVT-AV1 that are often considered to be significant, but don't line up with a major mainline release. Therefore, the SVT-AV1-PSY release framework is different, and the encoder is currently being developed around micro-releases to make this clear. Micro-releases are tagged with letters starting at A to indicate that they contain significant new features and aren't equivalent to mainline releases; for example, v2.0.0-A was a micro-release with significant new features exclusive to SVT-AV1-PSY that followed the prior v2.0.0 release (which came out with PSY + mainline features in tandem with mainline v2.0.0). 
Releases without letters are in line with mainline SVT-AV1 releases, and may contain significant PSY feature additions as well.","s":"Micro-Release Framework","u":"/docs/encoders/SVT-AV1-PSY","h":"#micro-release-framework","p":524},{"i":529,"t":"SVT-AV1-PSY includes a number of new features that are not present in mainline SVT-AV1. These features are designed to improve the visual quality of AV1 encodes, and offer more flexibility when configuring the encoder for a wide range of encoding scenarios. Many of these changes are being integrated into mainline SVT-AV1 as the project matures. --variance-boost-strength 1 to 4 (Merged to Mainline) Provides control over our augmented AQ Modes 0 and 2 which can utilize variance information in each frame for more consistent quality under high/low contrast scenes. Four curve options are provided, and the default is curve 2. 1: mild, 2: gentle, 3: medium, 4: aggressive --variance-octile 1 to 8 (Merged to Mainline) Controls how \"selective\" the algorithm is when boosting superblocks, based on their low/high 8x8 variance ratio. A value of 1 is the least selective, and will readily boost a superblock if only 1/8th of the superblock is low variance. Conversely, a value of 8 will only boost if the entire superblock is low variance. Lower values increase bitrate. The default value is 6. --enable-alt-curve 0 and 1 Enable an alternative variance boost curve, with different bit allocation and visual characteristics. The default is 0. Presets -2 & -3 Terrifically slow encoding modes for research purposes. Tune 3 A new tune based on Tune 2 (SSIM) called SSIM with Subjective Quality Tuning. Generally harms metric performance in exchange for better visual fidelity. --sharpness -7 to 7 A parameter for modifying loopfilter deblock sharpness and rate distortion to improve visual fidelity. The default is 0 (no sharpness). --dolby-vision-rpu path to file Set the path to a Dolby Vision RPU for encoding Dolby Vision video. SVT-AV1-PSY needs to be built with the enable-libdovi flag enabled in build.sh (see ./Build/linux/build.sh --help for more info) (Thank you @quietvoid!) Progress 3 A new progress mode that provides more detailed information about the encoding process. --fgs-table path to file (Merged to Mainline) Argument for providing a film grain table for synthetic film grain (similar to aomenc's '--film-grain-table=' argument). Extended CRF Provides a more versatile and granular way to set CRF. Range has been expanded to 70 (from 63) to help with ultra-low bitrate encodes, and can now be set in quarter-step (0.25) increments. --qp-scale-compress-strength 0 to 3 Increases video quality temporal consistency, especially with clips that contain film grain and/or contain fast-moving objects. --enable-dlf 2 Enables a more accurate loop filter that prevents blocking, for a modest increase in compute time (most noticeable at presets 7 to 9). Higher-quality presets for 8K Lowers the minimum available preset from 8 to 2 for higher-quality 8K encoding (64 GB of RAM recommended per encoding instance). --frame-luma-bias 0 to 100 Enables frame-level luma bias to improve quality in dark scenes by adjusting frame-level QP based on average luminance across each frame
Disable film grain denoising by default, as it often harms visual fidelity. (Merged to Mainline) Default to Tune 2 instead of Tune 1, as it reliably outperforms Tune 1 perceptually. Enable quantization matrices by default. Set minimum QM level to 0 by default. --enable-variance-boost enabled by default.","s":"Modified Defaults","u":"/docs/encoders/SVT-AV1-PSY","h":"#modified-defaults","p":524},{"i":533,"t":"--color-help Prints the information found in Appendix A.2 of the user guide in order to help users more easily understand the Color Description Options in SvtAv1EncApp. Micro-Releases In order to make SVT-AV1-PSY feature additions clearer, micro-release tags indicate when significant new feature additions have been made. Micro-release tags are letters starting with A, so new releases will be tagged as v#.#.#-A, v#.#.#-B, etc. This is discussed earlier in this entry.","s":"Other Changes","u":"/docs/encoders/SVT-AV1-PSY","h":"#other-changes","p":524},{"i":535,"t":"Building & installing SVT-AV1-PSY is the same as building & installing mainline SVT-AV1. Linux & macOS Windows A precompiled AVX2-optimized binary of SVT-AV1-PSY can be installed for x86_64 Linux via rAV1ator CLI. However, it is always recommended to build from source. To build SVT-AV1 from source, first clone the desired SVT-AV1 repository & enter the build directory. Clone SVT-AV1-PSY git clone https://github.com/gianni-rosato/svt-av1-psy cd svt-av1-psy/Build/linux In the directory, simply run ./build.sh [flags] to build. Be aware that building requires CMake version 3.16 or higher and either GCC or Clang. It is recommended to use Clang. Build release ./build.sh release Statically build just the encoder with clang and enable link-time optimization ./build.sh jobs=8 all cc=clang cxx=clang++ no-dec enable-lto static native The compiled binaries will be in the Bin/Release directory, including SvtAv1EncApp. If you just want the encoder, adding the no-dec flag will skip building SvtAv1DecApp and save on compilation time. If you'd like to build from the latest release (2.1.0 at the time of writing - last updated 23 May 2024) please run git reset --hard 5471bd78311d70ab4691af1ae54fd80e25f214f5 in the cloned directory. It is recommended that you do this, as new changes to git aren't always stable right away & a release will guarantee more stability. If you want extra performance, it is possible to build SVT-AV1 using PGO (Profile-guided Optimization). Be aware that this particular script assumes that you have a .y4m file (or multiple) in /dev/shm for transcoding. 
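For example, you could seed that directory with a short clip first (a sketch; sample.mkv is a placeholder name): ffmpeg -i sample.mkv -t 30 -pix_fmt yuv420p /dev/shm/sample.y4m 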
You can compile statically linked SVT-AV1 with PGO (and LTO, or link-time optimization) by following this script: Building SVT-AV1 with profile-guided optimization git clone https://gitlab.com/AOMediaCodec/SVT-AV1/ cd SVT-AV1/Build/linux ./build.sh cc=gcc cxx=g++ enable-lto enable-pgo static native jobs=$(nproc) pgo-dir=/dev/shm pgo-videos=/dev/shm release If you wish to store videos elsewhere or provide custom parameters to the SvtAv1EncApp binary, try this script: git clone https://gitlab.com/AOMediaCodec/SVT-AV1/ cd SVT-AV1/Build/linux ./build.sh cc=gcc cxx=g++ enable-lto enable-pgo static native jobs=$(nproc) pgo-dir=/dev/shm pgo-compile-gen release ../../Bin/Release/SvtAv1EncApp # Run this binary as many times as you'd like with arguments of your choice to collect data ./build.sh cc=gcc cxx=g++ enable-lto enable-pgo static native jobs=$(nproc) pgo-dir=/dev/shm pgo-compile-use release MSYS2 is the best option for building on Windows, as it provides a Unix-like environment for building SVT-AV1-PSY. This makes the compilation procedure the same as described for Linux & macOS. The full build process is detailed here. Make sure you have downloaded & installed MSYS2 from the MSYS2 website before beginning the build process. Start the UCRT64 console & install the required dependencies: pacman -Syu --needed git mingw-w64-ucrt-x86_64-toolchain mingw-w64-ucrt-x86_64-cmake mingw-w64-ucrt-x86_64-ninja mingw-w64-ucrt-x86_64-yasm [Optional] Clang is the recommended compiler for SVT-AV1 & SVT-AV1-PSY, so you may wish to install it with the following command: pacman -Syu --needed mingw-w64-ucrt-x86_64-clang Now, we may follow the steps for Linux & macOS to complete building. Please note that CMake may require you to include -G \"Ninja\" in any CMake commands. Clone SVT-AV1-PSY git clone https://github.com/gianni-rosato/svt-av1-psy cd svt-av1-psy/Build/linux In the directory, simply run ./build.sh [flags] to build. Be aware that building requires CMake version 3.16 or higher and either GCC or Clang. It is recommended to use Clang, and ideally it will be installed as per Step 2. Build release ./build.sh release Statically build just the encoder with clang and enable link-time optimization ./build.sh jobs=8 all cc=clang cxx=clang++ no-dec enable-lto static native The compiled binaries will be in the Bin/Release directory, including SvtAv1EncApp. If you just want the encoder, adding the no-dec flag will skip building SvtAv1DecApp and save on compilation time. If you'd like to build from the latest release (2.1.0 at the time of writing - last updated 23 May 2024) please run git reset --hard 5471bd78311d70ab4691af1ae54fd80e25f214f5 in the cloned directory.","s":"Installation","u":"/docs/encoders/SVT-AV1-PSY","h":"#installation","p":524},{"i":537,"t":"Some projects that feature SVT-AV1-PSY include: Aviator ~ an AV1 encoding GUI by @gianni-rosato rAV1ator CLI ~ a TUI for video encoding with Av1an by @gianni-rosato SVT-AV1-PSY on the AUR ~ by @BlueSwordM SVT-AV1-PSY in CachyOS ~ by @BlueSwordM Custom Handbrake Builds ~ by @vincejv Staxrip ~ a video & audio encoding GUI for Windows by @Dendraspis","s":"Projects Featuring SVT-AV1-PSY","u":"/docs/encoders/SVT-AV1-PSY","h":"#projects-featuring-svt-av1-psy","p":524},{"i":539,"t":"Up to v0.8.7, SVT-AV1 is licensed under the BSD-2-clause license and the Alliance for Open Media Patent License 1.0. Starting from v0.9, SVT-AV1 is licensed under the BSD-3-clause clear license and the Alliance for Open Media Patent License 1.0. 
SVT-AV1-PSY does not feature license modifications from mainline SVT-AV1.","s":"License","u":"/docs/encoders/SVT-AV1-PSY","h":"#license","p":524},{"i":541,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. Apple's VideoToolbox is a low-level framework that provides direct access to hardware encoders and decoders. It offers services for video compression and decompression, as well as conversion between raster image formats stored in CoreVideo pixel buffers. The VideoToolbox encoder works by compressing video data for various applications such as low-latency conferencing, live streaming, and offline transcoding. It supports hardware encoding on most Macs from 2011 and later, and uses Apple's Media Engine on devices with Apple T2 chips or Apple Silicon. The encoder can be configured to optimize encoding for specific applications, and it supports various video codecs including H.264 and H.265, with support for both 8-bit and 10-bit H.265 encoding. It is worth noting that the VideoToolbox encoder is designed for applications that require direct access to hardware encoders and decoders. Apple's VideoToolbox framework also supports hardware accelerated video decoding for a number of video codecs. As of the Apple M3, these include H.264, H.265, ProRes, ProRes RAW, and AV1.","s":"VideoToolbox","u":"/docs/encoders_hw/videotoolbox","h":"","p":540},{"i":543,"t":"Encoding with VideoToolbox on macOS is possible via FFmpeg, a versatile command line utility, or Handbrake, a GUI for video encoding.","s":"Usage","u":"/docs/encoders_hw/videotoolbox","h":"#usage","p":540},{"i":545,"t":"To use H.264 or H.265 (HEVC) hardware encoding in macOS via VideoToolbox, just use the encoder -c:v h264_videotoolbox or -c:v hevc_videotoolbox for H.264 or HEVC respectively. Here are some example commands for encoding with VideoToolbox on Apple Silicon via FFmpeg: H.264 encoding (high profile) ffmpeg -i input.mkv -c:v h264_videotoolbox -profile 100 -q:v [0-100] output.mp4 8-bit HEVC encoding (main profile) ffmpeg -i input.mkv -c:v hevc_videotoolbox -profile 1 -q:v [0-100] -tag:v hvc1 output.mp4 10-bit HEVC encoding (main10 profile) ffmpeg -i input.mkv -c:v hevc_videotoolbox -profile 2 -q:v [0-100] -tag:v hvc1 output.mp4 Just run ffmpeg -help encoder=hevc_videotoolbox or ffmpeg -help encoder=h264_videotoolbox for more info.","s":"FFmpeg","u":"/docs/encoders_hw/videotoolbox","h":"#ffmpeg","p":540},{"i":547,"t":"To be filled. Sources (1) Video Toolbox | Apple Developer Documentation. https://developer.apple.com/documentation/videotoolbox. (2) HandBrake Documentation - Apple VideoToolbox. https://handbrake.fr/docs/en/latest/technical/video-videotoolbox.html. (3) Apple's T2 chip makes a giant difference in video encoding for most .... https://appleinsider.com/articles/19/04/09/apples-t2-chip-makes-a-giant-difference-in-video-encoding-for-most-users.","s":"Handbrake","u":"/docs/encoders_hw/videotoolbox","h":"#handbrake","p":540},{"i":549,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! JM is the original H.264 reference encoder, predating alternatives like x264. Since x264 became highly performant and perceptually driven, JM now joins other MPEG reference encoders such as HM & VTM in their reputation as highly niche offerings, rarely used due to their usage complexity & speed disadvantages. 
x264 is generally more efficient than JM.","s":"JM","u":"/docs/encoders/JM","h":"","p":548},{"i":551,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! VTM is the original H.266 (better known as VVC) reference encoder, in competition with alternatives like VVenC. In the modern day, it joins other MPEG reference encoders such as HM & JM in their reputation as highly niche offerings, rarely used due to their usage complexity & speed disadvantages; however, VTM may be more useful due to the current difficulty facing VVC encoding regardless of the encoding implementation one chooses to use.","s":"VTM","u":"/docs/encoders/VTM","h":"","p":550},{"i":553,"t":"SVT-HEVC (Scalable Video Technology for HEVC) is an open source H.265 / HEVC software encoder developed by Intel, made specifically to support only x86. As the name suggests, it is part of the \"Scalable Video Technology\" project lineup by Intel. The encoder is written in C with some parts in Assembly and licensed under BSD+Patent. info It is recommended to use x265 instead as it offers much better quality.","s":"SVT-HEVC","u":"/docs/encoders/SVT-HEVC","h":"","p":552},{"i":555,"t":"SVT-HEVC is only available in FFmpeg when FFmpeg is compiled with the provided plugin patch, which adds the libsvt_hevc encoder; to check if you have it, run ffmpeg -h encoder=libsvt_hevc.","s":"FFmpeg","u":"/docs/encoders/SVT-HEVC","h":"#ffmpeg","p":552},{"i":557,"t":"SVT-HEVC supports the following color spaces: Format Chroma Subsampling Supported Bit Depth(s) YUV420P 4:2:0 8-bit YUV420P10LE 4:2:0 10-bit","s":"Supported Color Space","u":"/docs/encoders/SVT-HEVC","h":"#supported-color-space","p":552},{"i":559,"t":"Linux & macOS Windows To build SVT-HEVC from source, first clone the SVT-HEVC repository & enter the build directory. Clone SVT-HEVC and cd git clone https://github.com/OpenVisualCloud/SVT-HEVC cd SVT-HEVC/Build/linux In the directory, simply run ./build.sh [flags] to build. Be aware that building requires CMake version 3.5.1 or higher and either GCC or Clang. It is recommended to use Clang when building SVT-HEVC. Build release ./build.sh release Statically build release ./build.sh static release The compiled binaries will be in the Bin/Release directory. To be filled. If you believe you can help, see our Contribution Guide.","s":"Installation","u":"/docs/encoders/SVT-HEVC","h":"#installation","p":552},{"i":562,"t":"Simple Y4M input with QP 20, and raw 265 bitstream output SvtHevcEncApp -i input.y4m -q 20 -b output.265 Preset 4, QP 20, Y4M input SvtHevcEncApp -i input.y4m -q 20 -encMode 4 -b output.265 FFmpeg piping ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | SvtHevcEncApp -i stdin -q 20 -encMode 4 -b output.265 danger SVT-HEVC currently does not have the ability to detect EOF (End of File) within Y4M pipes (issue), so the encode process will go on indefinitely until your drive is full. 
The only known solution is to manually set the number of frames to encode via -n.","s":"Normal usage","u":"/docs/encoders/SVT-HEVC","h":"#normal-usage","p":552},{"i":564,"t":"SvtHevcEncApp -i input.y4m -q 20 -encMode 4 -hdr -max-cll XXX -max-fall XXX -master-display G(0.265,0.690)B(0.150,0.060)R(0.680,0.320)WP(0.3127,0.3290)L(1000,0.0100) -b output.265 Remember to adjust the HDR metadata accordingly.","s":"Encoding HDR","u":"/docs/encoders/SVT-HEVC","h":"#encoding-hdr","p":552},{"i":566,"t":"The ability to encode with Dolby Vision via RPU file is surprisingly present within SVT-HEVC, although it is limited to Profile 8.1. SvtHevcEncApp -i input.y4m -q 20 -encMode 4 -hdr -dolby-vision-rpu RPUFile.bin -dolby-vision-profile 81 -max-cll XXX -max-fall XXX -master-display G(0.265,0.690)B(0.150,0.060)R(0.680,0.320)WP(0.3127,0.3290)L(1000,0.0100) -b output.265 Remember to adjust the HDR metadata accordingly.","s":"Encoding with Dolby Vision","u":"/docs/encoders/SVT-HEVC","h":"#encoding-with-dolby-vision","p":552},{"i":568,"t":"As with every encoder in the SVT line-up, they redirect you to read the \"User Guide\" instead of presenting what each and every parameter does in the --help page. The table below is an exact copy from their user guide taken from GitHub for backup purposes. Feel free to read it. Encoder Parameter as shown in the configuration file Command Line parameter Range Default Description Channel Number -nch [1 - 6] 1 Number of encode instances ConfigFile -c any string null Configuration file path InputFile -i any string null Input file path and name StreamFile -b any string null Output bitstream file path and name ErrorFile -errlog any string stderr Error log displaying configuration or encode errors ReconFile -o any string null Output reconstructed yuv used for debug purposes. Note: using this feature will affect the speed of the encoder significantly. This should only be used for debugging purposes. UseQpFile -use-q-file [0, 1] 0 When set to 1, overwrite the picture qp assignment using qp values in QpFile QpFile -qp-file any string null Path to qp file SegmentOvFile -segment-ov-file any string null Path to segment override file which will allow for sharpness improvement and bit rate reduction on a per segment basis. Refer to config/SVTSegmentOvFile.txt for details. EncoderMode -encMode [0 - 11] 7 A preset defining the quality vs density tradeoff point that the encoding is to be performed at. (e.g. 0 is the highest quality mode, 11 is the highest density mode). Section 3.4 outlines the preset availability per resolution EncoderBitDepth -bit-depth [8, 10] 8 Specifies the bit depth of input video EncoderColorFormat -color-format [1, 2, 3] 1 Specifies the chroma subsampling of input video (1: 420, 2: 422, 3: 444) CompressedTenBitFormat -compressed-ten-bit-format [0, 1] 0 Offline packing of the 2bits: requires two bits packed input (0: OFF, 1: ON) SourceWidth -w [64 - 8192] 0 Input source width SourceHeight -h [64 - 4320] 0 Input source height FrameToBeEncoded -n [0 - 2^31 -1] 0 Number of frames to be encoded, if number of frames is > number of frames in file, the encoder will loop to the beginning and continue the encode. 0 encodes the full clip. BufferedInput -nb [-1, 1 to 2^31 -1] -1 number of frames to preload to the RAM before the start of the encode. If -nb = 100 and -n 1000 --> the encoder will encode the first 100 frames of the video 10 times. Use -1 to not preload any frames. 
This parameter is best used to eliminate the impact of disk reading on encoding speed and is most noticeable when frames sizes are 4k or 8k. Because frames are repeated when value specified (-nb) is less than the total frame count (-n), you should expect bitstreams to be different. Profile -profile [1,2] 2 1: Main, 2: Main 10 Tier -tier [0, 1] 0 0: Main, 1: High Level -level [1, 2, 2.1, 3, 3.1, 4, 4.1, 5, 5.1, 5.2, 6, 6.1, 6.2] 0 0 to 6.2 [0 for auto determine Level] FrameRate -fps [0 - 2^64 -1] 60 If the number is less than 1000, the input frame rate is an integer number between 1 and 60, else the input number is in Q16 format (shifted by 16 bits) [Max allowed is 240 fps]. If FrameRateNumerator and FrameRateDenominator are both !=0 the encoder will ignore this parameter FrameRateNumerator -fps-num [0 - 2^64 -1] 0 Frame rate numerator e.g. 6000 When zero, the encoder will use -fps if FrameRateDenominator is also zero, otherwise an error is returned FrameRateDenominator -fps-denom [0 - 2^64 -1] 0 Frame rate denominator e.g. 100 When zero, the encoder will use -fps if FrameRateNumerator is also zero, otherwise an error is returned Injector -inj [0,1] 0 Enable injection of input frames at the specified framerate (0: OFF, 1: ON) InjectorFrameRate -inj-frm-rt [1 - 240] 60 Frame Rate used for the injector. Recommended to match the encoder speed. SpeedControlFlag -speed-ctrl [0,1] 0 Enables the Speed Control functionality to achieve the real-time encoding speed defined by -fps. When this parameter is set to 1 it forces -inj to be 1 and -inj-frm-rt to be set to -fps. InterlacedVideo -interlaced-video [0,1] 0 1 : encoder will signal interlaced signal in the stream
0 : assumes progressive signal SeparateFields -separate-fields [0,1] 0 1 : Interlaced input, application will separate top and bottom fields and encode it as progressive.
0 : Treat video as progressive video HierarchicalLevels -hierarchical-levels [0 - 3] 3 0 : Flat
1: 2-Level Hierarchy
2: 3-Level Hierarchy
3: 4-Level Hierarchy
Minigop Size = (2^HierarchicalLevels)
(e.g. 3 ==> 7B pyramid, 2 ==> 3B Pyramid)
Refer to Appendix A.1 BaseLayerSwitchMode -base-layer-switch-mode [0,1] 0 0 : Use B-frames in the base layer pointing to the same past picture
1 : Use P-frames in the base layer
Refer to Appendix A.1 PredStructure -pred-struct [0 - 2] 2 0: Low Delay P
1: Low Delay B
2: Random Access
Refer to Appendix A.1 IntraPeriod -intra-period [-2 - 255] -2 Distance between Intra Frame inserted.
-1 denotes no intra update.
-2 denotes auto. IntraRefreshType -irefresh-type [-1,N] -1 -1: CRA (Open GOP)
>=0: IDR (Closed GOP, N is headers insertion interval, 0 supported if CQP, >=0 supported if VBR)
Refer to Appendix A.3 QP -q [0 - 51] 32 Initial quantization parameter for the Intra pictures used when RateControlMode 0 (CQP) LoopFilterDisable -dlf [0, 1] 0 When set to 1 disables the Deblocking Loop Filtering SAO -sao [0,1] 1 When set to 0 the encoder will not use the Sample Adaptive Filter UseDefaultMeHme -use-default-me-hme [0, 1] 1 0 : Overwrite Default ME HME parameters
1 : Use default ME HME parameters, dependent on width and height HME -hme [0,1] 1 Enable HME, 0 = OFF, 1 = ON SearchAreaWidth -search-w [1 - 256] Depends on input resolution Motion vector search area width SearchAreaHeight -search-h [1 - 256] Depends on input resolution Motion vector search area height ConstrainedIntra -constrd-intra [0,1] 0 Allow the use of Constrained Intra, when enabled, this feature results in sending two PPSs in the HEVC Elementary streams
0 = OFF, 1 = ON RateControlMode -rc [0,1] 0 0 : CQP , 1 : VBR TargetBitRate -tbr Any Number 7000000 Target bitrate in bits / second. Only used when RateControlMode is set to 1 vbvMaxrate -vbv-maxrate Any Number 0 VBVMaxrate in bits / second. Only used when RateControlMode is set to 1 vbvBufsize -vbv-bufsize Any Number 0 VBV BufferSize in bits / second. Only used when RateControlMode is set to 1 vbvBufInit -vbv-init [0 - 100] 90 Sets the initial percentage size that the VBV buffer is filled to hrdFlag -hrd [0,1] 0 Sets the HRD (Hypothetical Reference Decoder) Flag in the encoded stream, 0 = OFF, 1 = ON When hrdFlag is set to 1, vbvMaxrate and vbvBufsize must be greater than 0 MaxQpAllowed -max-qp [0 - 51] 48 Maximum QP value allowed for rate control use. Only used when RateControlMode is set to 1. Has to be >= MinQpAllowed MinQpAllowed -min-qp [0 - 50] 10 Minimum QP value allowed for rate control use. Only used when RateControlMode is set to 1. Has to be < MaxQpAllowed LookAheadDistance -lad [0 - 250] Depending on BRC mode When RateControlMode is set to 1 it's best to set this parameter to be equal to the Intra period value (such is the default set by the encoder). When CQP is chosen, then a (2 * minigopsize +1) look ahead is recommended. SceneChangeDetection -scd [0,1] 1 Enables or disables the scene change detection algorithm
0 = OFF, 1 = ON BitRateReduction -brr [0,1] 0 Enables visual quality algorithms to reduce the output bitrate with minimal or no subjective visual quality impact.
0 = OFF, 1 = ON ImproveSharpness -sharp [0,1] 0 This is a visual quality knob that allows the use of adaptive quantization within the picture and enables visual quality algorithms that improve the sharpness of the background. This feature is only available for 4k and 8k resolutions
0 = OFF, 1 = ON VideoUsabilityInfo -vid-info [0,1] 0 Enables or disables sending a vui structure in the HEVC Elementary bitstream. 0 = OFF, 1 = ON HighDynamicRangeInput -hdr [0,1] 0 When set to 1, signals HDR10 input in the output HEVC elementary bitstream and forces VideoUsabilityInfo to 1.
0 = OFF, 1 = ON AccessUnitDelimiter -ua-delm [0,1] 0 SEI message, 0 = OFF, 1 = ON BufferingPeriod -pbuff [0,1] 0 SEI message, 0 = OFF, 1 = ON PictureTiming -tpic [0,1] 0 SEI message, 0 = OFF, 1 = ON.
If 1, VideoUsabilityInfo should be also set to 1. RegisteredUserData -reg-user-data [0,1] 0 SEI message, 0 = OFF, 1 = ON UnregisteredUserData -unreg-user-data [0,1] 0 SEI message, 0 = OFF, 1 = ON RecoveryPoint -recovery-point [0,1] 0 SEI message, 0 = OFF, 1 = ON TemporalId -temporal-id [0,1] 1 0 = OFF
1 = Insert temporal ID in NAL units AsmType -asm [0,1] 1 Assembly instruction set
(0: C Only, 1: Automatically select highest assembly instruction set supported) LogicalProcessors -lp [0, total number of logical processor] 0 The number of logical processor which encoder threads run on. Refer to Appendix A.2 FirstLogicalProcessor -flp [0, the index of last logical processor] 0 The index of first logical processor which encoder threads run on. Refer to Appendix A.2 TargetSocket -ss [-1,1] -1 For dual socket systems, this can specify which socket the encoder runs on. Refer to Appendix A.2 ThreadCount -thread-count [0,N] 0 The number of threads to get created and run, 0 = AUTO SwitchThreadsToRtPriority -rt [0,1] 1 Enables or disables threads to real time priority, 0 = OFF, 1 = ON (only works on Linux) FPSInVPS -fpsinvps [0,1] 1 Enables or disables the VPS timing info, 0 = OFF, 1 = ON TileRowCount -tile_row_cnt [1,22] 1 Tile count in the Row TileColumnCount -tile_col_cnt [1,20] 1 Tile count in the column TileSliceMode -tile_slice_mode [0,1] 0 Per slice per tile, only valid for multi-tile UnrestrictedMotionVector -umv [0,1] 1 Enables or disables unrestricted motion vectors
0 = OFF (motion vectors are constrained within frame or tile boundary)
1 = ON.
For MCTS support, set -umv 0 with valid TileRowCount and TileColumnCount MaxCLL -max-cll [0 , 2^16-1] 0 Maximum content light level (MaxCLL) as required by the Consumer Electronics Association 861.3 specification. Applicable for HDR content. If specified, signaled only when HighDynamicRangeInput is set to 1 MaxFALL -max-fall [0 , 2^16-1] 0 Maximum Frame Average light level (MaxFALL) as required by the Consumer Electronics Association 861.3 specification. Applicable for HDR content. If specified, signaled only when HighDynamicRangeInput is set to 1 UseMasterDisplay -use-master-display [0,1] 0 Enables or disables the MasterDisplayColorVolume
0 = OFF
1 = ON MasterDisplay -master-display For R, G, B and whitepoint [0, 2^16-1]. For max, min luminance [0, 2^32-1] 0 SMPTE ST 2086 mastering display color volume SEI info, specified as a string. The string format is \"G(%hu,%hu)B(%hu,%hu)R(%hu,%hu)WP(%hu,%hu)L(%u,%u)\" where %hu are unsigned 16bit integers and %u are unsigned 32bit integers. The SEI includes X, Y display primaries for RGB channels and white point (WP) in units of 0.00002 and max, min luminance (L) values in units of 0.0001 candela per meter square. Applicable for HDR content. Example for a P3D65 1000-nits monitor: G(13250,34500)B(7500,3000)R(34000,16000)WP(15635,16450)L(10000000,1) DolbyVisionRpuFile -dolby-vision-rpu any string null Path to the file containing Dolby Vision RPU metadata DolbyVisionProfile -dolby-vision-profile 8.1 or 81 0 Generate bitstreams conforming to the specified Dolby Vision profile 8.1. When specified, enables HighDynamicRangeInput automatically. Applicable only for 10-bit input content. MasterDisplay should be set for using dolby vision profile 81. Pass the dynamic metadata through DolbyVisionRpuFile option NaluFile -nalu-file any string null Path to the file containing CEA 608/708 metadata. Text file should contain the userSEI in POC order as per below format: /. Currently only PREFIX_SEI messages are supported","s":"List of all configuration parameters","u":"/docs/encoders/SVT-HEVC","h":"#list-of-all-configuration-parameters","p":552},{"i":570,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. vpxenc is part of the libvpx library for working with the VP9 & VP8 video codecs. It is capable of encoding & decoding both formats; vpxenc is the multipurpose encoder. VP9 competes with HEVC (h265) & AVC (h264) in coding efficiency, and has been superseded by AV1. VP8 competes with AVC. By default, vpxenc isn't as competitive as it could be, but even when used properly, most tests show that h265 offers slightly better quality per bit with efficient encoders like x265.","s":"vpxenc","u":"/docs/encoders/vpxenc","h":"","p":569},{"i":572,"t":"vpxenc is available in FFmpeg via libvpx for VP8 and libvpx-vp9 for VP9; to check if you have it, run ffmpeg -h encoder=libvpx or ffmpeg -h encoder=libvpx-vp9. Non-FFmpeg standard VP8/VP9 parameters are not supported.","s":"FFmpeg","u":"/docs/encoders/vpxenc","h":"#ffmpeg","p":569},{"i":574,"t":"vpxenc supports the following color spaces: Format Chroma Subsampling Supported Bit Depth(s) YUV420P 4:2:0 8-bit YUVA420P 4:2:0 8-bit (Alpha Channel) YUV422P 4:2:2 8-bit YUV440P 4:4:0 8-bit YUV444P 4:4:4 8-bit GBRP - 8-bit YUV420P10LE 4:2:0 10-bit YUV422P10LE 4:2:2 10-bit YUV440P10LE 4:4:0 10-bit YUV444P10LE 4:4:4 10-bit GBRP10LE - 10-bit YUV420P12LE 4:2:0 12-bit YUV422P12LE 4:2:2 12-bit YUV440P12LE 4:4:0 12-bit YUV444P12LE 4:4:4 12-bit GBRP12LE - 12-bit","s":"Supported Color Space","u":"/docs/encoders/vpxenc","h":"#supported-color-space","p":569},{"i":576,"t":"Windows builds are available on Lastrosade's website and can be downloaded here. For Linux and macOS, it may be available when searching \"vpxenc\" or \"libvpx\" in their respective package managers.","s":"Installing (Binary)","u":"/docs/encoders/vpxenc","h":"#installing-binary","p":569},{"i":578,"t":"Windows users are recommended to compile via MinGW-W64 which comes with MSYS2. 
nasm/yasm, and the GNU build tools (make, configure) are required for this operation.","s":"Compiling (Windows/MacOS/Linux)","u":"/docs/encoders/vpxenc","h":"#compiling-windowsmacoslinux","p":569},{"i":580,"t":"First, clone the repository and create a build directory: git clone https://chromium.googlesource.com/webm/libvpx cd libvpx mkdir libvpx_build && cd libvpx_build","s":"Cloning","u":"/docs/encoders/vpxenc","h":"#cloning","p":569},{"i":582,"t":"Now here comes the annoying part: the configure file has really bad defaults, so you will need to adjust them. Here are some recommended options you should use: ../configure --cpu=native --extra-cxxflags=\"-flto\" --extra-cflags=\"-flto\" --as=auto --enable-vp9-highbitdepth --enable-libyuv --enable-webm-io --enable-vp9 --enable-runtime-cpu-detect --enable-internal-stats --enable-postproc --enable-vp9-postproc --enable-static --disable-shared --enable-vp9-temporal-denoising --disable-unit-tests --disable-docs --enable-multithread Now let's break down what each of them does. --cpu=native Native CPU optimizations. --extra-cxxflags=\"-flto\" --extra-cflags=\"-flto\" Enables link-time optimization for a faster binary. --as=auto Set the assembler to auto, so it can choose between yasm and nasm. --enable-vp9-highbitdepth Enables high bit depth (>=10 bits) when encoding VP9. --enable-libyuv Enables YUV4MPEG input support (IMPORTANT), otherwise it will only accept RAW. --enable-webm-io Enables input and output support for the WebM container. --enable-vp9 Enables VP9 encoding support. --enable-runtime-cpu-detect Enables runtime CPU detection. --enable-internal-stats Enables internal statistics for the encoder for debug purposes. --enable-postproc Enables postprocessing for better video quality. --enable-vp9-postproc Enables VP9-specific postprocessing for better video quality. --enable-static Enables static builds. --disable-shared Disables shared builds. --enable-vp9-temporal-denoising Disables spatial denoising for VP9 and enables temporal denoising instead. --disable-unit-tests Disables unit tests; unless you want to test the encoder as a developer, this should be disabled. --disable-docs Disables documentation, as enabling this also requires doxygen. --enable-multithread Enables the usage of multiple CPU threads for encoding and decoding.","s":"./configure file","u":"/docs/encoders/vpxenc","h":"#configure-file","p":569},{"i":584,"t":"There are other options you may want to use to either speed up compilation or drop unwanted features. --disable-vp8 --disable-vp9-decoder --disable-vp8-decoder Prevents VP8 encoding support and vpxdec (the decoder) from being compiled. --enable-small Prioritizes smaller encoder binary size over encoding speed. --target= Enables target compilation for a specific operating system or CPU architecture. There's a lot of them. 
Here's an exhaustive list of all of them based on the configure file: arm64-android-gcc arm64-darwin-gcc arm64-darwin20-gcc arm64-darwin21-gcc arm64-darwin22-gcc arm64-darwin23-gcc arm64-linux-gcc arm64-win64-gcc arm64-win64-vs15 arm64-win64-vs16 arm64-win64-vs16-clangcl arm64-win64-vs17 arm64-win64-vs17-clangcl armv7-android-gcc armv7-darwin-gcc armv7-linux-rvct armv7-linux-gcc armv7-none-rvct armv7-win32-gcc armv7-win32-vs14 armv7-win32-vs15 armv7-win32-vs16 armv7-win32-vs17 armv7s-darwin-gcc armv8-linux-gcc loongarch32-linux-gcc loongarch64-linux-gcc mips32-linux-gcc mips64-linux-gcc ppc64le-linux-gcc sparc-solaris-gcc x86-android-gcc x86-darwin8-gcc x86-darwin8-icc x86-darwin9-gcc x86-darwin9-icc x86-darwin10-gcc x86-darwin11-gcc x86-darwin12-gcc x86-darwin13-gcc x86-darwin14-gcc x86-darwin15-gcc x86-darwin16-gcc x86-darwin17-gcc x86-iphonesimulator-gcc x86-linux-gcc x86-linux-icc x86-os2-gcc x86-solaris-gcc x86-win32-gcc x86-win32-vs14 x86-win32-vs15 x86-win32-vs16 x86-win32-vs17 x86_64-android-gcc x86_64-darwin9-gcc x86_64-darwin10-gcc x86_64-darwin11-gcc x86_64-darwin12-gcc x86_64-darwin13-gcc x86_64-darwin14-gcc x86_64-darwin15-gcc x86_64-darwin16-gcc x86_64-darwin17-gcc x86_64-darwin18-gcc x86_64-darwin19-gcc x86_64-darwin20-gcc x86_64-darwin21-gcc x86_64-darwin22-gcc x86_64-darwin23-gcc x86_64-iphonesimulator-gcc x86_64-linux-gcc x86_64-linux-icc x86_64-solaris-gcc x86_64-win64-gcc x86_64-win64-vs14 x86_64-win64-vs15 x86_64-win64-vs16 x86_64-win64-vs17 generic-gnu For Windows compilation with MinGW you may need to use --target=x86_64-win64-gcc; for macOS on Apple Silicon, use --target=arm64-darwin22-gcc.","s":"Other ./configure options","u":"/docs/encoders/vpxenc","h":"#other-configure-options","p":569},{"i":586,"t":"After successfully running the configure command above, run make -j $(nproc) to start compiling with all of your CPU threads. The resulting binary will be called vpxenc and you can copy it wherever you like.","s":"Running GNU make","u":"/docs/encoders/vpxenc","h":"#running-gnu-make","p":569},{"i":588,"t":"Incomplete","s":"VP8","u":"/docs/encoders/vpxenc","h":"#vp8","p":569},{"i":590,"t":"For encoding VP9, vpxenc's default parameters are not considered optimal. There are a lot of options that are either disabled without reason or simply misconfigured, hurting coding efficiency while costing little to fix. As of mid-2021, the defaults for some parameters (the TPL model, lag-in-frames, and auto-alt-ref frames) were updated in libvpx 1.9.0 and 1.10.0, which means there's not much use in setting these three parameters manually unless you're encoding through FFmpeg. This section covers the most important options libvpx-vp9 has to offer, recommended settings, & what they do. It is important to note that the vpxenc parameters provided below are considered optimal because they are efficient, but VP9 Profile 2 isn't compatible with many hardware-accelerated VP9 decoding implementations.","s":"VP9","u":"/docs/encoders/vpxenc","h":"#vp9","p":569},{"i":592,"t":"--codec=vp9 Self-explanatory. --passes=2 vpxenc's 2-pass mode is quite fast compared to 2-pass in x264 and x265. Only use 1-pass mode for real-time applications, which won't be covered here yet. It is the default in the standalone vpxenc libvpx-vp9 encoder. --webm Enables WebM output for the encoder, and passes the encoder flags set. It is not necessary to enable it, but since it passes the encoder flags, I would use it. Can be changed to --ivf for an ivf video stream. --good This is a sort of quality deadline, the minimum speed the encoder is allowed to go to. 
It isn't recommended to use --best as it is slow for the quality uplift you get. Do not use RT for anything but real-time encoding. --threads=8 Dictates the number of threads the encoder should spawn. It doesn't mean it'll scale all that well over those 8 threads. On a 16 thread CPU with a single encoder instance, I would use 8 threads. With multiple encoder instances encoding (with qencoder/av1an/neav1e), I would set it to 2 threads. --profile=2 VP9 profile 2 is obligatory if you want 10-bit & 12-bit support for HDR, and improved quality from 8-bit. --lag-in-frames=25 Lag-in-frames is the libvpx equivalent of lookahead in x264. The higher the number, the slower the encoder will be, with the upside of making it more efficient. Going above --lag-in-frames=12 also activates another setting called alternate reference frames. 25 is the maximum you can get in libvpx-vp9. It is the default in the standalone vpxenc libvpx-vp9 encoder. --end-usage=q Q mode is the closest equivalent to CRF that libvpx-vp9 offers, so use it if maximum quality is desired. --cq-level=25 For 1080p30 8-bit content, it is recommended to go with a Q of 25; you can go lower if you value higher quality over pure efficiency. For 1080p60 8-bit content, I would recommend going with a higher Q value with a delta of around 15. So, a Q of 30 to 40 is usually recommended. Depending on the content, you may have to tune this value, so this advice is only useful in choosing a starting point. --kf-max-dist=[input FPS * 10] This tells the encoder to have a maximum number of frames between keyframes. It will usually place a lower number of keyframes in content like movies, TV shows, or animated shows, so you can set it to a very high number or not set it at all if you want maximum efficiency for this kind of content. Otherwise, I would go with the 10-second rule: --kf-max-dist=240 for 24FPS content, 300 for 30FPS content, 600 for 60FPS content, and so on. --cpu-used=3 This is where the biggest balance of quality to speed is with libvpx-vp9. This is similar to presets in x264 and x265, except the lower the number, the slower the encoder runs. Using --cpu-used=3 & below enables RDO, which increases quality at the expense of speed. info --cpu-used=5 and above are slower in the 1st pass, so it isn't recommended to use them anyway. --auto-alt-ref=6 Activates alternate reference frames. Alternate reference frames are \"invisible\" frames which are used as references when creating the final display frames. Using more alternate reference frames is typically more efficient. Setting this greater than 1 activates overlay frames and isn't compatible with the 8-bit color profiles. --arnr-maxframes=7 This is the maximum number of alternate reference frames the encoder is allowed to use. For most content, 7 is usually a good bet, and it is the default. With animated content, going with a value of 12 or to the max is a good bet, as animated content benefits from more additional alt-ref frames than other content. Be aware that increasing this value will impact encode speed. --arnr-strength=4 This setting dictates how much denoising will occur in the alt-ref frames. Lowering it to 2 or 3 is usually a good bet for noisier/grainy content to try and retain more detail, but 4 is a sane starting place. The default setting is 5, which is fine for most content, but it can be beneficial to go a bit lower. For animation, keeping the default of 5 is likely a better option. 
--aq-mode=0 Adaptive quantization is the way for an encoder to spend more bits in certain areas to improve psychovisual fidelity. --aq-mode=0 works well on clean content (animation, video games, screen content). --aq-mode=2 is recommended when you want to give more detail to more complex parts of a video. --frame-boost=0 This flag, when set to 1, lets the encoder periodically boost the bitrate of a scene/frame if it needs it. Leaving it at the default --frame-boost=0 is usually a good bet, & this isn't a particularly salient change. --tune-content=default This determines how the encoder is tuned. In libvpx-vp9, there are three options: default, screen, and film. Default is for most scenarios, screen is for screen content (video games, live-streaming content like web pages & your screen), and film is for heavily dithered/grainy video. Leaving it at the default for about everything but screen content as described above is probably the best option. --tune-content=screen with --aq-mode=2 is not recommended, as it creates some odd artifacts. It is advised to use --aq-mode=0 if --tune-content=screen is activated, or if you want better perceptual quality, --aq-mode=1. --row-mt=1 Enables row multi-threading in libvpx-vp9. Always enable it no matter what, as it does not hurt efficiency, but boosts speed considerably. This feature is disabled by default. --bit-depth=10 Always use 10-bit for maximum efficiency & minimal banding, even with an 8-bit source. Make sure to enable --profile=2 as mentioned above. --tile-columns=1 This setting divides the video into tile columns for easier parallelization when encoding & decoding. Setting --tile-columns=1, you will get 2^1 = 2 tile columns. Setting it higher is a trade-off between parallelization & coding efficiency, as more tiles means less information your encoder can work with, and this will result in decreased efficiency. Do note there is an upper threshold with regard to the number of tile columns you can get due to the fixed minimum tile width of 256 pixels. So, this means 4 tile columns (2^2) for 720p and 1080p, 8 tile columns (2^3) for 1440p/4k, and so on. If you set a tile column number that is too high, it will drop down to the lowest supported number of tile columns at the input resolution. --tile-rows=0 This setting divides the video into tile rows. This option is different from columns because although it also makes decoding performance higher, it does not scale as well as tile columns & doesn't increase encoder threading nearly as much. Always use more tile-columns than rows, or leave the number of tile rows at default (0). Leaving the encoder defaults at --tile-rows=0 & --tile-columns=0 will result in the highest overall coding efficiency possible with these options. --enable-tpl=1 This option enables a temporal dependency model, which helps with coding efficiency. It is the default in the standalone vpxenc libvpx-vp9 encoder. All of these options are only available for the standalone vpxenc program. 
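Assembled into a single standalone two-pass invocation (a sketch combining the settings above; adjust --cq-level, --threads, and --kf-max-dist for your content and frame rate): vpxenc --codec=vp9 --passes=2 --webm --good --threads=8 --profile=2 --lag-in-frames=25 --end-usage=q --cq-level=25 --kf-max-dist=240 --cpu-used=3 --auto-alt-ref=6 --arnr-maxframes=7 --arnr-strength=4 --aq-mode=0 --row-mt=1 --bit-depth=10 --tile-columns=1 --tile-rows=0 --enable-tpl=1 -o output.webm input.y4m 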
Here is a sample FFmpeg command line interpretation of the commands above, with some options missing: ffmpeg -i input.mkv -c:v libvpx-vp9 -pix_fmt yuv420p10le -pass 1 -quality good -threads 4 -profile:v 2 -lag-in-frames 25 -crf 25 -b:v 0 -g 240 -cpu-used 3 -auto-alt-ref 6 -arnr-maxframes 7 -arnr-strength 4 -aq-mode 0 -tune-content default -tile-rows 0 -tile-columns 1 -enable-tpl 1 -row-mt 1 -f null - ffmpeg -i input.mkv -c:v libvpx-vp9 -pix_fmt yuv420p10le -pass 2 -quality good -threads 4 -profile:v 2 -lag-in-frames 25 -crf 25 -b:v 0 -g 240 -cpu-used 3 -auto-alt-ref 6 -arnr-maxframes 7 -arnr-strength 4 -aq-mode 0 -tune-content default -tile-rows 0 -tile-columns 1 -enable-tpl 1 -row-mt 1 output.mkv Alternatively, you can pass a .y4m stream to standalone vpxenc & encode that way. VP9 section written based on work by BlueSwordM, who has granted written permission for this wiki page to exist in its current fashion","s":"Encoding","u":"/docs/encoders/vpxenc","h":"#encoding","p":569},{"i":594,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. SVT-AV1 (Scalable Video Technology for AV1) is an AV1-compliant software encoder/decoder library. Jointly developed by Intel and Netflix, SVT-AV1 is written almost entirely in C with some parts written in C++ and Assembly. As the name suggests, it is part of the \"Scalable Video Technology\" project lineup by Intel. This entry discusses the SVT-AV1 encoder, also known as the \"Production\" AV1 encoder (while aomenc is the \"reference\" AV1 encoder), & refers to SVT-AV1 as such. SVT-AV1 is known for its parallelization, high coding efficiency, & active development. SVT-AV1 scales across multiple CPU cores much more effectively than aomenc or rav1e, so tools like Av1an are less necessary, though still useful for scene detection.","s":"SVT-AV1","u":"/docs/encoders/SVT-AV1","h":"","p":593},{"i":596,"t":"SVT-AV1 is available in FFmpeg via libsvtav1; to check if you have it, run ffmpeg -h encoder=libsvtav1. You can input non-FFmpeg standard SVT-AV1 parameters via -svtav1-params.","s":"FFmpeg","u":"/docs/encoders/SVT-AV1","h":"#ffmpeg","p":593},{"i":598,"t":"SVT-AV1 supports the following color spaces: Format Chroma Subsampling Supported Bit Depth(s) YUV420P 4:2:0 8-bit YUV420P10LE 4:2:0 10-bit","s":"Supported Color Space","u":"/docs/encoders/SVT-AV1","h":"#supported-color-space","p":593},{"i":600,"t":"Linux & macOS Windows A precompiled AVX2-optimized binary of SVT-AV1-PSY can be installed for x86_64 Linux via rAV1ator CLI. However, it is always recommended to build from source. To build SVT-AV1 from source, first clone the desired SVT-AV1 repository & enter the build directory. Clone mainline SVT-AV1 git clone https://gitlab.com/AOMediaCodec/SVT-AV1/ cd SVT-AV1 git reset --hard bbcff785881b320f7e1b1f77a2f5ed025f8bfd75 # Reset to release 2.1.0 cd Build/linux Clone SVT-AV1-PSY git clone https://github.com/gianni-rosato/svt-av1-psy cd svt-av1-psy/Build/linux In the directory, simply run ./build.sh [flags] to build. Be aware that building requires CMake version 3.16 or higher and either GCC or Clang. It is recommended to use Clang when building SVT-AV1. Build release ./build.sh release Statically build just the encoder with clang and enable link-time optimization ./build.sh jobs=8 all cc=clang cxx=clang++ no-dec enable-lto static native The compiled binaries will be in the Bin/Release directory, including SvtAv1EncApp. 
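As a quick smoke test of the freshly built encoder (a sketch; file names are placeholders), from inside Bin/Release run: ./SvtAv1EncApp -i input.y4m --preset 6 --crf 30 -b output.ivf 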
If you just want the encoder, adding the no-dec flag will skip building SvtAv1DecApp and save on compilation time. If you'd like to build from the latest release (2.1.0 at the time of writing - last updated 16 Apr 2024) please run git reset --hard 2aeeb4f1a1d495b84bf5c21dbb60ae10e991fada in the cloned directory. It is recommended that you do this, as new changes to git aren't always stable right away & a release will guarantee more stability. If you want extra performance, it is possible to build SVT-AV1 using PGO (Profile-guided Optimization). Be aware that this particular script assumes that you have a .y4m file (or multiple) in /dev/shm for transcoding. You can compile statically linked SVT-AV1 with PGO (and LTO, or link-time optimization) by following this script: Building SVT-AV1 with profile-guided optimization git clone https://gitlab.com/AOMediaCodec/SVT-AV1/ cd SVT-AV1/Build/linux ./build.sh cc=gcc cxx=g++ enable-lto enable-pgo static native jobs=$(nproc) pgo-dir=/dev/shm pgo-videos=/dev/shm release If you wish to store videos elsewhere or provide custom parameters to the SvtAv1EncApp binary, try this script: git clone https://gitlab.com/AOMediaCodec/SVT-AV1/ cd SVT-AV1/Build/linux ./build.sh cc=gcc cxx=g++ enable-lto enable-pgo static native jobs=$(nproc) pgo-dir=/dev/shm pgo-compile-gen release ../../Bin/Release/SvtAv1EncApp # Run this binary as many times as you'd like with arguments of your choice to collect data ./build.sh cc=gcc cxx=g++ enable-lto enable-pgo static native jobs=$(nproc) pgo-dir=/dev/shm pgo-compile-use release To be filled. If you believe you can help, see our Contribution Guide.","s":"Installation","u":"/docs/encoders/SVT-AV1","h":"#installation","p":593},{"i":603,"t":"SVT-AV1's greatest strength is its parallelization capability, where it outclasses other AV1 encoders by a significant margin. SVT-AV1's parallelization techniques do not involve tiling & don't harm video quality, & can comfortably utilize up to 16 cores given 1080p source video. This is while maintaining coding efficiency competitive with mainline aomenc. Perceptually, mainline SVT-AV1 is outperformed by well-tuned community forks of aomenc, but according to many the gap has begun to close with the introduction of SVT-AV1-PSY.","s":"Strengths","u":"/docs/encoders/SVT-AV1","h":"#strengths","p":593},{"i":605,"t":"SVT-AV1 is strongest on x86 CPUs, & while ARM NEON assembly is available and has been slowly improving since its introduction in version 1.8.0, SVT-AV1 still underperforms on ARM. For this reason, it is not a good cross-architecture CPU benchmark. SVT-AV1's support for various AV1 features is also limited; it only supports 4:2:0 chroma subsampling with no support for 12-bit color, and it does not support scene change detection (there are no plans to implement this, either). The smallest possible video that SVT-AV1 can produce is 64x64.","s":"Weaknesses","u":"/docs/encoders/SVT-AV1","h":"#weaknesses","p":593},{"i":607,"t":"Aside from build optimizations for speed, there is further tweaking to be done to the SvtAv1EncApp binary parameters when encoding. The following applies to mainline SVT-AV1, but does not apply to SVT-AV1-PSY. --film-grain & --film-grain-denoise Most live-action sources feature hard-to-compress digital noise that is easily smoothed out by AV1 compression. 
To add this grain back, or even denoise through the encoder and then add grain, it is possible to use the --film-grain parameter to specify an amount of film grain to add to the encode (& --film-grain-denoise to specify how to denoise the input video before encoding for potentially better appeal). Denoising a video always removes fine details, so sticking with just --film-grain is recommended in most cases. According to SVT-AV1 documentation, a level of 8 should be used for live-action content with a normal amount of grain, while a level of 4 works well for hand-drawn animation or other smoother-looking sources that still stand to benefit from some grain synthesis. --input-depth 10 10-bit output from AV1 encoding is always desirable for coding efficiency, even if your source is 8-bit. Note that this option only produces a 10-bit AV1 bitstream if the source provided to the encoder is 10-bit, so an 8-bit source should be converted to 10-bit before it reaches the encoder. --tune 2 There are three tunes in mainline SVT-AV1: Tune 1 is for PSNR RDO, Tune 2 is for SSIM RDO, & Tune 0 is a psychovisual tune labeled VQ. It has been common practice to lean away from the PSNR tune, as it is not designed for visual quality but rather to perform better on the PSNR metric, which is widely considered to be inconsistent with our human perception of fidelity. Using the VQ tune is a safe bet for now, but many believe the newer SSIM tune provides better visual fidelity. Using SVT-AV1-PSY, the custom Subjective SSIM tune (Tune 3) provides the best of both Tune 2 & Tune 0 with additional improvements as well. --enable-qm 1 Enables quantization matrices, disabled by default. Improves efficiency, mainly by increasing encoding speed while producing similar quality video. --qm-min 0 Sets the minimum flatness of quantization matrices to 0, down from the default 8. This is recommended unless you are dealing with extremely heavy grain. The maximum quantization matrix flatness is 15 by default, and should be left alone. --keyint [FPS*10] Similar to --kf-max-dist in vpxenc, this tells the encoder when to place keyframes. Because SVT-AV1 doesn't have scene detection, this isn't the maximum distance between keyframes, but rather a fixed interval for placing keyframes. If using Av1an, set to -1 to disable keyframe insertion, as Av1an handles that instead. --irefresh-type 2 Intra refresh is specified through this option, & lets the user decide between Closed GOP & Open GOP. GOP stands for Group of Pictures. Open GOP allows GOPs to reference one another, but support for this feature is currently incomplete. Therefore, it is recommended to use Closed GOP for the time being via --irefresh-type 2 until this is rectified. --preset X SVT-AV1 can be used in 15 different presets, labeled -1 through 13. Preset -1 is the slowest, but provides the best coding efficiency; it is also dubbed a research preset that is not recommended for regular use. Preset 13 is the fastest, and is also not recommended for regular use as it makes serious trade-offs to achieve unrealistically fast speeds at the cost of the encoder's coding efficiency. Using presets 2 through 8 is the best course of action for non-realtime applications if you desire reasonable speed, while 9 through 12 are useful for real-time encoding at 1080p or lower, even on low-end consumer computer hardware. --crf X CRF is the best way to target quality for optimal visual fidelity. 
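As a sketch that puts several of the above recommendations together (filenames are placeholders & the values shown are illustrative, not canonical): SvtAv1EncApp -i input.y4m --preset 4 --crf 25 --tune 2 --enable-qm 1 --qm-min 0 --keyint 240 --irefresh-type 2 -b output.ivf 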
VBR & CBR lose efficiency due to their inherently limited rate control capabilities.","s":"Encoder Optimization","u":"/docs/encoders/SVT-AV1","h":"#encoder-optimization","p":593},{"i":609,"t":"Currently, there is only one noteworthy community fork of SVT-AV1, called SVT-AV1-PSY.","s":"Community Forks","u":"/docs/encoders/SVT-AV1","h":"#community-forks","p":593},{"i":611,"t":"SVT-AV1-PSY is a community fork of SVT-AV1 that strives to improve the perceptual fidelity and quality of life provided by the encoder. The goal of this project is to create the best encoding implementation for perceptual quality with AV1, and it aims to surpass previous community forks of aomenc in speed and visual quality. SVT-AV1-PSY has a number of feature additions to the mainline SVT-AV1 encoder as well as modified defaults that aim to make it easier to produce a more perceptually optimal bitstream. For a full list of the encoder's feature additions and modifications to defaults, see the project's README.","s":"SVT-AV1-PSY","u":"/docs/encoders/SVT-AV1","h":"#svt-av1-psy","p":593},{"i":613,"t":"uavs3e is an open-source encoder for the AVS3 codec, developed by Chinese research institutions including Peking University Shenzhen Graduate School, Peng Cheng Laboratory, and Guangdong Bohua UHD Innovation Corporation. The encoder receives infrequent updates, which may result in bugs and compilation issues. It supports AMD64 with AVX2, ARM with NEON, and LoongArch CPUs.","s":"uavs3e","u":"/docs/encoders/uavs3e","h":"","p":612},{"i":615,"t":"Linux & macOS Windows The developers do not provide pre-built binaries, so you'll need to compile it yourself. Compilation requires GNU Make and CMake. As of August 2024, uavs3e does not compile with GCC 14; using Clang 18 instead resolves this issue. git clone https://github.com/uavs3/uavs3e.git cd uavs3e mkdir build/linux && cd build/linux cmake ../.. -DCOMPILE_10BIT=0 -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ make -j 8 If you encounter undefined references to the close or lseek64 functions, add the following two lines at the top of the test/utest.c file: #define _LARGEFILE64_SOURCE #include Binaries will be available in the build/linux folder. On Linux, you can run make install to install the encoder (may require elevated permissions). Ensure you have the following prerequisites installed before starting the build process: Microsoft C++ Build Tools: Select \"Desktop development with C++\". Git Open Developer PowerShell for VS 2022. Run the following commands: git clone https://github.com/uavs3/uavs3e.git cd uavs3e .\\version.bat cd build\\x86_windows devenv uavs3e.sln /Upgrade msbuild uavs3e.sln /p:Configuration=Release /p:WindowsTargetPlatformVersion=10.0 Binaries will be available in the bin folder.","s":"Installation","u":"/docs/encoders/uavs3e","h":"#installation","p":612},{"i":617,"t":"Linux & macOS Windows To enable 10-bit support, set -DCOMPILE_10BIT=1 in the CMake command. However, the encoder compiled with this flag has been reported to cause segmentation faults on some systems. To enable 10-bit support, change the line #define COMPILE_10BIT 0 to #define COMPILE_10BIT 1 in the inc/com_api.h file.","s":"10-bit Support","u":"/docs/encoders/uavs3e","h":"#10-bit-support","p":612},{"i":619,"t":"The encoder cannot parse .y4m files; they need to be converted to raw video (.yuv) format. 
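As a hedged example, FFmpeg can perform this conversion (filenames are placeholders; assumes an 8-bit 4:2:0 source): ffmpeg -i input.mkv -pix_fmt yuv420p -f rawvideo input.yuv 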
Simple 8-bit FHD 23.976 fps input with QP 20 and raw avs3 bitstream output uavs3enc -i input.yuv -w 1920 -h 1080 -d 8 --fps_num 24000 --fps_den 1001 -q 20 -o output.avs3 Speed 2, CRF 20, intra period 120, multithreaded uavs3enc -i input.yuv -w 1920 -h 1080 -d 8 --fps_num 24000 --fps_den 1001 -p 120 --wpp_threads 8 --frm_threads 8 --speed_level 2 --rc_type 1 -q 20 -o output.avs3 Currently, uavs3e does not support piping.","s":"Usage","u":"/docs/encoders/uavs3e","h":"#usage","p":612},{"i":621,"t":"Usable speed presets range from 0 to 4, where 0 is the slowest and 4 is the fastest. The encoder can be quite competitive, even compared with the newest AV1 and VVC encoders in terms of visual fidelity. uavs3d can be used to decode the output bitstream. For real-time playback, you need to have FFmpeg compiled with --enable-libuavs3d.","s":"Notes","u":"/docs/encoders/uavs3e","h":"#notes","p":612},{"i":623,"t":"uvg266 is an open-source software encoder for encoding to the H.266 / VVC codec. It is developed by the Ultra Video Group, written in C, and licensed under the BSD 3-clause license. The encoder is based on Kvazaar, their open source HEVC encoder solution.","s":"uvg266","u":"/docs/encoders/uvg266","h":"","p":622},{"i":626,"t":"uvg266 is available in the Arch User Repository (AUR) as uvg266 and uvg266-git.","s":"Arch Linux","u":"/docs/encoders/uvg266","h":"#arch-linux","p":622},{"i":628,"t":"Ultra Video Group does not ship any pre-built binaries of their encoders, so you'll have to compile them yourself. Windows users are recommended to compile via MinGW-W64, which comes with MSYS2. caution Compilation requires GNU Make and CMake tip The following build procedure should work across all common operating systems. Using Clang instead of GCC also works. You may add -DCMAKE_CXX_FLAGS=\"-flto -O3 -march=native\" -DCMAKE_C_FLAGS=\"-flto -O3 -march=native -pipe -fno-plt\" -DCMAKE_C_FLAGS_INIT=\"-flto=8 -static\" in CMake for better performance. git clone https://github.com/ultravideo/uvg266.git cd uvg266/build cmake .. -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=0 make -j 8 Binaries will be available in the same folder (build), or you can run make install on Linux to install (may need elevated permissions).","s":"Compiling","u":"/docs/encoders/uvg266","h":"#compiling","p":622},{"i":630,"t":"You need to compile with -DUVG_BIT_DEPTH=10 in the CMake -DCMAKE_C_FLAGS option to enable support for encoding 10-bit videos. For example: -DCMAKE_C_FLAGS=\"-DUVG_BIT_DEPTH=10\" With native optimizations: -DCMAKE_C_FLAGS=\"-flto -O3 -march=native -pipe -fno-plt -DUVG_BIT_DEPTH=10\" warning Encoding 10-bit with uvg266 is significantly slower, as the developers only prioritized SIMD optimizations for 8-bit, and it can get quite buggy.","s":"10-bit Support","u":"/docs/encoders/uvg266","h":"#10-bit-support","p":622},{"i":632,"t":"Simple Y4M input with QP 20 and raw 266 bitstream output uvg266 -i input.y4m --input-file-format y4m --qp 20 -o output.266 Preset slow, QP 20, Y4M input uvg266 -i input.y4m --input-file-format y4m --qp 20 --preset slow -o output.266 FFmpeg piping ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | uvg266 -i - --input-file-format y4m --qp 20 --preset slow -o output.266","s":"Usage","u":"/docs/encoders/uvg266","h":"#usage","p":622},{"i":634,"t":"Could not find a strategy for crc32c_8x8! 
- You're out of luck; uvg266 failed to initialize its block partitioning strategy for your specific CPU instruction set, so you can't use the encoder or encode that specific video.","s":"Troubleshooting","u":"/docs/encoders/uvg266","h":"#troubleshooting","p":622},{"i":636,"t":"x265 is a software library and command line application for encoding H.265 / HEVC developed by MulticoreWare, written in C++ and x86 assembly, and released in 2013. By default, x265 is tuned for low-bitrate content due to the blurring filters it applies. However, it can be tuned using CLI options to be very effective for high-fidelity content as well. It is a more efficient and modern encoder compared to x264, and is currently a popular choice for both high-fidelity and mini encodes. x265 is currently not recommended for lossless encoding. For that niche, x264 is considerably faster without meaningful efficiency loss.","s":"x265","u":"/docs/encoders/x265","h":"","p":635},{"i":638,"t":"x265 is available in FFmpeg via libx265; to check if you have it, run ffmpeg -h encoder=libx265.","s":"FFmpeg","u":"/docs/encoders/x265","h":"#ffmpeg","p":635},{"i":640,"t":"Pre-built binary (Recommended): http://msystem.waw.pl/x265/","s":"Installation","u":"/docs/encoders/x265","h":"#installation","p":635},{"i":642,"t":"This section will overview the most important parameters for controlling output and quality in x265. The parameters will be listed in the format used by the standalone x265 binary, but all of the parameters should also be usable in ffmpeg in the format e.g. -x265-params pass=1.","s":"Parameters","u":"/docs/encoders/x265","h":"#parameters","p":635},{"i":644,"t":"--preset slow If encoding speed is a priority, x265 is probably not the best choice. x264 at --preset veryslow will likely be faster than x265 at --preset fast, while providing comparable efficiency. However, x265 finds its sweet spot at --preset slow, and this is the preset most people should use. This preset provides high quality while not being unreasonably slow. The exception where you may want to tax your CPU by going to --preset veryslow is when doing lower bitrate encodes (e.g. crf >=22). This is because the veryslow preset provides better motion estimation at low bitrates. However, it is exceptionally slow, so it is not generally recommended for everyday use.","s":"Preset","u":"/docs/encoders/x265","h":"#preset","p":635},{"i":646,"t":"--crf CRF, standing for Constant Rate Factor, is a method for selecting a level of quality-to-filesize tradeoff. CRF is preferable to bitrate targeting because CRF only requires one encoding pass, so bitrate targeting should only be used if you need to target a specific filesize. Nowadays, those situations are uncommon and it is preferred to use CRF to target a quality level. CRF is preferable to QP because CRF allows the encoder to vary the quality level from frame to frame for better viewing quality in areas of the video that need it the most. What CRF to use will vary depending on your goals. The range of valid CRF values is 0-51, with larger values providing smaller filesize but lower quality. Some amount of experimentation may be needed to find the value you prefer. A decent \"balanced\" target will be around 17 or 18, providing good quality without inflating filesize too much. For a focus on maximum quality, a value of 12 or 13 will result in visually lossless output for most videos, but will result in a much larger filesize. 
For miniature encodes, try raising the CRF as much as you feel comfortable before the quality becomes unbearable. CRFs of 22 or higher are generally considered \"low bitrate\", so how high you raise the CRF depends on how low of a filesize you are trying to achieve.","s":"CRF","u":"/docs/encoders/x265","h":"#crf","p":635},{"i":648,"t":"--bframes B-frames are bi-directional predictive frames; this means that they can reference frames both before and after themselves, which makes them very efficient. The --bframes parameter controls how many B-frames can be used consecutively. Higher values can result in better compression, but this value has diminishing returns, as the encoder won't use extra B-frames in situations where it would reduce efficiency. The default value at preset slow is 4. It is recommended to increase this to --bframes 5 for live action and CGI content, or --bframes 8 for anime and cartoons. Content with little motion benefits more from high B-frames values, but even on anime where there are many still scenes, there is no measurable benefit to using a value higher than 8; it would just slow down the encoder.","s":"bframes","u":"/docs/encoders/x265","h":"#bframes","p":635},{"i":650,"t":"--sao, --limit-sao, --no-sao SAO stands for Sample Adaptive Offset, and is a loop filter used by x265 to prevent artifacting. However, it has the side effect of losing sharpness on details. It is recommended to leave this on (default) at high CRF values (>=22). For medium values between 17 and 21, you can use --limit-sao, which restricts SAO so it has a less significant effect. For low CRF values (<=16), you can safely use --no-sao to prefer detail preservation, as the higher bitrates will naturally lead to fewer artifacts.","s":"SAO","u":"/docs/encoders/x265","h":"#sao","p":635},{"i":652,"t":"--deblock Deblock is another loop filter, this one intended to reduce blocking in videos, but it may have a blurring effect at high strengths. For most encodes, it is fine to leave this at the default value. At lower CRF values, it may be desirable to lower this to --deblock -1:-1 for anime or --deblock -2:-2 for live action, in order to preserve more grain and detail.","s":"Deblock","u":"/docs/encoders/x265","h":"#deblock","p":635},{"i":654,"t":"--psy-rd and --psy-rdoq These parameters control psychovisual rate distortion. What this means is the redistribution of bits to make a video more pleasing to human eyes. These options may be harmful to metrics that compare videos mathematically, but are better for human viewing because they prioritize facets of the video that humans prefer. --psy-rd biases toward matching the level of energy in the source image, which makes it good for retaining detail. For standard anime, it is recommended to use --psy-rd 1.0. The more grain, detail, and dark scenes in a source, the higher this should be raised. Much modern anime tends to have more detailed backgrounds and surfaces, so --psy-rd 1.5 may be a better default for modern anime. For live action, --psy-rd 1.5 or possibly even 2.0 may be preferred, as live action naturally has more detail and grain than anime. --psy-rdoq biases toward energy in general, which makes it key for preserving grain. --psy-rdoq 1.0 is a safe default for anime. Like psy-rd, this value should be increased more for sources with more grain. For grainy anime, --psy-rdoq 2.0 or even 3.0 can be preferable. 
Likewise, for many live action series, a default of --psy-rdoq 3.0 can be preferable, or even 4.0 with heavy grain. These are two settings that should be tweaked according to the source material.","s":"Psy-RD","u":"/docs/encoders/x265","h":"#psy-rd","p":635},{"i":656,"t":"--aq-mode 3 --aq-strength Adaptive quantization, shortened to AQ, is a mechanism to redistribute bitrate within a frame to improve visual quality by reducing artifacts. x265 has several different AQ modes, and --aq-mode 3 is nearly always best, because this mode adds a bias favoring dark scenes, which greatly reduces the effects of banding and blocking. The strength of AQ can also be set with --aq-strength. The optimal setting for this may vary depending on the type of content you are encoding. For anime, --aq-strength 0.7 will typically produce good results. For live action, a slightly higher 0.8 may be a better default. Higher values, up to --aq-strength 1, can be helpful for sources with heavy grain, although this will also increase overall bitrate.","s":"Adaptive Quantization","u":"/docs/encoders/x265","h":"#adaptive-quantization","p":635},{"i":658,"t":"--no-cutree CU-Tree is a mechanism very similar to MB-Tree in x264, which is intended to redistribute bitrate in a more optimal psychovisual manner. However, many people find CU-Tree to be harmful to quality, especially when attempting to encode videos with considerable amounts of grain, and therefore many people recommend disabling this with --no-cutree.","s":"CU-Tree","u":"/docs/encoders/x265","h":"#cu-tree","p":635},{"i":660,"t":"VVenC is an open source command line application for encoding H.266/VVC written in C++ and developed by the Fraunhofer Heinrich Hertz Institute (HHI).","s":"VVenC","u":"/docs/encoders/VVenC","h":"","p":659},{"i":662,"t":"Linux & macOS Windows Arch Linux users may check the AUR for the vvenc and vvenc-git packages. Compilation from source as shown below requires CMake. git clone https://github.com/fraunhoferhhi/vvenc.git cd vvenc mkdir build cd build cmake .. -DCMAKE_BUILD_TYPE=Release cmake --build . Binaries will be available in bin/release-static/ To be filled. If you believe you can help, see our Contribution Guide.","s":"Installation","u":"/docs/encoders/VVenC","h":"#installation","p":659},{"i":664,"t":"Since 15th June 2024, you can officially compile your own FFmpeg binary with libvvenc. If you use earlier FFmpeg revisions you will need to apply a custom patch. A comprehensive tutorial can be found in the official VVenC wiki (archive). A VVC decoder was integrated into FFmpeg in early 2024; FFmpeg 7.0 is the first release to support it. However, if you still want to compile FFmpeg with the vvdec decoder, you can refer to the official VVenC wiki (archive). There is an FFmpeg fork called FFmpeg-VVC. It is maintained by Martin Eesmaa, who is the author of the VVCEasy GitHub repo featuring guides and more general information about working with VVC. Please be aware that the script below produces a build of FFmpeg licensed under LGPL version 2.1 or later. It is legal to distribute the resulting binary, provided you do not interfere with the flags provided below. Build FFmpeg with libvvdec, libvvenc, and fdk-aac brew install libxml2 ffmpeg nasm # macOS-only; if on Linux, use your native package manager. Package names may differ. git clone https://github.com/fraunhoferhhi/vvenc git clone https://github.com/fraunhoferhhi/vvdec git clone https://github.com/mstorsjo/fdk-aac cd vvenc && mkdir build && cd build cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local .. sudo cmake --build . --target install -j $(nproc) cd ../../ cd vvdec && mkdir build && cd build cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local .. sudo cmake --build . --target install -j $(nproc) cd ../../ cd fdk-aac && ./autogen.sh && ./configure make -j sudo make install cd ../ git clone --depth=1 https://github.com/MartinEesmaa/FFmpeg-VVC cd FFmpeg-VVC export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig ./configure --enable-libfdk-aac --enable-libvvenc --enable-libvvdec --enable-static --enable-pic --enable-libxml2 --pkg-config-flags=\"--static\" --enable-sdl2 make -j Binaries will be available in the final directory you end up in after the build process is complete.","s":"FFmpeg Integration","u":"/docs/encoders/VVenC","h":"#ffmpeg-integration","p":659},{"i":666,"t":"There are two encoders: the simple encoder (vvencapp) and the full-featured expert mode encoder (vvencFFapp), which is based on the VTM configuration scheme. VVenC used to accept only raw YUV input until support was added for Y4M. 
Here are some examples: Standard VVenC input vvencapp -i input.y4m --qp 20 -o output.266 Preset slow + qpa (already default) + YUV420P10 vvencapp -i input.y4m --preset slow --qpa on --qp 20 -c yuv420_10 -o output.266 Piping with FFmpeg ffmpeg -hide_banner -loglevel error -i input.mkv -pix_fmt yuv420p10le -strict -1 -f yuv4mpegpipe - | vvencapp -i - --y4m --preset medium --qpa on --qp 20 -c yuv420_10 -o output.266 FFmpeg preset fast + qp 32 muxing to mp4 ffmpeg -i input.mkv -c:v libvvenc -qp 32 -preset fast out.mp4 info The FFmpeg vvenc plugin only supports the yuv420p10le output pixel format, which means the resulting video will always have 10-bit color depth. QPA VVenC by default operates with QP (Quantization Parameter), which is basically fixed quality. For \"CRF-like\" rate control, QPA (provided by --qpa) is enabled by default; it enables perceptually motivated QP adaptation based on XPSNR. QPA modifies the QP value on the fly spatially and temporally, as well as enabling temporal RDO.","s":"Usage","u":"/docs/encoders/VVenC","h":"#usage","p":659},{"i":668,"t":"x266 is an upcoming software encoder for the H.266 / VVC codec. Very little is currently known about the encoder other than the fact that it is still being developed by MulticoreWare and has a dedicated FAQ page from the 31st January 2023 Webinar FAQ. According to their FAQ, H2 2023 is their \"very rough and approximate\" ETA for a v1.0 public release, but as of 29th March 2024, there have been no updates.","s":"x266","u":"/docs/encoders/x266","h":"","p":667},{"i":670,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"Antialiasing","u":"/docs/filtering/antialiasing","h":"","p":669},{"i":672,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"Deband","u":"/docs/filtering/deband","h":"","p":671},{"i":674,"t":"x264 is a software library and command line application for encoding H.264 / AVC developed by VideoLAN, the people behind the ever-popular VLC Media Player, and released under the GNU GPL. It is written in C and Assembly with almost two decades' worth of development and threading optimizations, which make it the fastest software video encoder available; it also happens to be extremely popular. x264 has great fine detail retention, which makes it perfect for high fidelity content.","s":"x264","u":"/docs/encoders/x264","h":"","p":673},{"i":676,"t":"x264 is available in FFmpeg via libx264; to check if you have it, run ffmpeg -h encoder=libx264. 
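As a minimal sketch of an encode through FFmpeg (filenames are placeholders): ffmpeg -i input.mkv -c:v libx264 -preset slow -crf 20 output.mkv 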
You can input non-FFmpeg standard x264 parameters via -x264-params.","s":"FFmpeg","u":"/docs/encoders/x264","h":"#ffmpeg","p":673},{"i":678,"t":"x264 supports the following color spaces: Format Chroma Subsampling Supported Bit Depth(s) YUV420P 4:2:0 8-bit YUVJ420P 4:2:0 8-bit (Full range) YUV422P 4:2:2 8-bit YUVJ422P 4:2:2 8-bit (Full range) YUV444P 4:4:4 8-bit YUVJ444P 4:4:4 8-bit (Full range) NV12 Semi-planar 8-bit NV16 Semi-planar 8-bit NV21 Semi-planar 8-bit (reversed) GRAY8 - 8-bit YUV420P10LE 4:2:0 10-bit YUV422P10LE 4:2:2 10-bit YUV444P10LE 4:4:4 10-bit GBRP10LE - 10-bit GRAY10LE - 10-bit","s":"Supported Color Space","u":"/docs/encoders/x264","h":"#supported-color-space","p":673},{"i":680,"t":"Pre-built binary [Recommended]: https://code.videolan.org/videolan/x264 Choose your operating system there, or you can try using your package manager.","s":"Installation","u":"/docs/encoders/x264","h":"#installation","p":673},{"i":682,"t":"x264 has been praised for its simple, no-fuss settings. Here are some examples: Simple raw Y4M input with CRF 20 and raw 264 bitstream output x264 --crf 20 -o output.264 input.y4m Preset slow, CRF 20, Y4M input x264 --preset slow --crf 20 -o output.264 input.y4m These next couple of examples utilize FFmpeg to pipe video into x264. FFmpeg piping ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | x264 --preset slow --crf 20 --demux y4m - -o output.264 FFmpeg piping, MKV output ffmpeg -v error -i input.mkv -f yuv4mpegpipe -strict -1 - | x264 --preset slow --crf 20 --demux y4m - -o output.mkv Output Containers x264 can output 264, MKV, FLV (Flash Video), and MP4 (if compiled with GPAC or L-SMASH support). For more information about what containers are, see the \"Terminology\" section on containers. caution x264 will use the Haali Matroska Muxer for MKV outputs, which has seeking issues. It is recommended to remux using FFmpeg or mkvmerge/MKVToolNix.","s":"Usage","u":"/docs/encoders/x264","h":"#usage","p":673},{"i":684,"t":"As x264 is made to \"just work\", there aren't many advanced parameters to modify. The general guideline is to encode as slowly as you can tolerate.","s":"Recommendations","u":"/docs/encoders/x264","h":"#recommendations","p":673},{"i":686,"t":"--preset veryslow The most obvious way to increase fidelity per bit is to allow the encoder to spend more effort, and therefore time, encoding. This preset is decently slow, but preset placebo is even slower.","s":"Preset","u":"/docs/encoders/x264","h":"#preset","p":673},{"i":688,"t":"--threads X This is set to 1.5x the number of available cores by default, and shouldn't need to be adjusted unless you need to reduce the number of threads for some reason. x264's threading is very efficient, and in most cases, x264 should be able to completely saturate most consumer CPUs.","s":"Threads","u":"/docs/encoders/x264","h":"#threads","p":673},{"i":690,"t":"--open-gop Enables Open GOP (Group of Pictures), where GOPs can reference one another, thus improving compression with little speed loss. For unknown reasons it is disabled by default in x264.","s":"Open GOP","u":"/docs/encoders/x264","h":"#open-gop","p":673},{"i":692,"t":"--aq-mode 3 In short, this makes x264 bias toward dark areas and spend more bitrate there, so dark scenes will look less bad, with basically no speed loss.","s":"AQ Mode","u":"/docs/encoders/x264","h":"#aq-mode","p":673},{"i":694,"t":"--bframes 8 --ref 12 These parameters control the number of consecutive B-frames and reference frames x264 can use for compression; generally, the more the better. 
The maximum is 16; higher values will definitely increase compute time.","s":"Reference Frames","u":"/docs/encoders/x264","h":"#reference-frames","p":673},{"i":696,"t":"--no-mbtree This option disables mb-tree rate control. mb-tree rate control is intended to redistribute bitrate to give more bits to frames that have less motion, and fewer bits to frames that have more motion (because artifacts in those frames will be less noticeable when the video is playing). However, many users claim this can have negative effects on video quality, especially when encoding videos that have significant amounts of grain.","s":"MB Tree","u":"/docs/encoders/x264","h":"#mb-tree","p":673},{"i":698,"t":"x264 can also encode lossless video, allowing it to compete with lossless video codecs like FFV1 and UT Video. To encode lossless video, use --qp 0. Slower presets will decrease the size even further while the video remains lossless. However, when space is not a concern, such as when encoding an intermediate lossless for later encoding to a lossy format, x264 can become the fastest lossless codec available, providing super fast encoding and decoding. For the absolute fastest encoding and decoding, one can use both --preset ultrafast and --tune fastdecode. However, --preset superfast still provides incredibly fast encoding and decoding speed, with 20-30% space savings for lossless compared to ultrafast, so it may be the ideal choice for many users. Why QP 0 instead of CRF 0? CRF automatically adjusts a number of quantization parameters to achieve a desired quality output. QP stands for Quantization Parameter, and allows full control over the resulting video quality. In this case (all-intra or lossless), using QP is necessary, but for lossy encoding CRF will produce better visual fidelity per bit.","s":"Lossless Encoding","u":"/docs/encoders/x264","h":"#lossless-encoding","p":673},{"i":701,"t":"Multimedia encoding & the digital compression space is an incredible field that many tech enthusiasts, professionals, & laymen have no easy entry point to. Wikipedia has a vast amount of information on many of the individual topics covered here but doesn't offer a cohesive way to engage with the entire sphere of knowledge as a whole. While this site started as a lighthearted guide (you'll see the remnants of this strewn about the various wiki entries), it has quickly become an endeavor to unite digital compression aficionados to make the knowledge more accessible for all.","s":"Why are you doing this?","u":"/docs/FAQ","h":"#why-are-you-doing-this","p":699},{"i":703,"t":"While this is true, this is easier said than done. Multimedia Wiki is not as active as it used to be, & a new effort makes sense to carry past efforts forward. guide.encode.moe is stagnant and mostly focused on fansubbing, & the docs & personal experiences that do exist are scattered around the Internet. There are sources littered about that explain pieces of the larger puzzle, but these serve as small drops in a bucket of vast incoherency & don't meaningfully remedy the steep learning curve for understanding multimedia compression without background knowledge.","s":"But alternatives exist. 
Why not contribute there?","u":"/docs/FAQ","h":"#but-alternatives-exist-why-not-contribute-there","p":699},{"i":705,"t":"See our Contribution Guide page in the sidebar.","s":"How do I get started as a contributor?","u":"/docs/FAQ","h":"#how-do-i-get-started-as-a-contributor","p":699},{"i":707,"t":"This wiki is mostly going to be focused on multimedia compression, & the term \"Codec\" is already widely recognized & understood. While other topics like video filtering & general compression algorithms may be covered, the main focus remains multimedia compression.","s":"Why \"Codec Wiki\"?","u":"/docs/FAQ","h":"#why-codec-wiki","p":699},{"i":709,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"Decombing","u":"/docs/filtering/decombing","h":"","p":708},{"i":711,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"Deinterlace","u":"/docs/filtering/deinterlace","h":"","p":710},{"i":714,"t":"In order to do filtering, we are going to need some filters. Vapoursynth includes some basic filters, but there are many more filters built by the community that can be incredibly useful to us. For example, we are going to make use of some of the functions in vs-tools. If you are on Arch, the recommended way to install this is via the AUR, using the vapoursynth-plugin-vstools-git package. For other operating systems, it is simpler to install all of the JET plugins at once, using the vs-jet utility. Note that these do require Python, which you should already have installed if you've installed Vapoursynth. You may have noticed in the previous section that all Vapoursynth scripts use Python syntax. It is not necessary to know Python in order to write Vapoursynth scripts, although it can be helpful for some more advanced techniques.","s":"Intro to Filters","u":"/docs/filtering/basics","h":"#intro-to-filters","p":712},{"i":716,"t":"Order matters when applying filters to a video. There are certain actions that must be taken at the very start, and certain actions that are better to be done later in a filter chain. The following image shows a recommended order for many of the most common filter actions: If this seems a bit complicated at first, that's okay. We will learn about each of these steps one at a time, and when each of these filters might be needed.","s":"Filter Order","u":"/docs/filtering/basics","h":"#filter-order","p":712},{"i":718,"t":"In most cases, the first action we want to take on a video is to ensure the correct matrix, primaries, and transfer characteristics are set, and convert the video to 16-bit. The colorimetry properties are important for ensuring accurate colors, and 16-bit helps filters to work in higher quality and create fewer artifacts. The currently recommended way to handle this is with the vstools.initialize_clip function. This function will handle both ensuring that colorimetry is set, and converting to 16-bit with appropriate dithering if needed. We can add it to a script in the following way: import vapoursynth as vs core = vs.core clip = core.lsmas.LWLibavSource(source=\"myinput.mkv\") import vstools clip = vstools.initialize_clip(clip) clip.set_output(0) If the video has existing colorimetry data set on it, initialize_clip will keep the existing data. Otherwise, it will assume colorimetry based on the video resolution. 
Usually, those assumptions will be correct. However, there may be cases where we know that the assumptions are incorrect, and we need to set them manually. For example, we may have a 4k video that we know was upscaled from a 1080p video, and we want to force BT.709 colorimetry. In that case, we can specify the properties to initialize_clip, and it will use the properties we specify instead of making guesses. from vstools import Primaries, Matrix, Transfer clip = vstools.initialize_clip(clip, primaries=Primaries.BT709, matrix=Matrix.BT709, transfer=Transfer.BT709) initialize_clip also has a companion function called finalize_clip, which will, by default, convert the video down to 10-bit with dithering, and if the video is in limited color range, clip all pixels to fit within the limited color range (this would be done by the player on playback, but if we do it now, we can save a few bits for the encoder). The function also takes a bits parameter in case you would like to finalize to 8 bits instead. As a result, a very basic template for a Vapoursynth script would be something like this: import vapoursynth as vs core = vs.core clip = core.lsmas.LWLibavSource(source=\"myinput.mkv\") import vstools clip = vstools.initialize_clip(clip) # # Add filters inside this section # clip = vstools.finalize_clip(clip) clip.set_output(0)","s":"Bit Depth and Colorimetry","u":"/docs/filtering/basics","h":"#bit-depth-and-colorimetry","p":712},{"i":720,"t":"Sometimes, you may encounter a source that has black bars on the sides or on the top and bottom. Generally, we don't like to keep these, and want to remove them. This process is called \"cropping\". First, we need to figure out how many pixels to crop from each side. Fortunately, vs-preview has a tool to make this easy. Using this tool, which is found under the \"Misc\" section in the bottom right of the app, allows us to experiment with crop values and find the correct ones without having to reload the Vapoursynth script. Once we flip the tool to \"On\", we can adjust each of the dimensions until we see the black borders disappear. We want to try to get as exact as possible, although the crop values must be a multiple of 2 when working with YUV420 video (which is the most common). In this case, we know we want to remove from the top and bottom, so we will adjust those values until we find the right ones. Note that the size of the bars may often be the same on both sides, but not always, so do be careful to check both sides. vs-preview's zoom tool shown here can also be helpful to ensure we've gotten the right values. Once we've found the correct values, we can click \"Copy cropping command\" to copy our value to the clipboard. We can then paste it into our Vapoursynth script, being sure to add the appropriate variable name for our clip. clip = clip.std.Crop(0, 0, 138, 138) Remember to turn off the crop tool in vs-preview, or else we'll be doubling the crops from here on. Now, go ahead and reload the preview with Ctrl+R. No more black bars.","s":"Cropping","u":"/docs/filtering/basics","h":"#cropping","p":712},{"i":722,"t":"Resizing is the process of changing the resolution of a video, whether that's making it bigger or smaller. Note that although the general idea is that \"higher resolution is better\", we generally don't want to upscale content. This simply makes output videos larger in file size, but doesn't result in an increase in quality, because the higher resolution is artificial. 
However, there are times when you may want or need to resize a video. Unfortunately, this is not as simple as it sounds, because we want to select a high-quality resizing method. With this section, we'll introduce slow.pics, a very useful tool for comparing screenshots. Here, we'll be using it to show the differences between various resizers. Vapoursynth comes with several resizers built-in, which will be the easiest to use. Of these, we'll be showcasing the following: clip = clip.resize.Bicubic(1920, 1080) clip = clip.resize.Lanczos(1920, 1080) clip = clip.resize.Spline36(1920, 1080) Bicubic is the most widely used resizer. It is very fast and produces decent quality, but may produce less sharpness than more advanced scalers. Lanczos produces more sharpness, but may also produce more ringing. Spline36 uses a spline-based resizing method, and produces results somewhere between Bicubic and Lanczos. However, using plugins, we have access to some more advanced scalers. clip = vskernels.Catrom().scale(clip, 1920, 1080) clip = vskernels.Catrom(sigmoid=True).scale(clip, 1920, 1080) clip = vskernels.Hermite(linear=True).scale(clip, 1920, 1080) clip = vsscale.SSIM().scale(clip, 1920, 1080) Here we will look at downscalers. Catrom, short for Catmull-Rom, is a variant of Bicubic which uses different parameters in order to achieve a better balance between sharpness and ringing. It is often considered the best of the Bicubic kernels for image quality. In the second example, you can see that we pass an additional sigmoid=True parameter to Catrom. This performs the scaling using a sigmoid function, which flattens the ends of the scaling curve to reduce ringing. The third example here, Hermite, is another variant of Bicubic, and we are passing linear=True to perform the rescaling in linear light rather than in gamma light. This kernel is very good in regard to not producing ringing, but it does have the effect of thinning line art. Therefore, although some users prefer it, other users dislike it. Fourth, we have SSIM, which is a downscaler tuned around the SSIM video quality metric. This downscaler is intended to prioritize preserving detail and sharpness when downscaling. In this comparison, we can see the differences between the different scalers, on an image that was upscaled to 4k by nnedi3, then downscaled back using the scaler noted in the image title. Here we have a couple of other examples comparing the higher-quality scalers in particular. Of these, my current recommendation would be Catrom(sigmoid=True) for anime, and SSIM for live action. Some users may prefer Hermite(linear=True) for anime. For upscaling, we have the following high quality options: clip = vskernels.EwaLanczos().scale(clip, 3840, 2160) clip = vsaa.Nnedi3().scale(clip, 3840, 2160) clip = vsscale.Waifu2x().scale(clip, 3840, 2160) Ewa-Lanczos is a variant of Lanczos intended for high sharpness, making it good for upscaling, where we want an image to look sharper so that it does not appear upscaled. Nnedi3 is a neural-network-based upscaler which attempts to interpolate every second line to produce a sharp, upscaled image. It is generally safe for use on all content, and produces good results. Waifu2x is another AI-assisted upscaler, this one tuned specifically on anime. It produces very high quality and sharp images. However, the results of using it on live action content are unknown, so it is recommended to only use it on anime. Here we can see the effects of these upscalers. 
My current recommendation would be EwaLanczos for live action, and Waifu2x for anime. clip = soifunc.good_resize(clip, 1920, 1080) clip = soifunc.good_resize(clip, 1920, 1080, anime=True) Lastly, we have soifunc.good_resize, which is a smart hybrid scaler that automatically chooses the best scaler depending on whether we are upscaling or downscaling, and may use a different scaler for chroma than it does for luma for even better quality. Note that the scalers chosen by this function may change over time as new knowledge surfaces, and are based on the above recommendations. We can also add the anime=True parameter (false by default) to change the scalers to be tuned more toward the fine line art that is present in anime, rather than the highly detailed elements such as skin and hair that are present in live action. With these features, it is intended to be an easy-to-use, general purpose, high-quality scaler.","s":"Resizing","u":"/docs/filtering/basics","h":"#resizing","p":712},{"i":724,"t":"Sometimes, we may need to cut out a portion of the video, for example, if we want to encode only the opening theme, or if our source has commercials that we want to remove. Vapoursynth supports this using Python's slice syntax. clip = clip[1805:3185] This snippet will trim the clip to include frames 1805-3184. Note that frame 3185 is not included. Now, let's say we wanted to exclude this segment. We can do this by making clips of the segments we'd like to keep, and joining them together. clip = clip[:1805] + clip[3185:] This snippet will create trims for the start of the clip to and including frame 1804, and from frame 3185 to the end of the clip. It then joins them together into one clip, in the order we specify. If we wanted to reverse the segments, so that segment 2 comes first, we could do that simply by changing the order: clip = clip[3185:] + clip[:1805]","s":"Trimming","u":"/docs/filtering/basics","h":"#trimming","p":712},{"i":726,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"Dehalo","u":"/docs/filtering/dehalo","h":"","p":725},{"i":728,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"Denoise","u":"/docs/filtering/denoise","h":"","p":727},{"i":730,"t":"Denoising involves removing random noise from the video. Such noise can result from film grain, signal interference, or simply low light conditions. In any case, noise can greatly reduce compression efficiency, especially if the video codec doesn't support film grain synthesis. In FFmpeg there are two filters available for denoising: hqdn3d nlmeans","s":"Overview","u":"/docs/filtering/denoise","h":"#overview","p":727},{"i":732,"t":"hqdn3d is a fast, high-quality 3D denoising filter which improves compressibility. It can be applied to images and videos.","s":"hqdn3d","u":"/docs/filtering/denoise","h":"#hqdn3d","p":727},{"i":734,"t":"ffmpeg -i input.mp4 -vf hqdn3d output.mp4 The default configuration should be fine for most use cases. If you still see too much noise, you can adjust the luma_spatial parameter (other parameters are derived from it by default). A higher luma_spatial value will result in stronger denoising. By default it is set to 4. 
ffmpeg -i input.mp4 -vf hqdn3d=8 output.mp4 # which is the same as ffmpeg -i input.mp4 -vf hqdn3d=8:6:12:9 output.mp4 caution Setting luma_spatial to larger values could result in ghosting and banding artifacts. For a description of all four parameters, take a look here.","s":"Usage","u":"/docs/filtering/denoise","h":"#usage","p":727},{"i":736,"t":"nlmeans uses the Non-Local Means algorithm to do denoising. Each pixel is compared to similar pixels based on their surroundings (context). The size of such context is expressed as rxr. The filter is rather slow and doesn't parallelize well. Only use it in cases where the video contains a lot of noise or you need very high quality denoising. In all other cases, hqdn3d will be more efficient.","s":"nlmeans","u":"/docs/filtering/denoise","h":"#nlmeans","p":727},{"i":738,"t":"ffmpeg -i input.mp4 -vf nlmeans output.mp4 The default configuration should be fine for most use cases. ffmpeg -i input.mp4 -vf nlmeans=s=3.0:r=31:p=15 output.mp4 Stronger denoising with larger research and patch sizes. Might be useful for ultra high quality denoising in 4K+ resolutions, but you might struggle to achieve even 0.1 fps. ffmpeg -i input.mp4 -vf nlmeans=s=1.0:r=5:p=3 output.mp4 Prioritize speed over quality.","s":"Usage","u":"/docs/filtering/denoise","h":"#usage-1","p":727},{"i":740,"t":"s - Denoising strength, where 1.0 is the lightest (and the default) and 30.0 is the strongest, although I wouldn't recommend going above 10.0. r - Research size, where 15 is the default; it must be an odd number ranging from 0 to 99. The higher the value, the slower denoising will be. p - Patch size, where 7 is the default; it must be an odd number ranging from 0 to 99. For a description of all possible parameters, take a look here.","s":"Parameters","u":"/docs/filtering/denoise","h":"#parameters","p":727},{"i":742,"t":"hqdn3d may create visual artifacts like ghosting, banding, and blocking. nlmeans creates much less noticeable artifacts, like a cartoonish look, but only on very noisy inputs.","s":"Notes","u":"/docs/filtering/denoise","h":"","p":727},{"i":744,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"Inverse Telecine","u":"/docs/filtering/ivtc","h":"","p":743},{"i":746,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"Stabilizing","u":"/docs/filtering/stabilizing","h":"","p":745},{"i":748,"t":"Stabilizing is the process of reducing unwanted camera movement and shakes in video clips using FFmpeg. This improves overall encoding efficiency by minimizing unpredictable global movement, such as that from handheld cameras. The recommended method for stabilizing videos with FFmpeg is to use the VidStab library, which requires a build of FFmpeg compiled with --enable-libvidstab. VidStab offers two filters within FFmpeg: ffmpeg -hide_banner -filters | grep vidstab ... vidstabdetect V->V Extract relative transformations, pass 1 of 2 for stabilization (see vidstabtransform for pass 2). ... vidstabtransform V->V Transform the frames, pass 2 of 2 for stabilization (see vidstabdetect for pass 1). 
The vidstabdetect filter is used in the first pass to generate a video transformations file (.trf), while vidstabtransform is employed in the second pass to apply those transformations.","s":"Overview","u":"/docs/filtering/stabilizing","h":"#overview","p":745},{"i":750,"t":"To stabilize a video using default parameters, follow these two steps: ffmpeg -i input.mp4 -vf vidstabdetect -f null - ffmpeg -i input.mp4 -vf vidstabtransform output.mp4 After running the first command, a transforms.trf file will be created in the directory where you executed FFmpeg. Once the stabilization process is complete, you can safely delete this file. The resulting output.mp4 video will have reduced shakiness. For stabilizing high-framerate videos with strong camera movement: ffmpeg -i input.mp4 -vf vidstabdetect=shakiness=8:result=a.trf -f null - ffmpeg -i input.mp4 -vf vidstabtransform=smoothing=30:zoom=-5:input=a.trf output.mp4 tip Remember to set appropriate video/audio codec parameters in the command before output.mp4. You must not use -c:v copy, as the video will undergo transformations.","s":"Usage","u":"/docs/filtering/stabilizing","h":"#usage","p":745},{"i":752,"t":"result - Sets the output .trf file location shakiness - Adjusts movement reduction, with 1 being the least and 10 the most reduction (highest stabilization). Default is 5. accuracy - Controls movement reduction accuracy. Lower values use less CPU but may be less accurate. FFmpeg's minimum allowed value is 3. Processing speed was approximately 21 fps at 3 and 14 fps at 15. For a complete list of parameters, refer to the vidstabdetect documentation.","s":"vidstabdetect Parameters","u":"/docs/filtering/stabilizing","h":"#vidstabdetect-parameters","p":745},{"i":754,"t":"input - Specifies the input .trf file created by vidstabdetect smoothing - Determines the number of frames considered for future and past movement estimation. Default is 10. zoom - Adjusts the zoom percentage, with 0% being the default. Negative values create a zoom-out effect. interpol - Sets the type of interpolation used: no - No interpolation linear - Only horizontal bilinear - Faster but may result in blurry output (default) bicubic - Slower See the vidstabtransform documentation for more details.","s":"vidstabtransform Parameters","u":"/docs/filtering/stabilizing","h":"#vidstabtransform-parameters","p":745},{"i":756,"t":"Stabilization is a lossy process that can reduce video quality due to zoom and interpolation effects. Some users may notice overall wobbliness in stabilized videos, especially at higher stabilization levels. This is an inherent characteristic of this filter. Depending on your use case, consider employing two-pass encoding along with these stabilization steps.","s":"Notes","u":"/docs/filtering/stabilizing","h":"#notes","p":745},{"i":758,"t":"VapourSynth is an application for video manipulation. Or a plugin. Or a library. It's hard to tell because it has a core library written in C++ and a Python module to allow video scripts to be created. Fredrik Mellbin, creator of VapourSynth","s":"Vapoursynth","u":"/docs/filtering/vapoursynth","h":"","p":757},{"i":760,"t":"In the realm of video processing, one will frequently encounter media with various quality issues. 
These can range from minor imperfections to significant degradation, including: Excessive film grain or noise that significantly increases bitrate due to its unpredictable nature Visible banding artifacts Unwanted halos around objects Interlacing issues in older, unrestored footage Telecine artifacts from improper film-to-video conversion And more, on our video artifacts page. To address these challenges, video filtering techniques are employed. Currently, there are three primary software frameworks used for video filtering: FFmpeg VapourSynth AviSynth VapourSynth is designed as a 21st-century upgrade and rewrite of AviSynth, which was originally created by Ben Rudiak-Gould, Edwin van Eggelen, Klaus Post, Richard Berg, and Ian Brabham in May 2000. One of the most attractive features of this complete rewrite is its improved multithreading capability, an area where AviSynth struggled due to its aging infrastructure. Some longtime AviSynth users are reluctant to switch to VapourSynth, preferring to stick with a familiar workflow. There is nothing wrong with this preference, as both tools have their merits in video processing; that being said, the Codec Wiki's Filtering section focuses primarily on VapourSynth (and occasionally FFmpeg). It is important to note that working with VapourSynth requires a basic understanding of Python, as the filtering process involves scripting.","s":"Introduction","u":"/docs/filtering/vapoursynth","h":"","p":757},{"i":763,"t":"At the time of writing, Python 3.12 is required. This will change in the future, so consult their website. Download the installer (.exe) unless you require portability. Install it.","s":"Microsoft Windows","u":"/docs/filtering/vapoursynth","h":"#microsoft-windows","p":757},{"i":765,"t":"Currently, Arch is the best Linux distribution for working with Vapoursynth because the vast majority of filters and plugins are available in the AUR. This makes installing and updating filters easy. If you are not already, we recommend using an AUR helper such as paru or yay. To install vapoursynth, simply install the vapoursynth package from the official repositories using pacman or your preferred AUR helper. Plugins are all prefixed with vapoursynth-plugin-, such as vapoursynth-plugin-lsmashsource-git, and as such can be discovered easily.","s":"Arch Linux","u":"/docs/filtering/vapoursynth","h":"#arch-linux","p":757},{"i":767,"t":"Vapoursynth is supported on all Linux distributions. Installation methods may vary by distribution. Contributions would be helpful to provide instructions for more distributions.","s":"Other Linux","u":"/docs/filtering/vapoursynth","h":"#other-linux","p":757},{"i":769,"t":"There are currently two leading previewers for Vapoursynth. If you want to preview your scripts with capabilities such as seeking, you will need to use one of these applications. The first is YomikoR's Vapoursynth Editor, which includes a built-in code editor alongside a video previewer. This makes it extremely easy to set up for users who are new to Vapoursynth. The second is JET's fork of vs-preview, which is a standalone previewer with utilities for tasks such as cropping, screenshotting, and uploading comparisons. This tool is more advanced than Vapoursynth Editor, but does not include an editor, so you will need to pair it with an editor such as Visual Studio Code. 
The GitHub page for vs-preview includes instructions for setting this up.","s":"Previewing","u":"/docs/filtering/vapoursynth","h":"#previewing","p":757},{"i":771,"t":"Vapoursynth provides a command-line utility called vspipe for outputting filtered video. Using this utility to pipe y4m video is the most common way to use Vapoursynth with an encoder. For example, the following command would pipe the output from a Vapoursynth script into x264: vspipe -c y4m input.vpy - | x264 --demuxer y4m -o output.mkv -","s":"Output","u":"/docs/filtering/vapoursynth","h":"#output","p":757},{"i":773,"t":"For Vapoursynth to produce output, it has to load a video in some way. This is done with source filters. The most basic method is using BlankClip to create a clip of a certain resolution and frame rate. For example, the following script would give us a blank clip at 640x480: import vapoursynth as vs core = vs.core clip = core.std.BlankClip(width=640, height=480) clip.set_output(0) But a plain black video isn't very useful, is it? We want to load real videos so we can do filtering on them. For this, there are a few different source filters we can look at.","s":"Source Filters","u":"/docs/filtering/vapoursynth","h":"#source-filters","p":757},{"i":775,"t":"LSmashSource is a source filter using lsmash as the underlying source library. It is the most commonly used source filter, and is generally reliable for most source formats, though it may have frame accuracy issues when seeking with certain input formats, such as VC-1. If this is a concern, it is recommended to encode to lossless first before using any encoding methods that require seeking, such as av1an. Here is an example of loading a video file using LSmashSource: import vapoursynth as vs core = vs.core clip = core.lsmas.LWLibavSource(source=\"input.mkv\") clip.set_output(0)","s":"LSmashSource","u":"/docs/filtering/vapoursynth","h":"#lsmashsource","p":757},{"i":777,"t":"ffms2 is a source filter based on ffmpeg. It generally should give the same results as LSmashSource, especially when using the git version, which has fixed support for newer formats such as AV1. If you are having issues with a source file in LSmashSource, ffms2 can be a good fallback to try. import vapoursynth as vs core = vs.core clip = core.ffms2.Source(source=\"input.mkv\") clip.set_output(0)","s":"ffms2","u":"/docs/filtering/vapoursynth","h":"#ffms2","p":757},{"i":779,"t":"BestSource is also based on ffmpeg, but uses additional techniques to ensure frame accuracy in all scenarios with all input formats. The downside is that it must decode the entire video during indexing, which means the first load of a given video will take longer. Subsequent loads will be quick, because BestSource caches the index in a file. import vapoursynth as vs core = vs.core clip = core.bs.VideoSource( source=\"input.mkv\", cachepath=\"/\" ) clip.set_output(0) We add the cachepath=\"/\" setting to emulate the behavior of the other source filters, which is to place the index file next to the source video. This helps with portability and cleaning up after ourselves. The default behavior is to place the index file in a temporary directory local to the machine.","s":"BestSource","u":"/docs/filtering/vapoursynth","h":"#bestsource","p":757},{"i":781,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. AVIF, which stands for AV1 Image File Format, is a newer image codec that is based on the AV1 video codec. AVIF supersedes HEIC, & uses the same HEIF container as HEIC. 
AVIF is designed to have a better featureset & better general lossy compression than older image codecs, including WebP, HEIC, & JPEG. AVIF is often compared to JPEG-XL, though in practice, the two have very different strengths. There are two AVIF profiles available for encoding: Baseline & Advanced, which are based on AV1's Main & High profiles respectively. The AVIF Baseline profile supports resolutions up to 8,192*4,352, specified by the requirement of using AV1 Level 5.1 or lower. Using tiling, it is possible to increase the maximum resolution of the AVIF Baseline profile to 65536*65536, although this hurts coding efficiency as visual anomalies may be encountered along the edges of the tile boundaries. AVIF is also limited to 10 bit color precision in its Baseline profile. In the AVIF Advanced profile, the maximum image dimensions extend to 16,384*8,704. Tiling may be used in the Advanced profile to create larger images, but the same limitations regarding visual artifacts apply. The AVIF Advanced profile extends the allowed AV1 Level to 6.0 or lower, & the highest bit depth offered by this profile is 12 BPC. It is worth noting that while it is currently a near certainty that AVIF implementations will support both the Baseline & Advanced profiles, this may not always be the case. This is a problem that affects HEIC currently, & is a known potential weakness of video-based image codecs.","s":"AVIF","u":"/docs/images/AVIF","h":"","p":780},{"i":783,"t":"Lossless? Poorly Lossy? Yes Supported Bit Depths: 8 BPC, 10 BPC, 12 BPC HDR/Wide Gamut? Yes Animation? Yes Transparency? Yes Progressive Decode? No Royalty Free? Yes Compatible Browsers (full support) Google Chrome 85+ Safari 16.4+ Firefox 113+ Opera 71+ GNOME Web Thorium Mercury","s":"Performance Checklist","u":"/docs/images/AVIF","h":"#performance-checklist","p":780},{"i":786,"t":"AVIF is known for its extremely strong lossy compression performance for non-photographic images as well as photographic images in the low to medium fidelity range. AVIF is consistently better than JPEG visually, except with complex images that contain a lot of highly entropic data like random noise. AVIF compatibility has grown rapidly since its adoption in Google Chrome in 2020. For a relatively new image format, its level of penetration has been stellar, especially in the browser market. At this point in time, it would be a safe bet to ship AVIF images for your site given they compress better than JPEG & have older formats provided as fallbacks. AVIF's wider featureset enables new experiences through images, including HDR. AVIF also presents astonishing animation prowess, as it is capable of using AV1's inter-frame coding techniques, making it easily the best animated image format for most use cases where it is compatible.","s":"Advantages","u":"/docs/images/AVIF","h":"#advantages","p":780},{"i":788,"t":"AVIF encoding implementations are difficult to use, and images require much longer encoding times for what can be considered competitive quality. Making encoding more difficult, AVIF's use of intra-frame coding techniques that share data between blocks reduces parallelization capability & worsens generation loss. Theoretically, this improves coding efficiency, though.
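To make the command-line walkthrough below easier to script, here is a hedged sketch that simply shells out to avifenc with the exact flags explained in the next passage (avifenc must be installed; the filenames and the encode_avif helper are illustrative):

```python
import subprocess

def encode_avif(src: str, dst: str, cq_level: int = 16) -> None:
    # Mirrors the avifenc command broken down below; see that
    # section for what each flag does.
    subprocess.run([
        "avifenc", "-c", "aom", "-s", "4", "-j", "8", "-d", "10", "-y", "444",
        "--min", "1", "--max", "63",
        "-a", "end-usage=q", "-a", f"cq-level={cq_level}", "-a", "tune=ssim",
        src, dst,
    ], check=True)

encode_avif("input.png", "output.avif")
```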
Via the AVIF Encoding section of the aomenc page: AVIF Encoding with aomenc through avifenc Using aomenc through avifenc is widely considered to be the best way to encode AVIF images, as SVT-AV1 only supports 4:2:0 chroma subsampling, rav1e isn't fast enough for still images, & the libaom team have put more effort into intra coding than the teams responsible for producing the other prominent open source AV1 encoders. A sample command for encoding AVIF looks like this: avifenc -c aom -s 4 -j 8 -d 10 -y 444 --min 1 --max 63 -a end-usage=q -a cq-level=16 -a tune=ssim [input] output.avif Where: -c aom is the encoder -s 4 is the speed. Speeds 4 & below offer the best compression quality at the expense of longer encode times. -j 8 is the number of threads the encoder is allowed to use. Increasing this past 12 will sometimes hurt encode times, as AVIF encoding via aomenc doesn't parallelize perfectly. Test using a speed benchmark to verify which value works best for you. -d 10 is the bit depth. Specifying a value below 10 isn't recommended, as it will hurt coding efficiency even with an 8 bit source image. -y 444 is the chroma subsampling mode. 4:4:4 chroma subsampling tends to provide better compression than 4:2:0 with AVIF, though on some images 4:2:0 chroma subsampling might be the better choice. cq-level=16 is how you specify quality. Lower values correspond to higher quality & filesize, while higher values mean a smaller, lower-quality output is desired. This is preceded by -a because it is an aomenc option, not an avifenc one. tune=ssim is how the encoder handles RDO (rate-distortion optimization). This may be redundant with the default aomenc parameters, but specifying it doesn't hurt, & guards against an unintended change if a default is modified sometime in the future. Additionally, AVIF tends to be underwhelming at high fidelity with photographic images. Compared to older codecs it usually outperforms the competition, but since medium to high fidelity tends to be the target for a lot of modern web delivery, it is disappointing to see AVIF not performing as well here. AVIF also does not have progressive decode. This is a common weakness of video-based image codecs. While there is a hacky way to do progressive AVIF by encoding a low fidelity frame & then a high fidelity frame in an animated AVIF at a high framerate so the low fidelity frame is loaded & plays first, this is far from ideal for the average user & adds to an already burdensome encoding process. Additionally, this has issues with Firefox, which only recently got support for animated AVIF. Finally, AVIF's lossless mode is incredibly underwhelming, often producing larger files than PNG. When compressing losslessly, avoid AVIF entirely.","s":"Limitations","u":"/docs/images/AVIF","h":"#limitations","p":780},{"i":790,"t":"While AVIF is certainly promising, its shortcomings","s":"Conclusion","u":"/docs/images/AVIF","h":"#conclusion","p":780},{"i":792,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! Graphics Interchange Format (GIF) is an image file format first released by CompuServe in 1987. It remains popular due to its widespread support for animated images despite its obsolete efficiency. Other animated image formats like Animated AVIF & Animated WebP have since surpassed GIF in functionality, as has the animated PNG variant APNG.","s":"GIF","u":"/docs/images/GIF","h":"","p":791},{"i":794,"t":"Lossless? Yes Lossy?
No Supported Bit Depth: 256 colors HDR/Wide Gamut? No Animation? Yes Transparency? Yes Progressive Decode? No Royalty Free? Yes","s":"Performance Checklist","u":"/docs/images/GIF","h":"#performance-checklist","p":791},{"i":796,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! The HEIC image format, also known as the High Efficiency Image Format, is a newer image codec that was developed to provide improved compression and better performance compared to traditional image formats like JPEG. HEIC files use HEVC internally, meaning the format is not royalty free. While this has limited its adoption across the Web, this format is supported by many modern devices including the entire Apple ecosystem. iPhones shoot HDR HEIC photos by default by utilizing the iPhone's HEVC hardware video encoder to capture these images. Some Android phones are capable of shooting HEIC as well, but these are often transcoded from JPEG. HEIC has largely been surpassed by AVIF, which uses the same container to store AV1-compressed images.","s":"HEIC","u":"/docs/images/HEIC","h":"","p":795},{"i":798,"t":"Lossless? No Lossy? Yes Supported Bit Depths: 8 BPC, 10 BPC Higher bit depths not widely supported HDR/Wide Gamut? Yes Animation? Yes Transparency? Yes Progressive Decode? No Royalty Free? No","s":"Performance Checklist","u":"/docs/images/HEIC","h":"#performance-checklist","p":795},{"i":800,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. Pending Review The content in this entry may not be entirely accurate, & is pending further review to assess the quality of the information. JPEG (Joint Photographic Experts Group) compression is a widely used method for reducing the size of digital images while preserving visual quality. It's based on the principles of lossy compression, which means that some image data is discarded to achieve a smaller filesize.","s":"JPEG","u":"/docs/images/JPEG","h":"","p":799},{"i":802,"t":"Lossless? No Lossy? Yes Supported Bit Depth: 8 BPC HDR/Wide Gamut? Kinda Animation? No Transparency? No Progressive Decode? Yes Royalty Free? Yes","s":"Performance Checklist","u":"/docs/images/JPEG","h":"#performance-checklist","p":799},{"i":804,"t":"Learning how JPEG compresses images is immensely helpful for understanding how other compression methods work in other codecs. It is definitely worth reading to get a useful background in understanding concepts like entropy coding, the DCT, and color spaces other than RGB. Here's a step-by-step explanation of how JPEG compression works: Color Space Conversion: Most digital images are originally in the RGB (Red, Green, Blue) color space. The first step in JPEG compression is to convert the image to the YCbCr color space. Y represents the luminance (brightness), while Cb and Cr represent the chrominance (color information). The Cb & Cr components are subsampled to a quarter of the resolution of the original image, meaning the resulting color space is chroma subsampled with 4:2:0 subsampling. Image Tiling: The image is divided into smaller blocks or tiles, typically 8x8 pixels each. Each of these blocks will be processed separately. Discrete Cosine Transform (DCT): For each 8x8 block, a mathematical transformation called the Discrete Cosine Transform is applied. This transformation converts the pixel values into a set of frequency components, taking spatial data and transforming it to the frequency domain.
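To make the transform concrete before continuing, here is a small sketch using SciPy's general-purpose DCT routines rather than a real JPEG encoder; the level shift by 128 matches what JPEG does to 8-bit samples before the transform:

```python
import numpy as np
from scipy.fft import dctn

# One 8x8 block of 8-bit luma samples, level-shifted to center around zero
block = np.random.randint(0, 256, (8, 8)).astype(np.float64) - 128.0

# 2D type-II DCT; coefficients[0, 0] is the DC (average) term, and
# higher indices correspond to higher spatial frequencies
coefficients = dctn(block, norm="ortho")
```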
The DCT is applied to each color channel in the YCbCr color space. This algorithm is a particularly good choice for image (and music/speech) compression because it has high energy compaction relative to our understanding of images & their perceptual quality. High energy compaction means the DCT is able to represent a signal with a small number of significant coefficients, in this case mainly in the lower frequencies. Quantization: After the DCT, the frequency coefficients are quantized according to a quantization table. Less perceptually important details can be omitted to reduce filesize by discarding coefficients in the table that correspond to less visually salient frequencies. This is \"lossy\" compression, and is the key step in achieving a high compression ratio while still maintaining an image that looks reasonable. The quantization table used in this step can vary in the number of frequencies it attempts to retain, affecting the trade-off between compression & image quality. Zigzag Scanning: The quantized coefficients are then reordered using a zigzag pattern. This is done to prepare the data for the next step. Run-Length Encoding: The zigzag-ordered coefficients are run-length encoded. This means that sequences of zeroes are compressed into a shorter representation. For example, if there are many consecutive zeroes in the data, they can be represented as (0, 10) instead of listing ten individual zeroes. Entropy Encoding: The run-length encoded data is further compressed using entropy encoding. JPEG uses Huffman coding, which assigns shorter codes to more frequently occurring values in the table of DCT coefficients, reducing the overall file size. Saving the File: The compressed luminance and chrominance data, along with information about color space conversion, quantization tables, and EXIF data, are saved in the JPEG file format. Decoding: When you open a JPEG image, the reverse process occurs. The file is decoded, and the DCT coefficients are dequantized, the inverse DCT is applied, and the image is converted back to the RGB color space to be displayed on a screen. It's important to note that JPEG compression is lossy, meaning that some image quality is discarded in the pursuit of smaller file sizes. This makes it different from codecs designed for lossless compression like PNG, WebP's lossless mode, and JPEG-XL's lossless mode. The degree of compression and the quality of the compressed image can be adjusted through settings when saving a JPEG, allowing for a trade-off between file size & image fidelity. While JPEG is certainly not the most state-of-the-art lossy image codec compared to its newer and (usually) better successors like JPEG-XL (an actual direct successor) & AVIF, it enjoys near universal compatibility with (likely) most utilities you would work with in your everyday life that have anything to do with images.","s":"Compression","u":"/docs/images/JPEG","h":"#compression","p":799},{"i":806,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! JPEG-2000 is an older image compression format that uses wavelet technology to achieve high compression ratios while maintaining image quality. It supports both lossy and lossless compression, and is commonly used in applications such as digital photography, medical imaging, and video surveillance.
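As a quick illustration of working with the format, Pillow (when built against OpenJPEG) can write JPEG 2000 files; the 20:1 rate below is illustrative, not a recommendation:

```python
from PIL import Image

img = Image.open("input.png")

# Lossy JPEG 2000 at roughly a 20:1 compression rate; omitting the
# quality arguments and keeping irreversible=False should yield
# lossless output instead
img.save("output.jp2", quality_mode="rates", quality_layers=[20], irreversible=True)
```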
JPEG-2000 files can be transparently compressed and decompressed using a variety of software tools and libraries, making it a flexible and widely-supported format for image storage & transmission. JPEG-2000 never effectively took off on the Web, but digital cinema distribution is often done with JPEG-2000. A \"DCP\" is a \"Digital Cinema Package,\" which is a format used to distribute and play back digital movies in theaters. These DCPs are often compressed losslessly with JPEG-2000.","s":"JPEG 2000","u":"/docs/images/JPEG2000","h":"","p":805},{"i":808,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor!","s":"Graining","u":"/docs/filtering/graining","h":"","p":807},{"i":810,"t":"QOI (Quite OK Image Format) is an image compression format that aims to provide a simple, fast, and efficient way to compress and decompress images losslessly. It was designed to be easy to implement while offering better compression ratios than the widely used but more complex PNG format, all while achieving much faster encoding & decoding speeds.","s":"QOI","u":"/docs/images/QOI","h":"","p":809},{"i":812,"t":"Lossless? Yes Lossy? No Supported Bit Depths: 8 BPC HDR/Wide Gamut? No Animation? No Transparency? Yes Progressive Decode? No Royalty Free? Yes","s":"Performance Checklist","u":"/docs/images/QOI","h":"#performance-checklist","p":809},{"i":814,"t":"QOI compression is based on a simple and fast algorithm that exploits spatial redundancy in images. The algorithm uses a combination of run-length encoding (RLE), a small lookup table, delta encoding, and full-color pixel storage to achieve efficient compression. Depending on the algorithm's decision, a chunk (pixel) can take up one to five bytes. The QOI format supports images with 3 or 4 channels (RGB or RGBA) and 8 bits per channel. The format supports two colorspaces: Linear RGB & sRGB with linear alpha. These do not affect the way pixels are encoded. Here is a breakdown of the various chunk types in QOI: QOI_OP_RGB: Full RGB pixel value using 8 bits (1 byte) for each of the red, green, and blue channels. The alpha channel is 255 in RGB images, and always remains unchanged. QOI_OP_RGBA: Full RGBA pixel value using 8 bits for each of the red, green, blue, & alpha channels. QOI_OP_DIFF: The differences between the current pixel and the previous pixel for the red, green, and blue channels are stored using 2 bits for each channel. The differences are stored with a bias of 2 and wrap (so 1 minus 2 would be 255). The alpha channel remains unchanged. QOI_OP_LUMA: These pixels encode the green channel difference from the previous pixel using 6 bits, and then encode the red and blue channel differences relative to the green channel difference using 4 bits each. This allows for more efficient encoding of small color changes. The alpha channel remains unchanged. QOI_OP_RUN: These are the simplest, encoding a run-length of pixels that are identical to the previous pixel. The run length is stored using 6 bits with a bias of -1, allowing for runs of 1 to 62 pixels. QOI_OP_INDEX: These are stored by referencing a previously seen pixel value from a rolling array of 64 recent pixel values by using a simple hash on each pixel as it is identified. If another pixel matches a previously seen hash value in the array, the index of the referenced pixel is stored.
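The hash behind QOI_OP_INDEX is tiny; per the QOI specification, it is just a weighted sum of the channel values modulo the table size:

```python
def qoi_color_hash(r: int, g: int, b: int, a: int) -> int:
    # Index into the rolling 64-entry array of previously seen pixels,
    # as defined by the QOI specification
    return (r * 3 + g * 5 + b * 7 + a * 11) % 64

seen = [(0, 0, 0, 0)] * 64
pixel = (128, 64, 32, 255)
seen[qoi_color_hash(*pixel)] = pixel
# A later identical pixel can now be emitted as a 1-byte QOI_OP_INDEX chunk
```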
The QOI format also includes a simple 14-byte header that stores the image dimensions, color space, and channel depth information. The end of file is signaled by an 8-byte end marker.","s":"Format Breakdown","u":"/docs/images/QOI","h":"#format-breakdown","p":809},{"i":816,"t":"The creator of QOI benchmarked the format against libpng & stb_image_write using the C implementation in QOI via qoibench.c on a collection of 2,879 screenshots, icons, photos, & textures (source). The results are as follows: Library Decode (ms) Encode (ms) Decode MP/s Encode MP/s Size (kb) Compression Rate libpng 7.0 83.8 66.56 5.54 398 24.2% stbi 7.0 60.5 66.63 7.67 561 34.2% qoi 2.1 2.9 226.03 161.99 463 28.2% The results show that QOI is significantly faster than libpng and stb_image_write, and while it does not quite match libpng's compression rate on this corpus, it compresses considerably better than stb_image_write on average.","s":"Benchmarks","u":"/docs/images/QOI","h":"#benchmarks","p":809},{"i":818,"t":"Some of the key advantages of QOI include: Super simple: the spec is only one page Extremely fast encoding & decoding speeds Data chunks are byte-aligned, so data can be streamed to a decoder one byte at a time Better compression ratios compared to PNG for many types of images Supports transparency Royalty-free, open-source (CC0), & easy to integrate into any application","s":"Advantages","u":"/docs/images/QOI","h":"#advantages","p":809},{"i":820,"t":"Limited to 8 bits per channel (no support for higher bit depths) Not suitable for images with high-frequency noise or very little spatial redundancy Lacks advanced features like progressive loading, interlacing, or custom metadata Despite its limitations, QOI provides a compelling alternative to PNG for many use cases where simplicity, speed, and good compression ratios are desired. QOI is not especially well supported at present, but adoption is rapidly growing as developers can easily integrate support into their applications due to the format's simplicity.","s":"Limitations","u":"/docs/images/QOI","h":"#limitations","p":809},{"i":822,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. Lossless compression is a method of data compression that allows the original data to be perfectly reconstructed from the compressed data. This is particularly important in applications where perfectly preserving the original fidelity of a medium is critical, such as in archiving, generic data compression, & professional media editing. To understand how lossless compression works, we will first delve into the concepts of redundancy, entropy, and specific compression techniques often used in lossless compression.","s":"Lossless Compression","u":"/docs/introduction/lossless","h":"","p":821},{"i":824,"t":"The concepts of redundancy & entropy are important to understand as you continue reading. Redundancy refers to the repetitive or predictable elements in data. These elements do not add new information and can be efficiently encoded to reduce the overall data size without losing any information. Entropy, in the context of information theory, is a measure of the unpredictability or randomness of data. Lower entropy implies higher redundancy, implying that the data is theoretically more compressible.
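As a quick sketch of the idea, Shannon entropy in bits per symbol can be estimated directly from symbol frequencies; note how the highly redundant string from the run-length example below scores far under the 8 bits an uncompressed byte occupies:

```python
import math
from collections import Counter

def entropy_bits_per_symbol(data: bytes) -> float:
    # Shannon entropy: -sum(p * log2(p)) over observed symbol probabilities
    counts = Counter(data)
    total = len(data)
    return -sum((n / total) * math.log2(n / total) for n in counts.values())

print(entropy_bits_per_symbol(b"AAAAABBBCC"))  # ~1.49 bits per symbol
```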
In lossless compression, the goal is to reduce redundancy and encode data as efficiently as possible based on its entropy.","s":"Redundancy & Entropy","u":"/docs/introduction/lossless","h":"#redundancy--entropy","p":821},{"i":826,"t":"Run-Length Encoding (RLE): RLE is a simple form of lossless compression where sequences of the same data value (runs) are stored as a single data value and a count. This technique is effective for compressing data with long runs of identical samples, such as silence or constant tones. For example, the sequence AAAAABBBCC could be encoded as 5A3B2C. Huffman Coding: Huffman coding is an entropy encoding algorithm used for lossless data compression that works by separating an input into component symbols and replacing each symbol with a code. The algorithm builds a binary tree, with each leaf node representing a symbol separated from the input data, and the path from the root to the leaf representing the binary code for that symbol. Huffman coding is effective when the probability distribution of the input characters is known and can be exploited. Imagine you are storing the state of a traffic light; it is either green, yellow, red, or off for maintenance. As the operator, you have determined that it is green 50% of the time, red 40% of the time, yellow 9% of the time, and disabled 1% of the time. Because there are four options, you can accurately represent all of the possible symbols in our example using two bits. Green could be 00, red 01, yellow 10, and off 11. While assigning two-bit codes accurately conveys the information, we're storing an average of two bits per symbol; we can reduce the average number of bits per symbol by taking the probabilities into account here. We'll assign green to 0 since it appears the most frequently; this is the first leaf on our binary tree. Next, we have the leaves that stem from the 1 code; red can simply be 11, while yellow can be 100 and the disabled symbol can be represented by 101. This gives us the following Huffman codes: Green (50%): 0 Red (40%): 11 Yellow ( 9%): 100 Disabled ( 1%): 101 (In the corresponding binary tree, green occupies the lone leaf at 0, while red, yellow, & the disabled state descend from the 1 branch.) Now, if we do the math by multiplying the probability by the length of each code and taking the weighted sum: (50% × 1) + (40% × 2) + (9% × 3) + (1% × 3) = 1.6 We end up with an average of 1.6 bits per symbol. Even in our relatively simple example, this shows how Huffman coding can save space quite effectively while still losslessly representing the same information. Arithmetic Coding Arithmetic coding is another entropy encoding technique that represents an entire message as a single number in the interval [0, 1). Unlike Huffman coding, which assigns fixed binary codes to component symbols separated from an input, arithmetic coding represents multiple symbols with a single floating-point number q which must be within the range 0.0 ≤ q < 1.0. Arithmetic coding is particularly effective when the probability distribution of the symbols is skewed, as it can produce a more compact representation than Huffman coding. It is usually slower than Huffman coding. Let's return to our traffic light example. Let's say you aren't satisfied with our previous Huffman coding result, which has produced an average of 1.6 bits per symbol, and you'd like to use arithmetic coding instead. In arithmetic coding, each of our symbols should first be placed on a range within the interval [0, 1) based on its probability.
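The interval-narrowing update that the walkthrough below applies step by step can be expressed in a few lines of Python (a toy sketch that ignores the precision management real arithmetic coders need):

```python
# (cumulative start, probability) for each traffic light state
ranges = {"G": (0.00, 0.50), "R": (0.50, 0.40), "Y": (0.90, 0.09), "D": (0.99, 0.01)}

low, high = 0.0, 1.0
for symbol in "GRYD":  # Green, Red, Yellow, Disabled
    cumulative, prob = ranges[symbol]
    # Both bounds are computed from the current interval before updating it
    low, high = (low + (high - low) * cumulative,
                 low + (high - low) * (cumulative + prob))

print(low, high)  # ~0.44782, 0.448 -- the same interval the walkthrough reaches
```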
Then, we can narrow down this range as we encode more symbols, eventually arriving at a single number that represents the entire sequence. The steps follow below. Let's use the same probabilities from before: Green (50%) Red (40%) Yellow ( 9%) Disabled ( 1%) We'll assign ranges to each symbol as follows: Green: [0.00, 0.50) Red: [0.50, 0.90) Yellow: [0.90, 0.99) Disabled: [0.99, 1.00) Now, let's encode a sequence of traffic light states: \"Green, Red, Yellow, Disabled\" Start with the interval [0, 1) First symbol (Green): new_low = low + (high - low) * cumulative(s) / total new_high = low + (high - low) * (cumulative(s) + prob(s)) / total new_low = 0 + (0.5 - 0) * 0 / 1.0 new_high = 0 + (0.5 - 0) * (0 + 0.5) / 1.0 Narrow range to [0.00, 0.50) Second symbol (Red): From previous range: [0.00, 0.50) new_low = 0 + (0.50 - 0) * 0.50 / 1 new_high = 0 + (0.50 - 0) * (0.50 + 0.40) / 1 Red is in the range [0.25, 0.45) Third symbol (Yellow): From previous range: [0.25, 0.45) new_low = 0.25 + (0.45 - 0.25) * (0.50 + 0.40) / 1 new_high = 0.25 + (0.45 - 0.25) * (0.50 + 0.40 + 0.09) / 1 Yellow is in the range [0.43, 0.448) Fourth symbol (Disabled): From previous range: [0.43, 0.448) new_low = 0.43 + (0.448 - 0.43) * (0.50 + 0.40 + 0.09) / 1 new_high = 0.43 + (0.448 - 0.43) * (0.50 + 0.40 + 0.09 + 0.01) / 1 Disabled is in the range [0.44782, 0.448) The final interval is [0.44782, 0.448). Any number in this range (we'll pick the lower bound, 0.44782, which is included in the half-open interval) can represent our entire sequence \"Green, Red, Yellow, Disabled\". To decode, we would start with 0.44782 and use our original probability ranges to determine which symbol it corresponds to, then update the value and repeat the process. In this example, we aren't saving any space by using arithmetic coding because our sample is too short to have any pattern to effectively exploit. With longer sequences, arithmetic coding approaches the theoretical entropy limit of 1.408 bits per symbol: -(0.50 * log2(0.50) + 0.40 * log2(0.40) + 0.09 * log2(0.09) + 0.01 * log2(0.01)) ≈ 1.408 In practice, there are often additional considerations regarding precision management that are not present in this simplified example. Prediction and Residual Encoding: Prediction involves using previous data to predict future data. The difference between the predicted and actual data (residual) is encoded instead of the actual data. Linear Predictive Coding (LPC) is a common method where a linear function of previous samples is used to predict the current sample. The residuals typically have lower entropy and can be encoded more efficiently.","s":"Techniques in Lossless Compression","u":"/docs/introduction/lossless","h":"#techniques-in-lossless-compression","p":821},{"i":828,"t":"Help Wanted This section is in need of contributions. If you believe you can help, please see our Contribution Guide to get started as a contributor! Portable Network Graphics (PNG) is a free lossless image file format released in 1996. It was created as an alternative to GIF, which was at the time a proprietary format. It gained animation support similar to GIF with the release of APNG in 2008, which is now supported by all popular web browsers.","s":"PNG","u":"/docs/images/PNG","h":"","p":827},{"i":830,"t":"Lossless? Yes Lossy? No Supported Bit Depths: 8 BPC, 16 BPC HDR/Wide Gamut? Yes Animation? Yes Transparency? Yes Progressive Decode? Kinda Royalty Free?
Yes","s":"Performance Checklist","u":"/docs/images/PNG","h":"#performance-checklist","p":827},{"i":832,"t":"HDR (High Dynamic Range) is a technology used in modern TVs and displays to produce more vibrant and lifelike images. In simple terms, it allows your TV to display a wider range of colors and brightness levels than standard displays. This means that you can see more details in both bright and dark areas of an image, which can make movies, TV shows, and video games look much more realistic. HDR10 works by using metadata that tells your TV how to display the content in the best way possible. This metadata includes information about the maximum brightness level and color gamut of the content, which allows your TV to adjust its settings to match the content being displayed. In other words, HDR10 helps your TV display images that are closer to what the content creators intended you to see, resulting in a more immersive viewing experience.","s":"High Dynamic Range","u":"/docs/introduction/high-dynamic-range","h":"","p":831},{"i":834,"t":"HLG (Hybrid log-gamma) is a type of HDR video format that was developed to optimize video for both standard dynamic range (SDR) and HDR displays, jointly developed by the BBC and NHK. To understand how HLG works, it's helpful to know that the way we perceive brightness and color in a video is different from how it's captured and displayed on a screen. Brightness and color information is usually captured in a logarithmic curve, while SDR displays typically reproduce the image with a gamma curve. HDR displays, on the other hand, reproduce the image with a different type of curve, known as the Perceptual Quantizer (PQ) curve. The HLG curve is a hybrid of these two curves, which means that it's optimized for both SDR and HDR displays. It's designed to work with a wider range of brightness levels than SDR displays, but also be backward compatible with SDR displays. In simpler terms, the HLG curve is a way of capturing and displaying video that works well on both SDR and HDR displays. It's like a bridge between the way video is captured and the way it's displayed, and it's designed to optimize the video for a wider range of brightness levels than traditional SDR video. The result is video content that looks more realistic and vivid on both SDR and HDR displays.","s":"HLG","u":"/docs/introduction/high-dynamic-range","h":"#hlg","p":831},{"i":836,"t":"HDR10 is an open high-dynamic-range video (HDR) standard announced on 27 August 2015 by the Consumer Technology Association. It is the most widespread of the HDR formats. It only allows static metadata.","s":"HDR10","u":"/docs/introduction/high-dynamic-range","h":"#hdr10","p":831},{"i":838,"t":"HDR10+ is basically an upgrade to the previous HDR10 by adding dynamic metadata support (in .json) to optimize each scene's content light level as the director intended.","s":"HDR10+","u":"/docs/introduction/high-dynamic-range","h":"#hdr10-1","p":831},{"i":840,"t":"Dolby Vision is proprietary HDR format developed by Dolby Laboratories and a direct competitor to HDR10+.","s":"Dolby Vision","u":"/docs/introduction/high-dynamic-range","h":"#dolby-vision","p":831},{"i":842,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. Lossy multimedia compression reduces the file size of multimedia data by permanently removing some of the information. 
This process leverages the limitations of the human senses, fidelity metrics, or appeal metrics to discard information that is considered less salient. The goal of lossy compression is to reduce the file size of multimedia data while maintaining a desired level of quality.","s":"Lossy Compression","u":"/docs/introduction/lossy","h":"","p":841},{"i":844,"t":"Lossy compression works by analyzing the input signal and removing parts of it that are less salient. Some processes by which this is done include: Perceptual Coding (audio): This technique removes audio frequencies that are outside the range of human hearing or masked by other sounds. Quantization (audio): This process reduces the precision of certain audio components, which can significantly reduce file size without a noticeable impact on perceived quality. More coming soon","s":"How Lossy Compression Works","u":"/docs/introduction/lossy","h":"#how-lossy-compression-works","p":841},{"i":846,"t":"Multimedia compression as a whole has revolutionized our ability to communicate on the Web & beyond. It has enabled rich experiences across many breakthrough platforms that wouldn't have been feasible otherwise, and it has allowed us to communicate information, expression, and human connection in novel ways. It is the unsung hero of the modern Web. Despite this, it is often difficult to uncover information about codec technology that is accurate, informed, and battle-tested by passionate individuals who care about the proliferation of knowledge. This wiki aims to demystify the realm of multimedia compression while connecting codec enthusiasts to create a shared repository of knowledge for the benefit of everyone.","s":"Prologue","u":"/docs/introduction/prologue","h":"","p":845},{"i":848,"t":"The Codec Wiki is not a highly accurate source for understanding the mathematics, research, adoption/patent politics, or specifications of specific coder/decoder implementations. Sources like Wikipedia cover these details with great accuracy & reliability. What we are focused on is making higher level information - especially related to the usage & application of compression tools - highly accessible; our concern is application, not theory, for the time being. Knowing how a codec works in theory is different from knowing when and how to best use a codec and its accompanying tools.","s":"What This Isn't","u":"/docs/introduction/prologue","h":"#what-this-isnt","p":845},{"i":850,"t":"A codec, shortened from coder/decoder, is a system that handles digital media or data according to a specification. Usually, this means it compresses and decompresses digital media. Codecs are used to encode media for storage and transmission - among other things - and then decode that media for playback, editing, etc. Multimedia codecs either discard less salient data using lossy compression to reduce filesize, or use clever lossless compression tricks to maintain a mathematically identical stream to the input media while still reducing filesize. Lossless compression can be reversed to yield the exact same data as the input, while lossy compression does not share this quality, as it discards data for smaller filesizes. Some common uses of codecs include: Video compression: Video codecs like H.264, VP9 & AV1 allow digital video files to be compressed to much smaller sizes for streaming & storage, among other things. A video codec can encode a video stream while it is being recorded or before it is distributed, and decode it when it is played back.
This allows videos to be shared more quickly and use less storage & bandwidth. Audio compression: Audio codecs like MP3, AAC, and Opus compress audio files like songs & podcasts. This allows them to be easily distributed & stored. Image Compression: Image codecs, whether tried and true like JPEG or brand new like JPEG-XL, have fundamentally the same goal: compress images well while maintaining a versatile featureset for the myriad of ways one may decide they'd like to compress an image. Color depth, HDR, transparency, color space information, EXIF data, and many other factors are at play when working with images that make compressing them easier said than done. Data Compression: General compression algorithms like ZIP & zstd are designed to compress any kind of data, not just multimedia specific data. This includes web assets, executables, text archives, and even entire filesystems. In summary, codecs use complex algorithms to encode and decode media for efficient storage and transmission. They are essential for recording, compressing, delivering and playing back digital media. Different codecs balance factors like compression efficiency, quality, computational requirements, compatibility, & features depending on their application.","s":"What is a Codec","u":"/docs/introduction/prologue","h":"#what-is-a-codec","p":845},{"i":852,"t":"A rather informal list of requirements follows. You will benefit greatly from: Patience. A willingness to learn, engage in curiosity, & follow instructions Basic to intermediate computer proficiency. If you're only here to learn the tools, it will be very beneficial to have: Higher-end CPU hardware, which will decrease wait times for some larger encoding workloads discussed here. A level of comfort with CLI utilities, or enough motivation to engage with them in the absence of background knowledge. A device running an Arch-based Linux distribution, excluding Manjaro Why Arch? Most encoding tools are readily available in the package manager, & it is a bleeding-edge Linux distribution which ensures your utilities are always kept up to date. For filtering, all Vapoursynth plugins are already available in the Arch User Repository (AUR) which makes it extremely easy to install and version control with an AUR helper like yay","s":"What You Need","u":"/docs/introduction/prologue","h":"#what-you-need","p":845},{"i":854,"t":"Under Maintenance The content in this entry is incomplete & is in the process of being completed. Pending Review The content in this entry may not be entirely accurate, & is pending further review to assess the quality of the information. \"Traditionally, the encoder tends to favor blurred reconstructed blocks over blocks which have wrong motion. The human eye generally prefers the wrong motion over the blur. Psycho-visual options combat this. While technically less “correct”, which is why they are disabled for research purposes, they should be enabled for content intended for “human eyes”.\" -- Kokomins' x265 guide","s":"Psychovisual","u":"/docs/introduction/psychovisual","h":"","p":853},{"i":856,"t":"Traditionally, encoders prioritize blurring out fine details to maintain performance on simple metrics like PSNR, but the human visual system is much more complex. This discrepancy can lead to encoders rejecting sharp blocks that don't fit perfectly with adjacent blocks, resulting in a loss of detail.
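For reference, PSNR, the kind of simple metric mentioned above, boils down to a log of the mean squared error; a sketch for 8-bit frames shows why it cannot capture anything about sharpness or texture retention:

```python
import numpy as np

def psnr(reference: np.ndarray, distorted: np.ndarray, peak: float = 255.0) -> float:
    # PSNR only measures average per-pixel error, which is why a slightly
    # blurred block can score better than a sharp but imperfectly
    # motion-matched one
    mse = np.mean((reference.astype(np.float64) - distorted.astype(np.float64)) ** 2)
    return float("inf") if mse == 0 else float(10 * np.log10(peak ** 2 / mse))
```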
Psychovisual options address this issue by providing the encoder with the necessary \"confidence\" to incorporate these sharper blocks, even if they don't strictly adhere to metric performance. Blurring can be beneficial at lower bitrates to help reduce the visibility of blocking artifacts, and it is generally preferable to view a slightly blurry image rather than a blocky one with distracting artifacts. However, it is essential to strike a balance when setting psychovisual options, as over-reliance on these settings can also have negative consequences. Experimentation is key to finding the optimal sweet spot for specific types of content.","s":"Breakdown","u":"/docs/introduction/psychovisual","h":"#breakdown","p":853},{"i":858,"t":"JPEG XL (JXL) is a compression format for images that was developed by the Joint Photographic Experts Group (JPEG) in 2020. It is designed to provide improved compression efficiency compared to the traditional JPEG format, while still maintaining image quality. JPEG XL uses a combination of techniques such as perceptual color encoding, advanced entropy coding, and a new image prediction method to achieve its improved compression performance. It also has a lossless JPEG recompression mode, where an existing JPEG file can be turned into a JXL that can be decoded for a bit-for-bit exact replica of the original JPEG.","s":"JPEG XL","u":"/docs/images/JXL","h":"","p":857},{"i":860,"t":"Lossless? Yes Lossy? Yes Supported Bit Depths: Up to 32 BPC HDR/Wide Gamut? Yes Animation? Yes Transparency? Yes Progressive Decode? Yes Royalty Free? Yes","s":"Performance Checklist","u":"/docs/images/JXL","h":"#performance-checklist","p":857},{"i":862,"t":"JPEG XL has a number of standout features that make it an appealing image codec to work with for many use cases. From the JPEG XL Info page, JXL has the following features: Best lossless image compression: It offers about 35% smaller file sizes than PNG (50% smaller for HDR). High-fidelity lossy image compression: JPEG XL provides about 60% smaller file sizes than JPEG for the same visual quality. Progressive decoding: This allows an image to be displayed in lower quality before the entire file has been downloaded, improving user experience on slow connections. Lossless JPEG transcoding: JPEG images can be converted to JPEG XL without any mathematical loss, and the resulting file is about 20% smaller. Designed for both photographic and synthetic images: JPEG XL works well with a wide range of image types, including photos, graphics, and illustrations. Fast software encoding and decoding: The codec is designed to be efficient and fast, enabling quick image loading and saving. Full support for wide gamut and HDR: JPEG XL supports a wide range of colors and high dynamic range, making it suitable for modern displays. Perceptually optimizing reference encoder: The encoder is designed to optimize image quality based on how humans perceive images.","s":"Format Breakdown","u":"/docs/images/JXL","h":"#format-breakdown","p":857},{"i":864,"t":"JPEG XL offers excellent lossless compression capabilities. While lossless WebP was an improvement over PNG for 8-bit lossless image encoding, JPEG XL manages not only to outdo lossless WebP in encoding efficiency but also to be more versatile for bit depths greater than 8-bit (a category PNG previously dominated).
16-bit lossless imagery, especially HDR images that are becoming more popular & rarely utilize 8-bit color depth, is where JPEG XL shines, and it is the only codec to compete with PNG in that regard while providing better coding efficiency. Example: JPEG XL compresses this 16-bit AdobeRGB PNG better than PNG. Using: cjxl 16bit.png 16bit.jxl -d 0.0 -e 9 -I 100 -g 3 -E 11 16-bit PNG: 1533373 bytes. 16-bit JXL: 1211029 bytes.","s":"Lossless Compression","u":"/docs/images/JXL","h":"#lossless-compression","p":857},{"i":866,"t":"JPEG XL is also adept at lossy compression, especially at quality levels that we as humans care about. It promises to be around 60% better than JPEG. While video-based codecs like AVIF can be competitive when given lots of CPU time, JPEG XL is both fast and efficient for medium to high fidelity photographic imaging.","s":"Lossy Compression","u":"/docs/images/JXL","h":"#lossy-compression","p":857},{"i":868,"t":"JPEG XL supports up to 32 bits per channel of bit depth, making it future-proof for the increasingly popular HDR photos coming out of smartphones. There is essentially zero downside to encoding high bit depth with JXL relative to the resulting encode's size. Considering many smartphones take HDR photos now, JXL offers a compelling pipeline for these photos to make their way to the Web in the future, especially as companies like Adobe & Apple have already embraced the new codec.","s":"Supported Bit Depth(s)","u":"/docs/images/JXL","h":"#supported-bit-depths","p":857},{"i":870,"t":"JPEG XL provides actual progressive decode support that you can experiment with here on a supported browser like Safari, Waterfox, Thorium, Mercury, or any browser on iOS. Progressive decode is a feature that otherwise only JPEG is able to offer a real implementation of, rendering low frequency transform coefficients before the rest of the image arrives to allow an image to display before the entire thing has been sent over the network. Blurhashes do not replace this technology, but rather complement it, allowing another layer of progressive decode that can be used even before the image begins to load progressively. This is an important feature to improve the user experience on websites featuring large images, or on any website if your Internet connection isn't strong.","s":"Progressive Decode","u":"/docs/images/JXL","h":"#progressive-decode","p":857},{"i":872,"t":"An incredibly unique JPEG XL feature is lossless JPEG re-compression, or the ability to take a JPEG input and provide an output with a smaller filesize (on average, 20% smaller) that is pixel-for-pixel identical. This is why companies like Meta have endorsed JPEG XL, as it offers a path forward for the existing JPEGs on the Internet.","s":"Lossless JPEG Re-compression","u":"/docs/images/JXL","h":"#lossless-jpeg-re-compression","p":857},{"i":874,"t":"From the JPEG XL Wikipedia page: Besides Cloudinary and Google originally, throughout JPEG XL's preliminary implementation in web browsers, various representatives of well-known industry brand names have publicly voiced support for JPEG XL as their preferred choice, including Facebook, Adobe, Intel and the Video Electronics Standards Association, The Guardian, Flickr and SmugMug, Shopify, the Krita Foundation, and Serif Ltd.
Apple also features ecosystem-wide JPEG XL support as of iOS 17 & macOS Sonoma.","s":"Industry Support","u":"/docs/images/JXL","h":"#industry-support","p":857},{"i":876,"t":"JPEG XL has the potential to replace popular formats like TIFF for authoring workflows due to its broad feature set. From the JXL Wikipedia, some additional features include: Image dimensions of over a billion (2^30-1) pixels on each side. Up to 4099 channels, including support for alpha transparency There can be multiple frames with zero duration, allowing support for layers in graphics software Animation support, allowing JXL to rival GIF Images can be stored in tiles to reduce the time needed to decode them. Graceful quality degradation across a large range of bitrates means quality loss isn't as abrupt as with older formats. Perceptually optimized reference encoder which uses a perceptual color space, adaptive quantization, and conservative default settings. Support for wide color gamut and HDR Efficient encoding and decoding without requiring specialized hardware: JPEG XL is about as fast to encode and decode as old JPEG using libjpeg-turbo and an order of magnitude faster to encode and decode compared to HEIC with x265. It is also parallelizable. Royalty-free format with an open-source reference implementation available on GitHub.","s":"Other Features","u":"/docs/images/JXL","h":"#other-features","p":857},{"i":878,"t":"JPEG XL has a couple of noteworthy encoders currently available to work with. Because JPEG XL is so new, most encoders aren't yet intelligent enough to take advantage of the whole format. Here's a quote from Jon Sneyers in the JPEG XL Discord that sums it up nicely: Encode side: 80% or so of the coding tools are used in one way or another by the encoder (the 20% is splines and super large VarDCT blocks, and also the things that are not used by default without using special experimental options, such as delta palette and noise). But the coding tools that are used, are typically used in a specific, limited way that doesn't come anywhere close to exhausting the bitstream expressivity. Sneyers is talking about libjxl's cjxl encoder, which will be discussed further below.","s":"Encoders","u":"/docs/images/JXL","h":"#encoders","p":857},{"i":880,"t":"The reference libjxl implementation has the capability to both decode and encode JPEG XL image files. Both are discussed below. libjxl's encoder cjxl has more options to play around with. It takes a few primary arguments, distance (-d), quality (-q), and effort (-e). Distance and quality Distance and quality are two ways of specifying how much loss you are willing to tolerate, and as such, they are mutually exclusive, as they pull the same levers under the hood. Distance is designed to map to how 'close' one must be to the source to notice any loss. It is represented as a scale between 0.0 & 25.0. 0.0 is mathematically lossless: every pixel will have the exact same value as the source. 1.0 is designed to be visually lossless, looking the same at a normal viewing distance, and higher values have more loss. Quality is designed to roughly map to JPEG's quality argument. It is a range of 0-100, where 100 is mathematically lossless, 90 is intended to be visually lossless, and 0 is almost unrecognizable as the original image. Effort Effort is similar to cpu-used in video encoding. It specifies the amount of effort the encoder will make in order to get the smallest file size it can.
It takes the form of a range 1-9, where higher numbers will spend more resources to get diminishing returns in terms of smaller size, while lower values do the opposite, leaving file size on the table for faster encoding. Encoding with effort 9 and distance 1.0: cjxl -e 9 -d 1.0 example.png example.jxl Passing a JPEG input instead uses lossless JPEG recompression by default: cjxl example.jpg example.jxl Decoding a .jxl image is straightforward with libjxl's decoder, djxl: djxl example.jxl example.png djxl can decode to pixels via pipes, png, apng for animated jxl, jpg, ppm, and pfm. By default, if the .jxl file was encoded with lossless jpeg recompression, djxl will rebuild the exact jpeg file that was originally compressed. To avoid this, and create a new jpeg file: djxl -j example.jxl example.jpg Keep in mind this is now a lossy process as djxl will decode to pixels, then encode a new .jpg with those pixels. A full build guide is provided in the libjxl build instructions in the GitHub repo. This guide is simplified, and is only focused on building a working efficient encoder & decoder. These instructions should work for macOS and Linux, although macOS support isn't guaranteed. 1. Clone the repo git clone https://github.com/libjxl/libjxl.git --recursive --shallow-submodules 2. Install dependencies. You may have to run these commands as root: apt install cmake pkg-config libbrotli-dev clang # Debian Linux pacman -Syu cmake pkgconf brotli clang # Arch Linux brew install cmake pkg-config brotli # macOS 3. Set the CC & CXX variables before building (recommended): export CC=clang CXX=clang++ 4. Configure & build; cjxl & djxl will be available in the build/tools directory: cd libjxl && mkdir build && cd build cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-O3 -march=native\" -DCMAKE_C_FLAGS=\"-O3 -march=native\" -DBUILD_TESTING=OFF -DJPEGXL_WARNINGS_AS_ERRORS=OFF -DJPEGXL_ENABLE_SJPEG=OFF .. cmake --build . -- -j$(nproc) This will build cjxl and djxl with O3 optimization for your CPU architecture on Linux or macOS. Again, be aware that macOS support is not a priority. Via the libjxl OS X build guide: OSX builds have \"best effort\" support, i.e. build might not work at all, some tests may fail and some sub-projects are excluded from build.","s":"libjxl","u":"/docs/images/JXL","h":"#libjxl","p":857},{"i":882,"t":"libjxl-tiny contains a simpler encoder implementation of JPEG XL, aimed at photographic images without an alpha channel. The goal is to guide hardware implementations of the encoder where support for the full set of encoding tools is not feasible. The color management is outside the scope of this library; the encoder input is given as a portable float map (PFM) in the linear sRGB colorspace, where individual sample values can be outside the [0.0, 1.0] range for out-of-gamut colors. For more details, see the overview of the coding tools. At the time of writing, the last commit was ten months ago, so it is uncertain whether libjxl-tiny could be considered active.","s":"libjxl-tiny","u":"/docs/images/JXL","h":"#libjxl-tiny","p":857},{"i":884,"t":"Hydrium is a fast, ultra-low-memory, streaming JPEG XL encoder written in portable C. It is maintained by Traneptora.","s":"Hydrium","u":"/docs/images/JXL","h":"#hydrium","p":857},{"i":886,"t":"zune-jpegxl is a simple, fast and fully safe modular JXL encoder written in Rust. It is maintained by etemesi254.
zune-jpegxl has the following features: Lossless encoding 8 bit and 16 bit support Grayscale and RGBA encoding Threading capabilities","s":"zune-jpegxl","u":"/docs/images/JXL","h":"#zune-jpegxl","p":857},{"i":889,"t":"jxl-oxide is a spec-conforming JPEG XL decoder written in pure Rust. It is maintained by Wonwoo Choi. Sources: JXL Wikipedia JPEGXL.info: Why JXL Apple JXL Announcement JPEG XL: How It Started, How It's Going The Case for JPEG XL Time for Next-Gen Codecs to Dethrone JPEG Image Codec Comparison","s":"jxl-oxide","u":"/docs/images/JXL","h":"#jxl-oxide","p":857},{"i":891,"t":"When learning about encoding technology, it is important to understand the vast terminology that is often used to describe concepts that are usually not very complex to understand.","s":"Terminology","u":"/docs/introduction/terminology","h":"","p":890},{"i":893,"t":"A bitstream or bit stream is a media file, the kind that is played in a media player. It consists of a container wrapping multiple elementary streams.","s":"Bitstream","u":"/docs/introduction/terminology","h":"#bitstream","p":890},{"i":895,"t":"Lossy encoding throws out some of the detail to achieve a smaller size. Often, this is an acceptable trade-off, but if you need a perfect recreation of the data, you need lossless encoding.","s":"Lossy / Lossless","u":"/docs/introduction/terminology","h":"#lossy--lossless","p":890},{"i":897,"t":"An elementary stream is an audio, video, or subtitle track. Basically, it's the compressed data you want to mux into the container.","s":"Elementary stream","u":"/docs/introduction/terminology","h":"#elementary-stream","p":890},{"i":899,"t":"Putting elementary streams into a container, which preserves them without making any changes to the data.","s":"Muxing","u":"/docs/introduction/terminology","h":"#muxing","p":890},{"i":901,"t":"A codec (coder/decoder) is the piece of code that actually encodes the data you put in. It produces an elementary stream as output when encoding, and consumes one as input when decoding. More information is provided in the prologue under \"What is a Codec\".","s":"Codec","u":"/docs/introduction/terminology","h":"#codec","p":890},{"i":903,"t":"A filter is a piece of code you can apply to the data to make something about it different, for instance sharpening, artifact removal, stabilization, denoising, scaling, overlays, etc.","s":"Filter","u":"/docs/introduction/terminology","h":"#filter","p":890},{"i":905,"t":"The pieces of code that mux or do the reverse, getting elementary streams from the container.","s":"Muxer/Demuxer","u":"/docs/introduction/terminology","h":"#muxerdemuxer","p":890},{"i":907,"t":"A bitstream filter is a filter that is directly applied to the bitstream in order to change something about the container, for instance, convert frame types, or corrupt some packets.","s":"Bitstream filter","u":"/docs/introduction/terminology","h":"#bitstream-filter","p":890},{"i":909,"t":"A container is a format for putting one or more elementary streams into one file, which is then called a bitstream. A video container is a digital file format that holds video and audio data, as well as additional information such as subtitles, metadata, and chapter markers. It acts as a \"wrapper\" that packages all these elements into a single file that can be played on various devices and software platforms. Think of it like a container you might use to transport goods - the video and audio data are like the items being transported, while the container itself provides a structure and organization for the contents.
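As a concrete sketch of muxing before the container rundown below, this wraps FFmpeg in Python to repackage elementary streams without touching the compressed data (filenames are placeholders; ffmpeg must be installed):

```python
import subprocess

# "-c copy" stream-copies the H.264 video and Opus audio into Matroska,
# muxing without re-encoding either elementary stream
subprocess.run([
    "ffmpeg", "-i", "video.h264", "-i", "audio.opus",
    "-c", "copy", "output.mkv",
], check=True)
```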
Some kinds of containers: MP4 / M4V: This is likely the most common container you've encountered, & has near universal compatibility. Has a limited maximum number of streams. The supported video codecs are H.264, H.265, H.266, DivX, Xvid, VP9 (Unofficial, hacky), and AV1 (Unofficial, hacky). For audio codecs it's many of the various flavors of AAC, MP3, FLAC (Unofficial), Opus (Unofficial, hacky). For subtitles only MPEG-4 Timed Text (TTXT) is supported. The best tool to work with this container is MP4Box, but FFmpeg also works. MOV: Similar to MP4, but less supported. Made with Apple QuickTime in mind, supports ProRes. MKV / MKA / MKS / MK3D: Also known as Matroska, allows an unlimited number of video/audio/subtitle streams and practically any codec, even ones that probably still exist in Area 51; you can put literally anything in there and it won't even care: MPEG-2/DivX/H.266/Theora/Thor/RealVideo/MJPEG/AVS3/AMR-WB, you name it. It is the all-around best container to work with if you have the choice. WebM: A container made with web streaming in mind. WebM is a stripped-down subset of MKV that only allows free & open source codecs such as VP8, VP9 or AV1 for video alongside Vorbis or Opus for audio. It is a common misconception that WebVTT tracks always work natively in browsers when within a WebM container; in practice, WebMs containing WebVTT subtitles will usually not play back the subtitles in browsers. WebVTT subtitles can be utilized with the