Add probe slow, add devcontainer, update readme (#289)

* Dockerfile: Update copy with chown, separate copy requirements from all
* Add probe-slow support
* README: Update readme with new rust cli options
* Dockerfile: Hide av1an venv
* Add devcontainer
* Action: Add baseline-select
Luigi311 2021-07-09 10:32:47 -06:00 committed by GitHub
parent 8e8f3a536e
commit 308e97047e
8 changed files with 177 additions and 86 deletions


@@ -0,0 +1,32 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.183.0/containers/docker-existing-dockerfile
{
"name": "Devcontainer Av1an",
// Sets the run context to one level up instead of the .devcontainer folder.
"context": "..",
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
"dockerFile": "../Dockerfile",
// Set *default* container specific settings.json values on container create.
"settings": {},
// Add the IDs of extensions you want installed when the container is created.
"extensions": []
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment the next line to run commands after the container is created - for example installing curl.
// "postCreateCommand": "apt-get update && apt-get install -y curl",
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
// "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
// "remoteUser": "vscode"
}


@@ -24,12 +24,16 @@ jobs:
fail-fast: false
matrix:
enc: [aom, rav1e, svt_av1, vpx, x265, x264]
name: [baseline, target-quality]
name: [baseline, baseline-select, target-quality, probe-slow]
include:
- name: baseline
flags: ""
- name: baseline-select
flags: "--chunk-method select"
- name: target-quality
flags: --target-quality 95
- name: probe-slow
flags: --target-quality 95 --probe-slow
- name: chunk_hybrid
enc: aom
flags: --chunk-method hybrid


@@ -21,29 +21,30 @@ RUN cmake .. -DCMAKE_BUILD_TYPE=Release && \
# Create user
RUN useradd -ms /bin/bash app_user
# Copy av1an
COPY . /Av1an
WORKDIR /Av1an
# Change permissions
RUN chmod 777 -R /Av1an
# Change user
USER app_user
# Install rust
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y --default-toolchain nightly
# Copy av1an requirements
COPY --chown=app_user requirements.txt /Av1an/requirements.txt
WORKDIR /Av1an
# Create virtualenv required for maturin develop
ENV VIRTUAL_ENV=/Av1an/venv
ENV VIRTUAL_ENV=/Av1an/.venv
RUN python3 -m venv "${VIRTUAL_ENV}"
ENV PATH="$VIRTUAL_ENV/bin:/home/app_user/.cargo/bin:$PATH"
# Install av1an requirements and build rust requirements
# Install av1an requirements
RUN pip3 install wheel && pip3 install -r requirements.txt vapoursynth
# Copy av1an and build av1an
COPY --chown=app_user . /Av1an
RUN maturin develop --release -m av1an-pyo3/Cargo.toml
# Open up /Av1an to all users
RUN chmod 777 /Av1an
VOLUME ["/videos"]
WORKDIR /videos


@@ -25,9 +25,9 @@ Example with default parameters:
With your own parameters:
av1an -i input -enc aom -v "--cpu-used=3 --end-usage=q --cq-level=30 --threads=8" -w 10
--split_method aom_keyframes --target_quality 95 --vmaf_path "vmaf_v0.6.1.pkl"
-min_q 20 -max_q 60 -ff "-vf scale=-1:1080" -a "-c:a libopus -ac 2 -b:a 192k"
av1an -i input -e aom -v " --cpu-used=3 --end-usage=q --cq-level=30 --threads=8 " -w 10
--split-method aom_keyframes --target-quality 95 --vmaf-path "vmaf_v0.6.1.pkl"
-min-q 20 -max-q 60 -f "-vf scale=-1:1080" -a "-c:a libopus -ac 2 -b:a 192k"
-s scenes.csv -log my_log -o output
@@ -37,15 +37,15 @@ With your own parameters:
-i --input Input file(s), or Vapoursynth (.py,.vpy) script
(relative or absolute path)
-o --output_file Name/Path for output file (Default: (input file name)_(encoder).mkv)
-o --output-file Name/Path for output file (Default: (input file name)_(encoder).mkv)
Output file ending is always `.mkv`
-enc --encoder Encoder to use
-e --encoder Encoder to use
(`aom`,`rav1e`,`svt_av1`,`vpx`,`x265`, `x264`)
Default: aom
Example: -enc rav1e
-v --video_params Encoder settings flags (If not set, will be used default parameters.)
-v --video-params Encoder settings flags (If not set, default parameters will be used.)
Must be inside ' ' or " "
-p --passes Set number of passes for encoding
@@ -63,35 +63,35 @@ With your own parameters:
-q --quiet Do not print a progress bar to the terminal.
-l --logging Path to .log file(By default created in temp folder)
-l --logging Path to .log file(By default created in temp folder)
--temp Set path for the temporary folder. Default: .temp
--mkvmerge Use mkvmerge for concatenating instead of FFmpeg.
Use when concatenation fails.
-c --concat Concatenation method to use for splits Default: ffmpeg
[possible values: ffmpeg, mkvmerge, ivf]
--webm Outputs webm file.
Use only if you're sure the source video and audio are compatible.
<h3 align="center">FFmpeg options</h3>
-a --audio_params FFmpeg audio settings (Default: copy audio from source to output)
-a --audio-params FFmpeg audio settings (Default: copy audio from source to output)
Example: -a '-c:a libopus -b:a 64k'
-ff --ffmpeg FFmpeg options video options.
-f --ffmpeg FFmpeg video options.
Applied to each encoding segment individually.
(Warning: Cropping doesn't work with Target VMAF mode
without specifying it in --vmaf_filter)
without specifying it in --vmaf-filter)
Example:
--ff " -vf scale=320:240 "
-fmt --pix_format Setting custom pixel/bit format for piping
--pix-format Setting custom pixel/bit format for piping
(Default: 'yuv420p10le')
Options should be adjusted accordingly, based on the encoder.
<h3 align="center">Segmenting</h3>
--split_method Method used for generating splits.(Default: PySceneDetect)
--split-method Method used for generating splits.(Default: PySceneDetect)
Options: `pyscene`, `aom_keyframes`, `none`
`pyscene` - PyScenedetect, content based scenedetection
with threshold.
@@ -103,14 +103,14 @@ With your own parameters:
fewer dependencies.
`none` - skips scenedetection. Useful for splitting by time
-cm --chunk_method Determine the method in which chunks are made for encoding.
-m --chunk-method Determine the method in which chunks are made for encoding.
By default the best method is selected automatically in this order:
vs_ffms2 > vs_lsmash > hybrid.
vs_ffms2 or vs_lsmash are recommended.
['hybrid'(default), 'select', 'vs_ffms2', 'vs_lsmash']
-tr --threshold PySceneDetect threshold for scene detection Default: 35
-t --threshold PySceneDetect threshold for scene detection Default: 35
-s --scenes Path to file with scenes timestamps.
If the file doesn't exist, a new file will be generated
@@ -118,17 +118,17 @@ With your own parameters:
First run to generate stamps, all next reuse it.
Example: "-s scenes.csv"
-xs --extra_split Adding extra splits if frame distance between splits bigger than the
-x --extra-split Adding extra splits if frame distance between splits is bigger than the
given value. Pair with none for time based splitting or with any
other splitting method to break up massive scenes.
Example: 1000 frames video with a single scene,
-xs 200 will add splits at 200,400,600,800.
--min_scene_len Specifies the minimum number of frames in each split.
--min-scene-len Specifies the minimum number of frames in each split.
<h3 align="center">Target Quality</h3>
--target_quality Quality value to target.
--target-quality Quality value to target.
VMAF used as substructure for algorithms.
Supported in all encoders supported by Av1an.
Best works in range 85-97.
@@ -137,27 +137,23 @@ With your own parameters:
and some quantizer option provided. (This value will be replaced)
`--crf`,`--cq-level`,`--quantizer` etc
--target_quality_method Type of algorithm for use.
--target-quality-method Type of algorithm to use.
Options: per_shot
--min_q, --max_q Min,Max Q values limits
--min-q, --max-q Min,Max Q values limits
If not set by the user, the default for encoder range will be used.
--vmaf Calculate VMAF after encoding is done and make a plot.
--vmaf_plots Make plots for target quality search decisions
(Exception: early skips)
Saved in the temp folder by default.
--vmaf_path Custom path to libvmaf models.
example: --vmaf_path "vmaf_v0.6.1.pkl"
--vmaf-path Custom path to libvmaf models.
example: --vmaf-path "vmaf_v0.6.1.pkl"
Recommended to place both files in encoding folder
(`vmaf_v0.6.1.pkl` and `vmaf_v0.6.1.pkl.model`)
(Required if VMAF calculation doesn't work by default)
--vmaf_res Resolution scaling for VMAF calculation,
--vmaf-res Resolution scaling for VMAF calculation,
vmaf_v0.6.1.pkl is 1920x1080 (by default),
vmaf_4k_v0.6.1.pkl is 3840x2160 (don't forget about vmaf_path)
vmaf_4k_v0.6.1.pkl is 3840x2160 (don't forget about vmaf-path)
--probes Number of probes for interpolation.
1 and 2 probes have special cases to try to work with few data points.
@@ -166,14 +162,14 @@ With your own parameters:
--probe-slow Use video encoding parameters for vmaf probes to get a more
accurate Q at the cost of speed.
--vmaf_filter Filter used for VMAF calculation. The passed format is filter_complex.
--vmaf-filter Filter used for VMAF calculation. The passed format is filter_complex.
So if crop filter used ` -ff " -vf crop=200:1000:0:0 "`
`--vmaf_filter` must be : ` --vmaf_filter "crop=200:1000:0:0"`
`--vmaf-filter` must be : ` --vmaf-filter "crop=200:1000:0:0"`
--probing_rate Setting rate for VMAF probes (Every N frame used in probe, Default: 4)
--probing-rate Setting rate for VMAF probes (Every N frame used in probe, Default: 4)
--n_threads Limit number of threads that are used for VMAF calculation
Example: --n_threads 12
--vmaf-threads Limit number of threads that are used for VMAF calculation
Example: --vmaf-threads 12
(Required if VMAF calculation gives error on high core counts)
<h2 align="center">Main Features</h2>
@@ -237,11 +233,11 @@ With your own parameters:
Av1an can be run in a Docker container with the following command if you are in the current directory
Linux
```bash
docker run -v "$(pwd)":/videos --user $(id -u):$(id -g) -it --rm masterofzen/av1an:latest -i S01E01.mkv {options}
docker run --privileged -v "$(pwd):/videos" --user $(id -u):$(id -g) -it --rm masterofzen/av1an:latest -i S01E01.mkv {options}
```
Windows
```powershell
docker run -v ${PWD}:/videos -it --rm masterofzen/av1an:latest -i S01E01.mkv {options}
docker run --privileged -v "${PWD}:/videos" -it --rm masterofzen/av1an:latest -i S01E01.mkv {options}
```
Docker can also be built by using
@@ -252,7 +248,7 @@ docker build -t "av1an" .
To specify a different directory to use you would replace $(pwd) with the directory
```bash
docker run -v /c/Users/masterofzen/Videos:/videos --user $(id -u):$(id -g) -it --rm masterofzen/av1an:latest -i S01E01.mkv {options}
docker run --privileged -v "/c/Users/masterofzen/Videos":/videos --user $(id -u):$(id -g) -it --rm masterofzen/av1an:latest -i S01E01.mkv {options}
```
The --user flag is required on Linux to avoid permission issues with the Docker container not being able to write to the location. If you get permission issues, ensure your user has access to the folder that you are using to encode.


@@ -131,6 +131,10 @@ pub struct Args {
#[clap(long, default_value = "4")]
probing_rate: usize,
/// Use encoding settings for probes
#[clap(long)]
probe_slow: bool,
/// Min q for target_quality
#[clap(long)]
min_q: Option<u8>,
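
A boolean clap field declared with a bare `#[clap(long)]`, as `probe_slow` is above, acts as a presence flag: passing `--probe-slow` sets it to `true`, omitting it leaves it `false`. A minimal, self-contained sketch of that behaviour, assuming clap 3's `Parser` derive (earlier clap 3 betas name the trait `Clap`); the trimmed-down struct and the argument values are hypothetical:

```rust
use clap::Parser; // assumes clap 3 with the "derive" feature enabled

/// Hypothetical, trimmed-down stand-in for the CLI's Args struct.
#[derive(Parser)]
struct ProbeArgs {
    /// Setting rate for VMAF probes
    #[clap(long, default_value = "4")]
    probing_rate: usize,

    /// Use encoding settings for probes
    #[clap(long)]
    probe_slow: bool,
}

fn main() {
    // Field names are exposed as kebab-case long flags by the derive.
    let args = ProbeArgs::parse_from(["av1an", "--probing-rate", "2", "--probe-slow"]);
    assert!(args.probe_slow);
    assert_eq!(args.probing_rate, 2);
}
```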


@@ -606,6 +606,77 @@ impl Encoder {
}
}
pub fn construct_target_quality_command_probe_slow(&self, q: String) -> Vec<Cow<str>> {
match &self {
Self::aom => into_vec![
"aomenc",
"--passes=1",
format!("--cq-level={}", q),
],
Self::rav1e => into_vec![
"rav1e",
"-y",
"--quantizer",
q,
],
Self::libvpx => into_vec![
"vpxenc",
"--passes=1",
"--pass=1",
"--codec=vp9",
"--end-usage=q",
format!("--cq-level={}", q),
],
Self::svt_av1 => into_vec![
"SvtAv1EncApp",
"-i",
"stdin",
"--crf",
q,
],
Self::x264 => into_vec![
"x264",
"--log-level",
"error",
"--demuxer",
"y4m",
"-",
"--no-progress",
"--crf",
q,
],
Self::x265 => into_vec![
"x265",
"--log-level",
"0",
"--no-progress",
"--y4m",
"--crf",
q,
],
}
}
// Function remove_patterns that takes in args and patterns and removes all instances of the patterns from the args.
pub fn remove_patterns(&self, args: Vec<String>, patterns: Vec<String>) -> Vec<String> {
let mut out = args.clone();
for pattern in patterns {
if let Some(index) = out.iter().position(|value| value.contains(&pattern)) {
out.remove(index);
// If pattern does not contain =, we need to remove the index that follows.
if pattern.contains("=") == false {
out.remove(index);
}
}
}
out
}
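
One detail worth calling out in `remove_patterns` above: when a matched pattern contains no `=`, the flag's value arrives as a separate token, so the element that follows is removed as well. A minimal, self-contained sketch of the same logic (a hypothetical free function over plain `String`s rather than the crate's method on `Encoder`):

```rust
// Standalone sketch of the pattern-stripping logic shown above.
fn remove_patterns(args: Vec<String>, patterns: Vec<String>) -> Vec<String> {
    let mut out = args;
    for pattern in patterns {
        if let Some(index) = out.iter().position(|value| value.contains(&pattern)) {
            out.remove(index);
            // A pattern without '=' is a flag whose value is a separate token,
            // so the element that followed it is dropped as well.
            if !pattern.contains('=') {
                out.remove(index);
            }
        }
    }
    out
}

fn main() {
    let args: Vec<String> = ["--preset", "slow", "--crf", "23", "--threads", "8"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    let patterns: Vec<String> = ["--crf", "--cq-level="].iter().map(|s| s.to_string()).collect();
    // "--crf" and its separate value "23" are removed; "--cq-level=" is not present.
    assert_eq!(
        remove_patterns(args, patterns),
        vec!["--preset", "slow", "--threads", "8"]
    );
}
```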
// Function unwrap cow strings that take in a vec of strings and returns a vec of strings.
pub fn decow_strings(&self, args: Vec<Cow<str>>) -> Vec<String> {
args.iter().map(|s| s.to_string()).collect::<Vec<String>>()
}
pub fn probe_cmd(
&self,
temp: String,
@@ -614,6 +685,8 @@ impl Encoder {
ffmpeg_pipe: Vec<String>,
probing_rate: String,
n_threads: String,
video_params: Vec<String>,
probe_slow: bool,
) -> (Vec<String>, Vec<String>) {
let pipe: Vec<String> = chain!(
into_vec![
@@ -639,8 +712,18 @@ impl Encoder {
probe.push(&probe_name);
let probe_path = probe.into_os_string().into_string().unwrap();
let ps = self.construct_target_quality_command(n_threads, q);
let params = ps.iter().map(|s| s.to_string()).collect::<Vec<String>>();
let mut params;
if probe_slow {
let mut args = video_params.clone();
let patterns = into_vec!["--cq-level=", "--passes=", "--pass=", "--crf", "--quantizer"];
args = self.remove_patterns(args, patterns);
let ps = self.construct_target_quality_command_probe_slow(q);
params = self.decow_strings(ps);
params.append(&mut args)
} else {
let ps = self.construct_target_quality_command(n_threads, q);
params = self.decow_strings(ps);
}
let output: Vec<String> = match &self {
Self::aom => chain!(params, into_vec!["-o", probe_path, "-"]).collect(),
@@ -653,40 +736,6 @@ impl Encoder {
(pipe, output)
}
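
Taken together, the probe-slow path assembles a probe command from the encoder's probe-slow base arguments plus whatever is left of the user's `--video-params` once quantizer and pass flags are stripped. A rough, self-contained illustration for aom, using plain `String`s and a made-up probe path rather than the crate's `Cow`/`into_vec!` helpers:

```rust
fn main() {
    // User-supplied --video-params as they would reach probe_cmd().
    let video_params: Vec<String> = ["--cpu-used=6", "--end-usage=q", "--cq-level=30", "--threads=8"]
        .iter()
        .map(|s| s.to_string())
        .collect();

    // Step 1: drop quantizer/pass flags so the probe's own q takes effect
    // (the role remove_patterns() plays above).
    let stripped: Vec<String> = video_params
        .into_iter()
        .filter(|a| {
            !a.starts_with("--cq-level=") && !a.starts_with("--passes=") && !a.starts_with("--pass=")
        })
        .collect();

    // Step 2: start from the probe-slow base command for aom at the probe q...
    let q = 40;
    let mut params: Vec<String> = vec![
        "aomenc".to_string(),
        "--passes=1".to_string(),
        format!("--cq-level={}", q),
    ];
    // ...and append what is left of the user's settings.
    params.extend(stripped);

    // Step 3: probe_cmd() finishes the aom command with ["-o", probe_path, "-"];
    // the path here is made up for the example.
    params.extend(["-o".to_string(), "/tmp/probe.ivf".to_string(), "-".to_string()]);

    println!("{}", params.join(" "));
    // aomenc --passes=1 --cq-level=40 --cpu-used=6 --end-usage=q --threads=8 -o /tmp/probe.ivf -
}
```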
pub fn construct_target_quality_slow_command(&self, q: String) -> Vec<Cow<str>> {
match &self {
Encoder::aom => into_vec!["aomenc", "--passes=1", format!("--cq-level={}", q),],
Encoder::rav1e => into_vec!["rav1e", "-y", "--quantizer", q],
Encoder::libvpx => into_vec![
"vpxenc",
"--passes=1",
"--pass=1",
format!("--cq-level={}", q),
],
Encoder::svt_av1 => into_vec!["SvtAv1EncApp", "-i", "stdin", "--crf", q,],
Encoder::x264 => into_vec![
"x264",
"--log-level",
"error",
"--demuxer",
"y4m",
"-",
"--no-progress",
"--crf",
q,
],
Encoder::x265 => into_vec![
"x265",
"--log-level",
"0",
"--no-progress",
"--y4m",
"--crf",
q,
],
}
}
}
pub fn compose_ffmpeg_pipe(params: Vec<String>) -> Vec<String> {


@@ -371,9 +371,11 @@ fn probe_cmd(
ffmpeg_pipe: Vec<String>,
probing_rate: String,
n_threads: String,
video_params: Vec<String>,
probe_slow: bool,
) -> PyResult<(Vec<String>, Vec<String>)> {
let encoder = av1an_encoder_constructor::Encoder::from_str(&encoder).unwrap();
Ok(encoder.probe_cmd(temp, name, q, ffmpeg_pipe, probing_rate, n_threads))
Ok(encoder.probe_cmd(temp, name, q, ffmpeg_pipe, probing_rate, n_threads, video_params, probe_slow))
}
#[pyfunction]


@@ -44,6 +44,7 @@ class TargetQuality:
self.temp = project.temp
self.workers = project.workers
self.video_params = project.video_params
self.probe_slow = project.probe_slow
self.probing_rate = adapt_probing_rate(self.probing_rate, 20)
def per_shot_target_quality(self, chunk: Chunk):
@@ -140,6 +141,8 @@ class TargetQuality:
self.ffmpeg_pipe,
str(self.probing_rate),
str(n_threads),
self.video_params,
self.probe_slow,
)
ffmpeg_gen_pipe = subprocess.Popen(
chunk.ffmpeg_gen_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT