Merge branch 'master' into master

commit 88bdd065db
Author: Zen, 2021-01-17 00:08:55 +02:00 (committed by GitHub)
12 changed files with 56 additions and 56 deletions

@@ -2,6 +2,9 @@
- Added VMAF validation each time VMAF is initialized
- Fixed running a frame probe when it is not required
- Chunk restarting
- Fixed ffmpeg segmenting
- `color_range 0` by default for pipes
- aomenc target quality probes are now 8 bit
### 4
- Refactored Args to Project class

@@ -197,6 +197,7 @@ With your own parameters:
## Install
* Prerequisites:
* [Windows Prebuilds](https://ci.appveyor.com/project/master-of-zen/av1an/build/artifacts)
* [Install Python3](https://www.python.org/downloads/) <br>
When installing under Windows, select the option `add Python to PATH` in the installer
* [Install FFmpeg](https://ffmpeg.org/download.html)
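
As a quick sanity check after the steps above, a short snippet (illustrative only, not part of Av1an) can confirm that FFmpeg ended up on PATH:

```python
# Illustrative sketch, not part of Av1an: confirm FFmpeg is reachable from PATH.
import shutil

ffmpeg_path = shutil.which("ffmpeg")
if ffmpeg_path is None:
    raise SystemExit("FFmpeg was not found on PATH; install it or add its folder to PATH.")
print(f"Using FFmpeg at: {ffmpeg_path}")
```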

@@ -142,6 +142,6 @@ class Args:
# Misc
misc_group = parser.add_argument_group('Misc')
misc_group.add_argument('--version', action='version', version=f'Av1an version: 5.5-3')
misc_group.add_argument('--version', action='version', version=f'Av1an version: 5.5-4')
# Initialize project with initial values
return parser

@@ -23,7 +23,7 @@ class Counter:
self.initial = initial
self.left = total - initial
self.current = 0
self.use_tqdm = (use_tqdm and tqdm is not None)
self.use_tqdm = (use_tqdm and (tqdm is not None))
if use_tqdm:
self.tqdm_bar = tqdm(total=self.left, initial=0, dynamic_ncols=True, unit="fr", leave=True, smoothing=0.01)

@@ -148,6 +148,6 @@ class EncodingManager:
print(f'\rQueue: {clips} Workers: {project.workers} Passes: {project.passes}\n'
f'Params: {" ".join(project.video_params)}')
BaseManager.register('Counter', Counter)
counter = Manager().Counter(project.get_frames(), self.initial_frames)
counter = Manager().Counter(project.get_frames(), self.initial_frames, not project.quiet)
project.counter = counter

@@ -193,7 +193,7 @@ def aom_keyframes(video_path: Path, stat_file, min_scene_len, ffmpeg_pipe, video
f, e = compose_aomsplit_first_pass_command(video_path, stat_file, ffmpeg_pipe, video_params, is_vs)
tqdm_bar = None
if not quiet and tqdm is not None:
if (not quiet) and (not (tqdm is None)):
tqdm_bar = tqdm(total=total, initial=0, dynamic_ncols=True, unit="fr", leave=True, smoothing=0.2)
ffmpeg_pipe = subprocess.Popen(f, stdout=PIPE, stderr=STDOUT)
@@ -212,7 +212,7 @@ def aom_keyframes(video_path: Path, stat_file, min_scene_len, ffmpeg_pipe, video
if line:
encoder_history.append(line)
if quiet or tqdm is None:
if quiet or (tqdm is None):
continue
match = re.search(r"frame.*?/([^ ]+?) ", line)

@@ -7,7 +7,7 @@ from av1an.logger import log
from av1an.vapoursynth import compose_vapoursynth_pipe
if sys.platform == "linux":
from os import mkfifo
from os import mkfifo
def ffmpeg(video, threshold, min_scene_len, total_frames, is_vs, temp):
@@ -15,12 +15,16 @@ def ffmpeg(video, threshold, min_scene_len, total_frames, is_vs, temp):
Running FFMPEG detection on source video for segmenting.
Usually the optimal threshold is 0.1 - 0.3 but it can vary a lot
based on your source content.
Threshold value is multiplied by 100 to match the pyscene range
"""
log(f'Starting FFMPEG detection:\nThreshold: {threshold}, Is Vapoursynth input: {is_vs}\n')
log(f'Starting FFMPEG detection:\nThreshold: {threshold}, \nIs Vapoursynth input: {is_vs}\n')
scenes = []
frame:int = 0
if is_vs:
# Handling vapoursynth. Outputs vs to a file so ffmpeg can handle it.
if sys.platform == "linux":
vspipe_fifo = temp / 'vspipe.y4m'
mkfifo(vspipe_fifo)
@@ -30,48 +34,40 @@ def ffmpeg(video, threshold, min_scene_len, total_frames, is_vs, temp):
vspipe_cmd = compose_vapoursynth_pipe(video, vspipe_fifo)
vspipe_process = Popen(vspipe_cmd)
finfo = "showinfo,select=gt(scene\\," + str(threshold) + "),showinfo"
ffmpeg_cmd = ["ffmpeg", "-i", str(vspipe_fifo if is_vs else video.as_posix()), "-hide_banner", "-loglevel", "32",
"-filter_complex", finfo, "-an", "-f", "null", "-"]
pipe = subprocess.Popen(ffmpeg_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
last_frame = -1
scenes = []
cmd = ['ffmpeg', '-hwaccel', 'auto','-hide_banner', '-i', str(vspipe_fifo if is_vs else video.as_posix()), '-an', '-sn', '-vf', 'scale=\'min(960,iw):-1:flags=neighbor\',select=\'gte(scene,0)\',metadata=print', '-f', 'null', '-']
pipe = Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
while True:
line = pipe.stderr.readline().strip()
line = pipe.stdout.readline().strip()
if len(line) == 0 and pipe.poll() is not None:
break
if len(line) == 0:
continue
if line:
cur_frame = re.search("n:\\ *[0-9]+", str(line))
if cur_frame is not None:
frame_num = re.search("[0-9]+", cur_frame.group(0))
if frame_num is not None:
frame_num = int(frame_num.group(0))
if frame_num < last_frame:
scenes += [last_frame]
else:
last_frame = frame_num
if 'frame' in line:
match = re.findall(r':(\d+)', line)
if match:
frame = int(match[0])
continue
if 'score' in line:
matches = re.findall(r"=\s*([\S\s]+)", line)
if matches:
score = float(matches[-1]) * 100
if score > threshold and frame - max(scenes, default=0) > min_scene_len:
scenes.append(frame)
if pipe.returncode != 0 and pipe.returncode != -2:
print(f"\n:: Error in ffmpeg scenedetection {pipe.returncode}")
print('\n'.join(scenes))
if is_vs:
vspipe_process.wait()
scenes = [0] + scenes + [total_frames]
index = 1
while index < len(scenes):
if scenes[index] < (scenes[index - 1] + min_scene_len):
scenes.pop(index)
else:
index = index + 1
if len(scenes) > 2:
scenes.pop(0)
scenes.pop(len(scenes) - 1)
else:
return []
if len(scenes) > 0 and scenes[0] == 0:
scenes.remove(0)
log(f'Found split points: {len(scenes)}\n')
log(f'Splits: {scenes}\n')
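
The rewritten loop above replaces the `showinfo`/`select=gt(scene,…)` filter with `metadata=print` output read from stdout. A minimal standalone sketch of that parsing logic (the sample lines only imitate ffmpeg's `metadata=print` format and are not taken from a real run):

```python
import re

def parse_scene_scores(lines, threshold, min_scene_len):
    """Mirror of the frame/score parsing in the rewritten ffmpeg() loop:
    'frame' lines update the current frame number, 'score' lines are scaled
    by 100 and compared against the threshold, and a split is kept only if
    it comes at least min_scene_len frames after the previous one."""
    scenes, frame = [], 0
    for line in lines:
        if 'frame' in line:
            match = re.findall(r':(\d+)', line)
            if match:
                frame = int(match[0])
            continue
        if 'score' in line:
            matches = re.findall(r"=\s*([\S\s]+)", line)
            if matches:
                score = float(matches[-1]) * 100
                if score > threshold and frame - max(scenes, default=0) > min_scene_len:
                    scenes.append(frame)
    return scenes

# Illustrative input only (format imitates ffmpeg's metadata=print output):
sample = [
    "[Parsed_metadata_1 @ 0xdeadbeef] frame:240  pts:240    pts_time:10.01",
    "[Parsed_metadata_1 @ 0xdeadbeef] lavfi.scene_score=0.3521",
]
print(parse_scene_scores(sample, threshold=30, min_scene_len=24))  # -> [240]
```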

@@ -176,7 +176,6 @@ def calc_split_locations(project: Project) -> List[int]:
sc = ffmpeg(project.input, project.threshold, project.min_scene_len, project.get_frames(), project.is_vs, project.temp)
# Write scenes to file
if project.scenes:
write_scenes_to_file(sc, project.get_frames(), project.scenes)

@@ -145,7 +145,7 @@ def weighted_search(num1, vmaf1, num2, vmaf2, target):
return new_point
def probe_cmd(chunk: Chunk, q, ffmpeg_pipe, encoder, probing_rate) -> CommandPair:
def probe_cmd(chunk: Chunk, q, ffmpeg_pipe, encoder, probing_rate, n_threads) -> CommandPair:
"""
Generate and return commands for probes at set Q values
These are specifically not the commands that are generated
@@ -159,39 +159,39 @@ def probe_cmd(chunk: Chunk, q, ffmpeg_pipe, encoder, probing_rate) -> CommandPair:
probe_name = gen_probes_names(chunk, q).with_suffix('.ivf').as_posix()
if encoder == 'aom':
params = ['aomenc', '--passes=1', '--threads=12',
params = ['aomenc', '--passes=1', f'--threads={n_threads}', '--tile-columns=1',
'--end-usage=q', '-b', '8', '--cpu-used=6', f'--cq-level={q}']
cmd = CommandPair(pipe, [*params, '-o', probe_name, '-'])
elif encoder == 'x265':
params = ['x265', '--log-level', '0', '--no-progress',
'--y4m', '--preset', 'fast', '--crf', f'{q}']
'--y4m', '--frame-threads', f'{n_threads}', '--preset', 'fast', '--crf', f'{q}']
cmd = CommandPair(pipe, [*params, '-o', probe_name, '-'])
elif encoder == 'rav1e':
params = ['rav1e', '-y', '-s', '10', '--tiles', '32', '--quantizer', f'{q}']
params = ['rav1e', '-y', '-s', '10', '--threads', f'{n_threads}', '--tiles', '32', '--quantizer', f'{q}']
cmd = CommandPair(pipe, [*params, '-o', probe_name, '-'])
elif encoder == 'vpx':
params = ['vpxenc', '-b', '10', '--profile=2','--passes=1', '--pass=1', '--codec=vp9',
'--threads=8', '--cpu-used=9', '--end-usage=q',
f'--threads={n_threads}', '--cpu-used=9', '--end-usage=q',
f'--cq-level={q}', '--row-mt=1']
cmd = CommandPair(pipe, [*params, '-o', probe_name, '-'])
elif encoder == 'svt_av1':
params = ['SvtAv1EncApp', '-i', 'stdin',
params = ['SvtAv1EncApp', '-i', 'stdin', '--lp', f'{n_threads}',
'--preset', '8', '--rc', '0', '--qp', f'{q}']
cmd = CommandPair(pipe, [*params, '-b', probe_name, '-'])
elif encoder == 'svt_vp9':
params = ['SvtVp9EncApp', '-i', 'stdin',
params = ['SvtVp9EncApp', '-i', 'stdin', '--lp', f'{n_threads}',
'-enc-mode', '8', '-q', f'{q}']
# TODO: pipe needs to output rawvideo
cmd = CommandPair(pipe, [*params, '-b', probe_name, '-'])
elif encoder == 'x264':
params = ['x264', '--log-level', 'error', '--demuxer', 'y4m',
'-', '--no-progress', '--preset', 'medium', '--crf',
'-', '--no-progress', '--threads', f'{n_threads}', '--preset', 'medium', '--crf',
f'{q}']
cmd = CommandPair(pipe, [*params, '-o', probe_name, '-'])
@@ -231,10 +231,12 @@ def vmaf_probe(chunk: Chunk, q, project: Project, probing_rate):
:param chunk: the Chunk
:param q: Value to make probe
:param project: the Project
:param probing_rate: 1 out of every N frames should be encoded for analysis
:return : path to json file with vmaf scores
"""
cmd = probe_cmd(chunk, q, project.ffmpeg_pipe, project.encoder, probing_rate)
n_threads = project.n_threads if project.n_threads else 12
cmd = probe_cmd(chunk, q, project.ffmpeg_pipe, project.encoder, probing_rate, n_threads)
pipe = make_pipes(chunk.ffmpeg_gen_cmd, cmd)
process_pipe(pipe, chunk)
vm = VMAF(n_threads=project.n_threads, model=project.vmaf_path, res=project.vmaf_res, vmaf_filter=project.vmaf_filter)
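
The change above threads an explicit `n_threads` value through every probe command, falling back to 12 when `project.n_threads` is unset. A condensed sketch of that pattern, not the project's actual code, using only the flags shown in this hunk:

```python
from typing import Callable, Dict, List, Optional

# Per-encoder threading flags as they appear in the updated probe commands.
THREAD_FLAGS: Dict[str, Callable[[int], List[str]]] = {
    'aom':     lambda n: [f'--threads={n}'],
    'x265':    lambda n: ['--frame-threads', f'{n}'],
    'rav1e':   lambda n: ['--threads', f'{n}'],
    'vpx':     lambda n: [f'--threads={n}'],
    'svt_av1': lambda n: ['--lp', f'{n}'],
    'svt_vp9': lambda n: ['--lp', f'{n}'],
    'x264':    lambda n: ['--threads', f'{n}'],
}

def probe_thread_args(encoder: str, configured: Optional[int]) -> List[str]:
    """Return the per-encoder threading arguments, defaulting to 12 threads."""
    n_threads = configured if configured else 12
    return THREAD_FLAGS[encoder](n_threads)

# e.g. probe_thread_args('aom', None) -> ['--threads=12']
#      probe_thread_args('x265', 4)   -> ['--frame-threads', '4']
```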

@@ -11,7 +11,7 @@ GIT: [AOM](https://aomedia.googlesource.com/aom/)
| -------------| -------------|
| --help | Show usage options and exit |
| --end-usage=arg | Rate control mode (vbr, cbr(default), cq, q) |
| --cpu-used=arg | CPU Used (0....6) Good mode, (5..9) realtime mode 1(default)|
| --cpu-used=arg | CPU Used (0..6) Good mode, (5..9) realtime mode 1(default)|
| --cq-level=arg | Constant/Constrained Quality level, used in Q/CQ modes |
| --target-bitrate=arg | Bitrate (kbps) |
| --bit-depth=arg | Bit depth (8, 10, 12) |
@@ -35,7 +35,6 @@ GIT: [AOM](https://aomedia.googlesource.com/aom/)
| --transfer-characteristics=arg | Transfer characteristics (CICP) of input content (unspecified, bt709, bt470m, bt470bg, bt601, smpte240, lin, log100, log100sq10, iec61966, bt1361, srgb, bt2020-10bit, bt2020-12bit, smpte2084, hlg, smpte428). Leave at default unless you have HDR content or your source's transfer characteristics are different; in that case, set it to whatever your content is. |
| --matrix-coefficients=arg | Matrix coefficients (CICP) of input content (identity, bt709, unspecified, fcc73, bt470bg, bt601, smpte240, ycgco, bt2020ncl, bt2020cl, smpte2085, chromncl, chromcl, ictcp). Leave at default unless you have HDR content or your source's matrix coefficients information is different; in that case, set it to whatever your content is. |
### Examples of settings
##### Constant quality:

@@ -9,7 +9,7 @@ REQUIRES = [
with open("README.md", "r") as f:
long_description = f.read()
version = "5.4-1"
version = "5.5.4"
setuptools.setup(
name="Av1an-minimal",

@@ -13,7 +13,7 @@ REQUIRES = [
with open("README.md", "r") as f:
long_description = f.read()
version = "5.5-3"
version = "5.5-4"
setuptools.setup(
name="Av1an",