
import subprocess
import pytest

import facefusion.globals
from facefusion.utilities import conditional_download
from facefusion.vision import get_video_frame, detect_fps, count_video_frame_total


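# Module-level setup: configure the temp frame globals, download the example assets once
# and use ffmpeg to derive 25, 30 and 60 fps variants of the target video.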
@pytest.fixture(scope = 'module', autouse = True)
def before_all() -> None:
    facefusion.globals.temp_frame_quality = 100
    facefusion.globals.trim_frame_start = None
    facefusion.globals.trim_frame_end = None
    facefusion.globals.temp_frame_format = 'png'
    conditional_download('.assets/examples',
    [
        'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg',
        'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4'
    ])
    subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=25', '.assets/examples/target-240p-25fps.mp4' ])
    subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=30', '.assets/examples/target-240p-30fps.mp4' ])
    subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=60', '.assets/examples/target-240p-60fps.mp4' ])


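# Reset trim and temp frame settings before every test so the tests stay independent of each other.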
@pytest.fixture(scope = 'function', autouse = True)
def before_each() -> None:
    facefusion.globals.trim_frame_start = None
    facefusion.globals.trim_frame_end = None
    facefusion.globals.temp_frame_quality = 90
    facefusion.globals.temp_frame_format = 'jpg'


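# get_video_frame() should return a frame for a readable video and None for an invalid path.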
def test_get_video_frame() -> None:
    assert get_video_frame('.assets/examples/target-240p-25fps.mp4') is not None
    assert get_video_frame('invalid') is None


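# detect_fps() should report the exact frame rate of each generated variant and None for an invalid path.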
def test_detect_fps() -> None:
    assert detect_fps('.assets/examples/target-240p-25fps.mp4') == 25.0
    assert detect_fps('.assets/examples/target-240p-30fps.mp4') == 30.0
    assert detect_fps('.assets/examples/target-240p-60fps.mp4') == 60.0
    assert detect_fps('invalid') is None


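# The target clip is roughly 10.8 seconds long, so the expected totals scale with the frame rate:
# 270 frames at 25 fps, 324 at 30 fps and 648 at 60 fps, while an invalid path yields 0.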
def test_count_video_frame_total() -> None:
    assert count_video_frame_total('.assets/examples/target-240p-25fps.mp4') == 270
    assert count_video_frame_total('.assets/examples/target-240p-30fps.mp4') == 324
    assert count_video_frame_total('.assets/examples/target-240p-60fps.mp4') == 648
    assert count_video_frame_total('invalid') == 0