
* Rename landmark 5 variables
* Mark as NEXT
* Render tabs for multiple ui layout usage
* Allow many face detectors at once, Add face detector tweaks
* Remove face detector tweaks for now (kinda placebo)
* Fix lint issues
* Allow rendering the landmark-5 and landmark-5/68 via debugger
* Fix naming
* Convert face landmark based on confidence score
* Convert face landmark based on confidence score
* Add scrfd face detector model (#397)
* Add scrfd face detector model
* Switch to scrfd_2.5g.onnx model
* Just some renaming
* Downgrade OpenCV, Add SYSTEM_VERSION_COMPAT=0 for MacOS
* Improve naming
* prepare detect frame outside of semaphore
* Feat/process manager (#399)
* Minor naming
* Introduce process manager to start and stop
* Introduce process manager to start and stop
* Introduce process manager to start and stop
* Introduce process manager to start and stop
* Introduce process manager to start and stop
* Remove useless test for now
* Avoid useless variables
* Show stop once is_processing is True
* Allow to stop ffmpeg processing too
* Implement output image resolution (#403)
* Implement output image resolution
* Reorder code
* Simplify output logic and therefore fix bug
* Frame-enhancer-onnx (#404)
* changes
* changes
* changes
* changes
* add models
* update workflow
* Some cleanup
* Some cleanup
* Feat/frame enhancer polishing (#410)
* Some cleanup
* Polish the frame enhancer
* Frame Enhancer: Add more models, optimize processing
* Minor changes
* Improve readability of create_tile_frames and merge_tile_frames
* We don't have enough models yet
* Feat/face landmarker score (#413)
* Introduce face landmarker score
* Fix testing
* Fix testing
* Use release for score related sliders
* Reduce face landmark fallbacks
* Scores and landmarks in Face dict, Change color-theme in face debugger
* Scores and landmarks in Face dict, Change color-theme in face debugger
* Fix some naming
* Add 8K support (for whatever reasons)
* Fix testing
* Using get() for face.landmarks
* Introduce statistics
* More statistics
* Limit the histogram equalization
* Enable queue() for default layout
* Improve copy_image()
* Fix error when switching detector model
* Always set UI values with globals if possible
* Use different logic for output image and output video resolutions
* Enforce re-download if file size is off
* Remove unused method
* Remove unused method
* Remove unused warning filter
* Improved output path normalization (#419)
* Handle some exceptions
* Handle some exceptions
* Cleanup
* Prevent countless thread locks
* Listen to user feedback
* Fix webp edge case
* Feat/cuda device detection (#424)
* Introduce cuda device detection
* Introduce cuda device detection
* it's gtx
* Move logic to run_nvidia_smi()
* Finalize execution device naming
* Finalize execution device naming
* Merge execution_helper.py to execution.py
* Undo lowercase of values
* Undo lowercase of values
* Finalize naming
* Add missing entry to ini
* fix lip_syncer preview (#426)
* fix lip_syncer preview
* change
* Refresh preview on trim changes
* Cleanup frame enhancers and remove useless scale in merge_video() (#428)
* Keep lips over the whole video once lip syncer is enabled (#430)
* Keep lips over the whole video once lip syncer is enabled
* changes
* changes
* Fix spacing
* Use empty audio frame on silence
* Use empty audio frame on silence
* Fix ConfigParser encoding (#431): facefusion.ini is UTF-8 encoded, but config.py did not specify an encoding, which corrupted entries containing non-English characters. Affected entries: source_paths, target_path, output_path (see the sketch after this list)
* Adjust spacing
* Improve the GTX 16 series detection
* Use general exception to catch ParseError
* Use general exception to catch ParseError
* Host frame enhancer models4
* Use latest onnxruntime
* Minor changes in benchmark UI
* Different approach to cancel ffmpeg process
* Add support for amd amf encoders (#433)
* Add amd_amf encoders
* remove -rc cqp from amf encoder parameters
* Improve terminal output, move success messages to debug mode
* Improve terminal output, move success messages to debug mode
* Minor update
* Minor update
* onnxruntime 1.17.1 matches cuda 12.2
* Feat/improved scaling (#435)
* Prevent useless temp upscaling, Show resolution and fps in terminal output
* Remove temp frame quality
* Remove temp frame quality
* Tiny cleanup
* Default back to png for temp frames, Remove pix_fmt from frame extraction due mjpeg error
* Fix inswapper fallback by onnxruntime
* Fix inswapper fallback by major onnxruntime
* Fix inswapper fallback by major onnxruntime
* Add testing for vision restrict methods
* Fix left / right face mask regions, add left-ear and right-ear
* Flip right and left again
* Undo ears - does not work with box mask
* Prepare next release
* Fix spacing
* 100% quality when using jpg for temp frames
* Use span_kendata_x4 as default as of speed
* benchmark optimal tile and pad
* Undo commented out code
* Add real_esrgan_x4_fp16 model
* Be strict when using many face detectors

---------

Co-authored-by: Harisreedhar <46858047+harisreedhar@users.noreply.github.com>
Co-authored-by: aldemoth <159712934+aldemoth@users.noreply.github.com>
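A minimal sketch of the ConfigParser fix described for #431, assuming the settings live in facefusion.ini under a [general] section (the section and option names here are illustrative, not taken from the repository): passing encoding = 'utf-8' to read() keeps non-English characters in entries such as source_paths intact.

from configparser import ConfigParser

config = ConfigParser()
# Without encoding = 'utf-8', ConfigParser opens the file with the locale's
# preferred encoding and non-ASCII paths can come back corrupted.
config.read('facefusion.ini', encoding = 'utf-8')
source_paths = config.get('general', 'source_paths', fallback = None)
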
110 lines · 6.5 KiB · Python

import subprocess

import pytest

from facefusion.download import conditional_download
from facefusion.vision import detect_image_resolution, restrict_image_resolution, create_image_resolutions, get_video_frame, count_video_frame_total, detect_video_fps, restrict_video_fps, detect_video_resolution, restrict_video_resolution, create_video_resolutions, normalize_resolution, pack_resolution, unpack_resolution


@pytest.fixture(scope = 'module', autouse = True)
def before_all() -> None:
	# download the example media once per module and derive rotated and fps-converted fixtures via ffmpeg
	conditional_download('.assets/examples',
	[
		'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg',
		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4',
		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1080p.mp4'
	])
	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vframes', '1', '.assets/examples/target-240p.jpg' ])
	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-1080p.mp4', '-vframes', '1', '.assets/examples/target-1080p.jpg' ])
	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vframes', '1', '-vf', 'transpose=0', '.assets/examples/target-240p-90deg.jpg' ])
	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-1080p.mp4', '-vframes', '1', '-vf', 'transpose=0', '.assets/examples/target-1080p-90deg.jpg' ])
	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=25', '.assets/examples/target-240p-25fps.mp4' ])
	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=30', '.assets/examples/target-240p-30fps.mp4' ])
	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=60', '.assets/examples/target-240p-60fps.mp4' ])
	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'transpose=0', '.assets/examples/target-240p-90deg.mp4' ])
	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-1080p.mp4', '-vf', 'transpose=0', '.assets/examples/target-1080p-90deg.mp4' ])


def test_detect_image_resolution() -> None:
	assert detect_image_resolution('.assets/examples/target-240p.jpg') == (426, 226)
	assert detect_image_resolution('.assets/examples/target-240p-90deg.jpg') == (226, 426)
	assert detect_image_resolution('.assets/examples/target-1080p.jpg') == (2048, 1080)
	assert detect_image_resolution('.assets/examples/target-1080p-90deg.jpg') == (1080, 2048)
	assert detect_image_resolution('invalid') is None


def test_restrict_image_resolution() -> None:
	assert restrict_image_resolution('.assets/examples/target-1080p.jpg', (426, 226)) == (426, 226)
	assert restrict_image_resolution('.assets/examples/target-1080p.jpg', (2048, 1080)) == (2048, 1080)
	assert restrict_image_resolution('.assets/examples/target-1080p.jpg', (4096, 2160)) == (2048, 1080)


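# NOTE: restrict_image_resolution() and restrict_video_resolution() are defined in
# facefusion.vision, not in this file. As the assertions above suggest, a requested
# output resolution appears to be capped at the source's native resolution. The
# function below is a hypothetical sketch of that rule; the name
# restrict_resolution_sketch is illustrative only and not part of the library.
def restrict_resolution_sketch(native_resolution, requested_resolution):
	# never exceed the native size; otherwise pass the request through unchanged
	if requested_resolution > native_resolution:
		return native_resolution
	return requested_resolution
# e.g. restrict_resolution_sketch((2048, 1080), (4096, 2160)) -> (2048, 1080)

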
def test_create_image_resolutions() -> None:
	assert create_image_resolutions((426, 226)) == [ '106x56', '212x112', '320x170', '426x226', '640x340', '852x452', '1064x564', '1278x678', '1492x792', '1704x904' ]
	assert create_image_resolutions((226, 426)) == [ '56x106', '112x212', '170x320', '226x426', '340x640', '452x852', '564x1064', '678x1278', '792x1492', '904x1704' ]
	assert create_image_resolutions((2048, 1080)) == [ '512x270', '1024x540', '1536x810', '2048x1080', '3072x1620', '4096x2160', '5120x2700', '6144x3240', '7168x3780', '8192x4320' ]
	assert create_image_resolutions((1080, 2048)) == [ '270x512', '540x1024', '810x1536', '1080x2048', '1620x3072', '2160x4096', '2700x5120', '3240x6144', '3780x7168', '4320x8192' ]
	assert create_image_resolutions(None) == []


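# NOTE: create_image_resolutions() lives in facefusion.vision. Judging from the
# assertions above, the base resolution appears to be scaled by fixed factors
# (0.25 up to 4.0) with every dimension snapped to an even number. This is a
# hypothetical sketch consistent with those expectations, not the actual implementation.
def create_image_resolutions_sketch(resolution):
	image_resolutions = []
	if resolution:
		width, height = resolution
		for factor in [ 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 3.5, 4 ]:
			scaled_width = round(width * factor / 2) * 2
			scaled_height = round(height * factor / 2) * 2
			image_resolutions.append(str(scaled_width) + 'x' + str(scaled_height))
	return image_resolutions

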
def test_get_video_frame() -> None:
	assert get_video_frame('.assets/examples/target-240p-25fps.mp4') is not None
	assert get_video_frame('invalid') is None


def test_count_video_frame_total() -> None:
	assert count_video_frame_total('.assets/examples/target-240p-25fps.mp4') == 270
	assert count_video_frame_total('.assets/examples/target-240p-30fps.mp4') == 324
	assert count_video_frame_total('.assets/examples/target-240p-60fps.mp4') == 648
	assert count_video_frame_total('invalid') == 0


def test_detect_video_fps() -> None:
	assert detect_video_fps('.assets/examples/target-240p-25fps.mp4') == 25.0
	assert detect_video_fps('.assets/examples/target-240p-30fps.mp4') == 30.0
	assert detect_video_fps('.assets/examples/target-240p-60fps.mp4') == 60.0
	assert detect_video_fps('invalid') is None


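# NOTE: count_video_frame_total() and detect_video_fps() are implemented in
# facefusion.vision. The function below is a hypothetical sketch of how such
# probing could be done with OpenCV (facefusion bundles OpenCV, but the real
# implementation may differ); probe_video_sketch is an illustrative name only.
def probe_video_sketch(video_path):
	import cv2
	capture = cv2.VideoCapture(video_path)
	if capture.isOpened():
		frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
		video_fps = capture.get(cv2.CAP_PROP_FPS)
		capture.release()
		return frame_total, video_fps
	# an unreadable path yields zero frames and no fps, matching the tests above
	return 0, None

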
def test_restrict_video_fps() -> None:
	assert restrict_video_fps('.assets/examples/target-1080p.mp4', 20.0) == 20.0
	assert restrict_video_fps('.assets/examples/target-1080p.mp4', 25.0) == 25.0
	assert restrict_video_fps('.assets/examples/target-1080p.mp4', 60.0) == 25.0


def test_detect_video_resolution() -> None:
	assert detect_video_resolution('.assets/examples/target-240p.mp4') == (426, 226)
	assert detect_video_resolution('.assets/examples/target-240p-90deg.mp4') == (226, 426)
	assert detect_video_resolution('.assets/examples/target-1080p.mp4') == (2048, 1080)
	assert detect_video_resolution('.assets/examples/target-1080p-90deg.mp4') == (1080, 2048)
	assert detect_video_resolution('invalid') is None


def test_restrict_video_resolution() -> None:
	assert restrict_video_resolution('.assets/examples/target-1080p.mp4', (426, 226)) == (426, 226)
	assert restrict_video_resolution('.assets/examples/target-1080p.mp4', (2048, 1080)) == (2048, 1080)
	assert restrict_video_resolution('.assets/examples/target-1080p.mp4', (4096, 2160)) == (2048, 1080)


def test_create_video_resolutions() -> None:
	assert create_video_resolutions((426, 226)) == [ '426x226', '452x240', '678x360', '904x480', '1018x540', '1358x720', '2036x1080', '2714x1440', '4072x2160', '8144x4320' ]
	assert create_video_resolutions((226, 426)) == [ '226x426', '240x452', '360x678', '480x904', '540x1018', '720x1358', '1080x2036', '1440x2714', '2160x4072', '4320x8144' ]
	assert create_video_resolutions((2048, 1080)) == [ '456x240', '682x360', '910x480', '1024x540', '1366x720', '2048x1080', '2730x1440', '4096x2160', '8192x4320' ]
	assert create_video_resolutions((1080, 2048)) == [ '240x456', '360x682', '480x910', '540x1024', '720x1366', '1080x2048', '1440x2730', '2160x4096', '4320x8192' ]
	assert create_video_resolutions(None) == []


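# NOTE: create_video_resolutions() lives in facefusion.vision. The assertions above
# suggest that standard template sizes (240 through 4320) are applied to the shorter
# side, the longer side is derived from the aspect ratio and snapped to an even
# number, and the native resolution is included as well. The function below is a
# hypothetical sketch of that behaviour, not the actual implementation.
def create_video_resolutions_sketch(resolution):
	video_resolutions = []
	if resolution:
		width, height = resolution
		temp_resolutions = [ (round(width / 2) * 2, round(height / 2) * 2) ]
		for template_size in [ 240, 360, 480, 540, 720, 1080, 1440, 2160, 4320 ]:
			if width > height:
				temp_resolutions.append((round(width / height * template_size / 2) * 2, template_size))
			else:
				temp_resolutions.append((template_size, round(height / width * template_size / 2) * 2))
		for temp_width, temp_height in sorted(set(temp_resolutions)):
			video_resolutions.append(str(temp_width) + 'x' + str(temp_height))
	return video_resolutions

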
def test_normalize_resolution() -> None:
	assert normalize_resolution((2.5, 2.5)) == (2, 2)
	assert normalize_resolution((3.0, 3.0)) == (4, 4)
	assert normalize_resolution((6.5, 6.5)) == (6, 6)


def test_pack_resolution() -> None:
	assert pack_resolution((1, 1)) == '0x0'
	assert pack_resolution((2, 2)) == '2x2'


def test_unpack_resolution() -> None:
	assert unpack_resolution('0x0') == (0, 0)
	assert unpack_resolution('2x2') == (2, 2)


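# NOTE: the three helpers under test are defined in facefusion.vision. The sketch
# below is a hypothetical reference implementation that reproduces the assertions
# above: rounding each dimension to the nearest multiple of 2 with Python's round()
# (banker's rounding) is why (3.0, 3.0) normalizes to (4, 4) and (1, 1) packs to '0x0'.
def normalize_resolution_sketch(resolution):
	width, height = resolution
	return round(width / 2) * 2, round(height / 2) * 2


def pack_resolution_sketch(resolution):
	width, height = normalize_resolution_sketch(resolution)
	return str(width) + 'x' + str(height)


def unpack_resolution_sketch(resolution):
	width, height = map(int, resolution.split('x'))
	return width, height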