diff --git a/.github/preview.png b/.github/preview.png index ee316111..7f2dd02c 100755 Binary files a/.github/preview.png and b/.github/preview.png differ diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 82e1b0cb..829cc9be 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,10 +8,10 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 - - name: Set up Python 3.10 + - name: Set up Python 3.12 uses: actions/setup-python@v5 with: - python-version: '3.10' + python-version: '3.12' - run: pip install flake8 - run: pip install flake8-import-order - run: pip install mypy @@ -22,17 +22,17 @@ jobs: test: strategy: matrix: - os: [ macos-13, ubuntu-latest, windows-latest ] + os: [ macos-latest, ubuntu-latest, windows-latest ] runs-on: ${{ matrix.os }} steps: - name: Checkout uses: actions/checkout@v4 - name: Set up FFmpeg - uses: FedericoCarboni/setup-ffmpeg@v3 - - name: Set up Python 3.10 + uses: AnimMouse/setup-ffmpeg@v1 + - name: Set up Python 3.12 uses: actions/setup-python@v5 with: - python-version: '3.10' + python-version: '3.12' - run: python install.py --onnxruntime default --skip-conda - run: pip install pytest - run: pytest @@ -44,10 +44,10 @@ jobs: uses: actions/checkout@v4 - name: Set up FFmpeg uses: FedericoCarboni/setup-ffmpeg@v3 - - name: Set up Python 3.10 + - name: Set up Python 3.12 uses: actions/setup-python@v5 with: - python-version: '3.10' + python-version: '3.12' - run: python install.py --onnxruntime default --skip-conda - run: pip install coveralls - run: pip install pytest diff --git a/.gitignore b/.gitignore index 40bebe79..654ae6da 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +__pycache__ .assets .caches .jobs diff --git a/README.md b/README.md index 6f7bcbf7..52b29569 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,7 @@ options: commands: run run the program headless-run run the program in headless mode + batch-run run the program in batch mode force-download force automate downloads and exit job-list list jobs by status job-create create a drafted job diff --git a/facefusion.ini b/facefusion.ini index 8b23a0e3..539b83f6 100644 --- a/facefusion.ini +++ b/facefusion.ini @@ -1,9 +1,15 @@ [paths] +temp_path = jobs_path = source_paths = target_path = output_path = +[patterns] +source_pattern = +target_pattern = +output_pattern = + [face_detector] face_detector_model = face_detector_size = @@ -26,6 +32,8 @@ reference_face_distance = reference_frame_number = [face_masker] +face_occluder_model = +face_parser_model = face_mask_types = face_mask_blur = face_mask_padding = @@ -52,6 +60,8 @@ skip_audio = processors = age_modifier_model = age_modifier_direction = +deep_swapper_model = +deep_swapper_morph = expression_restorer_model = expression_restorer_factor = face_debugger_items = @@ -72,6 +82,7 @@ face_editor_head_yaw = face_editor_head_roll = face_enhancer_model = face_enhancer_blend = +face_enhancer_weight = face_swapper_model = face_swapper_pixel_boost = frame_colorizer_model = @@ -92,10 +103,13 @@ execution_providers = execution_thread_count = execution_queue_count = +[download] +download_providers = +download_scope = + [memory] video_memory_strategy = system_memory_limit = [misc] -skip_download = log_level = diff --git a/facefusion/args.py b/facefusion/args.py index 416546b7..f8ee798f 100644 --- a/facefusion/args.py +++ b/facefusion/args.py @@ -15,6 +15,14 @@ def reduce_step_args(args : Args) -> Args: return step_args +def reduce_job_args(args : Args) -> Args: + job_args =\ + { + key: args[key] for key in args 
if key in job_store.get_job_keys() + } + return job_args + + def collect_step_args() -> Args: step_args =\ { @@ -35,10 +43,15 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: # general apply_state_item('command', args.get('command')) # paths + apply_state_item('temp_path', args.get('temp_path')) apply_state_item('jobs_path', args.get('jobs_path')) apply_state_item('source_paths', args.get('source_paths')) apply_state_item('target_path', args.get('target_path')) apply_state_item('output_path', args.get('output_path')) + # patterns + apply_state_item('source_pattern', args.get('source_pattern')) + apply_state_item('target_pattern', args.get('target_pattern')) + apply_state_item('output_pattern', args.get('output_pattern')) # face detector apply_state_item('face_detector_model', args.get('face_detector_model')) apply_state_item('face_detector_size', args.get('face_detector_size')) @@ -48,16 +61,18 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: apply_state_item('face_landmarker_model', args.get('face_landmarker_model')) apply_state_item('face_landmarker_score', args.get('face_landmarker_score')) # face selector - state_manager.init_item('face_selector_mode', args.get('face_selector_mode')) - state_manager.init_item('face_selector_order', args.get('face_selector_order')) - state_manager.init_item('face_selector_age_start', args.get('face_selector_age_start')) - state_manager.init_item('face_selector_age_end', args.get('face_selector_age_end')) - state_manager.init_item('face_selector_gender', args.get('face_selector_gender')) - state_manager.init_item('face_selector_race', args.get('face_selector_race')) - state_manager.init_item('reference_face_position', args.get('reference_face_position')) - state_manager.init_item('reference_face_distance', args.get('reference_face_distance')) - state_manager.init_item('reference_frame_number', args.get('reference_frame_number')) + apply_state_item('face_selector_mode', args.get('face_selector_mode')) + apply_state_item('face_selector_order', args.get('face_selector_order')) + apply_state_item('face_selector_age_start', args.get('face_selector_age_start')) + apply_state_item('face_selector_age_end', args.get('face_selector_age_end')) + apply_state_item('face_selector_gender', args.get('face_selector_gender')) + apply_state_item('face_selector_race', args.get('face_selector_race')) + apply_state_item('reference_face_position', args.get('reference_face_position')) + apply_state_item('reference_face_distance', args.get('reference_face_distance')) + apply_state_item('reference_frame_number', args.get('reference_frame_number')) # face masker + apply_state_item('face_occluder_model', args.get('face_occluder_model')) + apply_state_item('face_parser_model', args.get('face_parser_model')) apply_state_item('face_mask_types', args.get('face_mask_types')) apply_state_item('face_mask_blur', args.get('face_mask_blur')) apply_state_item('face_mask_padding', normalize_padding(args.get('face_mask_padding'))) @@ -92,7 +107,7 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: apply_state_item('output_video_fps', output_video_fps) apply_state_item('skip_audio', args.get('skip_audio')) # processors - available_processors = list_directory('facefusion/processors/modules') + available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ] apply_state_item('processors', args.get('processors')) for processor_module in get_processors_modules(available_processors): 
processor_module.apply_args(args, apply_state_item) @@ -105,11 +120,13 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: apply_state_item('execution_providers', args.get('execution_providers')) apply_state_item('execution_thread_count', args.get('execution_thread_count')) apply_state_item('execution_queue_count', args.get('execution_queue_count')) + # download + apply_state_item('download_providers', args.get('download_providers')) + apply_state_item('download_scope', args.get('download_scope')) # memory apply_state_item('video_memory_strategy', args.get('video_memory_strategy')) apply_state_item('system_memory_limit', args.get('system_memory_limit')) # misc - apply_state_item('skip_download', args.get('skip_download')) apply_state_item('log_level', args.get('log_level')) # jobs apply_state_item('job_id', args.get('job_id')) diff --git a/facefusion/choices.py b/facefusion/choices.py index 15a8cc5e..6f128651 100755 --- a/facefusion/choices.py +++ b/facefusion/choices.py @@ -2,9 +2,7 @@ import logging from typing import List, Sequence from facefusion.common_helper import create_float_range, create_int_range -from facefusion.typing import Angle, ExecutionProviderSet, FaceDetectorSet, FaceLandmarkerModel, FaceMaskRegion, FaceMaskType, FaceSelectorMode, FaceSelectorOrder, Gender, JobStatus, LogLevelSet, OutputAudioEncoder, OutputVideoEncoder, OutputVideoPreset, Race, Score, TempFrameFormat, UiWorkflow, VideoMemoryStrategy - -video_memory_strategies : List[VideoMemoryStrategy] = [ 'strict', 'moderate', 'tolerant' ] +from facefusion.typing import Angle, DownloadProvider, DownloadProviderSet, DownloadScope, ExecutionProvider, ExecutionProviderSet, FaceDetectorModel, FaceDetectorSet, FaceLandmarkerModel, FaceMaskRegion, FaceMaskRegionSet, FaceMaskType, FaceOccluderModel, FaceParserModel, FaceSelectorMode, FaceSelectorOrder, Gender, JobStatus, LogLevel, LogLevelSet, OutputAudioEncoder, OutputVideoEncoder, OutputVideoPreset, Race, Score, TempFrameFormat, UiWorkflow, VideoMemoryStrategy face_detector_set : FaceDetectorSet =\ { @@ -13,29 +11,37 @@ face_detector_set : FaceDetectorSet =\ 'scrfd': [ '160x160', '320x320', '480x480', '512x512', '640x640' ], 'yoloface': [ '640x640' ] } +face_detector_models : List[FaceDetectorModel] = list(face_detector_set.keys()) face_landmarker_models : List[FaceLandmarkerModel] = [ 'many', '2dfan4', 'peppa_wutz' ] face_selector_modes : List[FaceSelectorMode] = [ 'many', 'one', 'reference' ] face_selector_orders : List[FaceSelectorOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ] -face_selector_genders : List[Gender] = ['female', 'male'] -face_selector_races : List[Race] = ['white', 'black', 'latino', 'asian', 'indian', 'arabic'] +face_selector_genders : List[Gender] = [ 'female', 'male' ] +face_selector_races : List[Race] = [ 'white', 'black', 'latino', 'asian', 'indian', 'arabic' ] +face_occluder_models : List[FaceOccluderModel] = [ 'xseg_1', 'xseg_2' ] +face_parser_models : List[FaceParserModel] = [ 'bisenet_resnet_18', 'bisenet_resnet_34' ] face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ] -face_mask_regions : List[FaceMaskRegion] = [ 'skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip' ] +face_mask_region_set : FaceMaskRegionSet =\ +{ + 'skin': 1, + 'left-eyebrow': 2, + 'right-eyebrow': 3, + 'left-eye': 4, + 'right-eye': 5, + 'glasses': 6, + 'nose': 10, + 'mouth': 11, + 
'upper-lip': 12, + 'lower-lip': 13 +} +face_mask_regions : List[FaceMaskRegion] = list(face_mask_region_set.keys()) temp_frame_formats : List[TempFrameFormat] = [ 'bmp', 'jpg', 'png' ] output_audio_encoders : List[OutputAudioEncoder] = [ 'aac', 'libmp3lame', 'libopus', 'libvorbis' ] -output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf', 'h264_videotoolbox', 'hevc_videotoolbox' ] +output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf', 'h264_qsv', 'hevc_qsv', 'h264_videotoolbox', 'hevc_videotoolbox' ] output_video_presets : List[OutputVideoPreset] = [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow' ] image_template_sizes : List[float] = [ 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 3.5, 4 ] video_template_sizes : List[int] = [ 240, 360, 480, 540, 720, 1080, 1440, 2160, 4320 ] -log_level_set : LogLevelSet =\ -{ - 'error': logging.ERROR, - 'warn': logging.WARNING, - 'info': logging.INFO, - 'debug': logging.DEBUG -} - execution_provider_set : ExecutionProviderSet =\ { 'cpu': 'CPUExecutionProvider', @@ -46,6 +52,33 @@ execution_provider_set : ExecutionProviderSet =\ 'rocm': 'ROCMExecutionProvider', 'tensorrt': 'TensorrtExecutionProvider' } +execution_providers : List[ExecutionProvider] = list(execution_provider_set.keys()) +download_provider_set : DownloadProviderSet =\ +{ + 'github': + { + 'url': 'https://github.com', + 'path': '/facefusion/facefusion-assets/releases/download/{base_name}/{file_name}' + }, + 'huggingface': + { + 'url': 'https://huggingface.co', + 'path': '/facefusion/{base_name}/resolve/main/{file_name}' + } +} +download_providers : List[DownloadProvider] = list(download_provider_set.keys()) +download_scopes : List[DownloadScope] = [ 'lite', 'full' ] + +video_memory_strategies : List[VideoMemoryStrategy] = [ 'strict', 'moderate', 'tolerant' ] + +log_level_set : LogLevelSet =\ +{ + 'error': logging.ERROR, + 'warn': logging.WARNING, + 'info': logging.INFO, + 'debug': logging.DEBUG +} +log_levels : List[LogLevel] = list(log_level_set.keys()) ui_workflows : List[UiWorkflow] = [ 'instant_runner', 'job_runner', 'job_manager' ] job_statuses : List[JobStatus] = [ 'drafted', 'queued', 'completed', 'failed' ] diff --git a/facefusion/content_analyser.py b/facefusion/content_analyser.py index 998e560c..ee25f58b 100644 --- a/facefusion/content_analyser.py +++ b/facefusion/content_analyser.py @@ -5,41 +5,45 @@ import numpy from tqdm import tqdm from facefusion import inference_manager, state_manager, wording -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.filesystem import resolve_relative_path from facefusion.thread_helper import conditional_thread_semaphore -from facefusion.typing import Fps, InferencePool, ModelOptions, ModelSet, VisionFrame -from facefusion.vision import count_video_frame_total, detect_video_fps, get_video_frame, read_image +from facefusion.typing import DownloadScope, Fps, InferencePool, ModelOptions, ModelSet, VisionFrame +from facefusion.vision import detect_video_fps, get_video_frame, read_image -MODEL_SET : ModelSet =\ -{ - 'open_nsfw': - { - 'hashes': - { - 'content_analyser': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/open_nsfw.hash', - 
'path': resolve_relative_path('../.assets/models/open_nsfw.hash') - } - }, - 'sources': - { - 'content_analyser': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/open_nsfw.onnx', - 'path': resolve_relative_path('../.assets/models/open_nsfw.onnx') - } - }, - 'size': (224, 224), - 'mean': [ 104, 117, 123 ] - } -} PROBABILITY_LIMIT = 0.80 RATE_LIMIT = 10 STREAM_COUNTER = 0 +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ + { + 'open_nsfw': + { + 'hashes': + { + 'content_analyser': + { + 'url': resolve_download_url('models-3.0.0', 'open_nsfw.hash'), + 'path': resolve_relative_path('../.assets/models/open_nsfw.hash') + } + }, + 'sources': + { + 'content_analyser': + { + 'url': resolve_download_url('models-3.0.0', 'open_nsfw.onnx'), + 'path': resolve_relative_path('../.assets/models/open_nsfw.onnx') + } + }, + 'size': (224, 224), + 'mean': [ 104, 117, 123 ] + } + } + + def get_inference_pool() -> InferencePool: model_sources = get_model_options().get('sources') return inference_manager.get_inference_pool(__name__, model_sources) @@ -50,15 +54,14 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: - return MODEL_SET.get('open_nsfw') + return create_static_model_set('full').get('open_nsfw') def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def analyse_stream(vision_frame : VisionFrame, video_fps : Fps) -> bool: @@ -100,23 +103,22 @@ def prepare_frame(vision_frame : VisionFrame) -> VisionFrame: @lru_cache(maxsize = None) def analyse_image(image_path : str) -> bool: - frame = read_image(image_path) - return analyse_frame(frame) + vision_frame = read_image(image_path) + return analyse_frame(vision_frame) @lru_cache(maxsize = None) -def analyse_video(video_path : str, start_frame : int, end_frame : int) -> bool: - video_frame_total = count_video_frame_total(video_path) +def analyse_video(video_path : str, trim_frame_start : int, trim_frame_end : int) -> bool: video_fps = detect_video_fps(video_path) - frame_range = range(start_frame or 0, end_frame or video_frame_total) + frame_range = range(trim_frame_start, trim_frame_end) rate = 0.0 counter = 0 with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress: for frame_number in frame_range: if frame_number % int(video_fps) == 0: - frame = get_video_frame(video_path, frame_number) - if analyse_frame(frame): + vision_frame = get_video_frame(video_path, frame_number) + if analyse_frame(vision_frame): counter += 1 rate = counter * int(video_fps) / len(frame_range) * 100 progress.update() diff --git a/facefusion/core.py b/facefusion/core.py index f0f7dfe7..38d64dbc 100755 --- a/facefusion/core.py +++ b/facefusion/core.py @@ -1,3 +1,4 @@ +import itertools import shutil import signal import sys @@ -6,7 +7,7 @@ from time import time import numpy from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, logger, process_manager, state_manager, voice_extractor, 
wording -from facefusion.args import apply_args, collect_job_args, reduce_step_args +from facefusion.args import apply_args, collect_job_args, reduce_job_args, reduce_step_args from facefusion.common_helper import get_first from facefusion.content_analyser import analyse_image, analyse_video from facefusion.download import conditional_download_hashes, conditional_download_sources @@ -15,7 +16,7 @@ from facefusion.face_analyser import get_average_face, get_many_faces, get_one_f from facefusion.face_selector import sort_and_filter_faces from facefusion.face_store import append_reference_face, clear_reference_faces, get_reference_faces from facefusion.ffmpeg import copy_image, extract_frames, finalize_image, merge_video, replace_audio, restore_audio -from facefusion.filesystem import filter_audio_paths, is_image, is_video, list_directory, resolve_relative_path +from facefusion.filesystem import filter_audio_paths, is_image, is_video, list_directory, resolve_file_pattern from facefusion.jobs import job_helper, job_manager, job_runner from facefusion.jobs.job_list import compose_job_list from facefusion.memory import limit_system_memory @@ -25,7 +26,7 @@ from facefusion.program_helper import validate_args from facefusion.statistics import conditional_log_statistics from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_file_path, get_temp_frame_paths, move_temp_file from facefusion.typing import Args, ErrorCode -from facefusion.vision import get_video_frame, pack_resolution, read_image, read_static_images, restrict_image_resolution, restrict_video_fps, restrict_video_resolution, unpack_resolution +from facefusion.vision import get_video_frame, pack_resolution, read_image, read_static_images, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution def cli() -> None: @@ -41,6 +42,8 @@ def cli() -> None: route(args) else: program.print_help() + else: + hard_exit(2) def route(args : Args) -> None: @@ -65,12 +68,18 @@ def route(args : Args) -> None: for ui_layout in ui.get_ui_layouts_modules(state_manager.get_item('ui_layouts')): if not ui_layout.pre_check(): return conditional_exit(2) + ui.init() ui.launch() if state_manager.get_item('command') == 'headless-run': if not job_manager.init_jobs(state_manager.get_item('jobs_path')): hard_exit(1) error_core = process_headless(args) hard_exit(error_core) + if state_manager.get_item('command') == 'batch-run': + if not job_manager.init_jobs(state_manager.get_item('jobs_path')): + hard_exit(1) + error_core = process_batch(args) + hard_exit(error_core) if state_manager.get_item('command') in [ 'job-run', 'job-run-all', 'job-retry', 'job-retry-all' ]: if not job_manager.init_jobs(state_manager.get_item('jobs_path')): hard_exit(1) @@ -79,8 +88,8 @@ def route(args : Args) -> None: def pre_check() -> bool: - if sys.version_info < (3, 9): - logger.error(wording.get('python_not_supported').format(version = '3.9'), __name__) + if sys.version_info < (3, 10): + logger.error(wording.get('python_not_supported').format(version = '3.10'), __name__) return False if not shutil.which('curl'): logger.error(wording.get('curl_not_installed'), __name__) @@ -92,7 +101,7 @@ def pre_check() -> bool: def common_pre_check() -> bool: - modules =\ + common_modules =\ [ content_analyser, face_classifier, @@ -103,7 +112,7 @@ def common_pre_check() -> bool: voice_extractor ] - return all(module.pre_check() for module in modules) + return all(module.pre_check() for module in common_modules) def 
processors_pre_check() -> bool: @@ -113,64 +122,28 @@ def processors_pre_check() -> bool: return True -def conditional_process() -> ErrorCode: - start_time = time() - for processor_module in get_processors_modules(state_manager.get_item('processors')): - if not processor_module.pre_process('output'): - return 2 - conditional_append_reference_faces() - if is_image(state_manager.get_item('target_path')): - return process_image(start_time) - if is_video(state_manager.get_item('target_path')): - return process_video(start_time) - return 0 - - -def conditional_append_reference_faces() -> None: - if 'reference' in state_manager.get_item('face_selector_mode') and not get_reference_faces(): - source_frames = read_static_images(state_manager.get_item('source_paths')) - source_faces = get_many_faces(source_frames) - source_face = get_average_face(source_faces) - if is_video(state_manager.get_item('target_path')): - reference_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) - else: - reference_frame = read_image(state_manager.get_item('target_path')) - reference_faces = sort_and_filter_faces(get_many_faces([ reference_frame ])) - reference_face = get_one_face(reference_faces, state_manager.get_item('reference_face_position')) - append_reference_face('origin', reference_face) - - if source_face and reference_face: - for processor_module in get_processors_modules(state_manager.get_item('processors')): - abstract_reference_frame = processor_module.get_reference_frame(source_face, reference_face, reference_frame) - if numpy.any(abstract_reference_frame): - abstract_reference_faces = sort_and_filter_faces(get_many_faces([ abstract_reference_frame ])) - abstract_reference_face = get_one_face(abstract_reference_faces, state_manager.get_item('reference_face_position')) - append_reference_face(processor_module.__name__, abstract_reference_face) - - def force_download() -> ErrorCode: - download_directory_path = resolve_relative_path('../.assets/models') - available_processors = list_directory('facefusion/processors/modules') common_modules =\ [ content_analyser, face_classifier, face_detector, face_landmarker, - face_recognizer, face_masker, + face_recognizer, voice_extractor ] + available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ] processor_modules = get_processors_modules(available_processors) for module in common_modules + processor_modules: - if hasattr(module, 'MODEL_SET'): - for model in module.MODEL_SET.values(): + if hasattr(module, 'create_static_model_set'): + for model in module.create_static_model_set(state_manager.get_item('download_scope')).values(): model_hashes = model.get('hashes') model_sources = model.get('sources') if model_hashes and model_sources: - if not conditional_download_hashes(download_directory_path, model_hashes) or not conditional_download_sources(download_directory_path, model_sources): + if not conditional_download_hashes(model_hashes) or not conditional_download_sources(model_sources): return 1 return 0 @@ -279,6 +252,44 @@ def route_job_runner() -> ErrorCode: return 2 +def process_headless(args : Args) -> ErrorCode: + job_id = job_helper.suggest_job_id('headless') + step_args = reduce_step_args(args) + + if job_manager.create_job(job_id) and job_manager.add_step(job_id, step_args) and job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step): + return 0 + return 1 + + +def process_batch(args : Args) -> ErrorCode: + job_id = 
job_helper.suggest_job_id('batch') + step_args = reduce_step_args(args) + job_args = reduce_job_args(args) + source_paths = resolve_file_pattern(job_args.get('source_pattern')) + target_paths = resolve_file_pattern(job_args.get('target_pattern')) + + if job_manager.create_job(job_id): + if source_paths and target_paths: + for index, (source_path, target_path) in enumerate(itertools.product(source_paths, target_paths)): + step_args['source_paths'] = [ source_path ] + step_args['target_path'] = target_path + step_args['output_path'] = job_args.get('output_pattern').format(index = index) + if not job_manager.add_step(job_id, step_args): + return 1 + if job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step): + return 0 + + if not source_paths and target_paths: + for index, target_path in enumerate(target_paths): + step_args['target_path'] = target_path + step_args['output_path'] = job_args.get('output_pattern').format(index = index) + if not job_manager.add_step(job_id, step_args): + return 1 + if job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step): + return 0 + return 1 + + def process_step(job_id : str, step_index : int, step_args : Args) -> bool: clear_reference_faces() step_total = job_manager.count_step_total(job_id) @@ -292,13 +303,39 @@ def process_step(job_id : str, step_index : int, step_args : Args) -> bool: return False -def process_headless(args : Args) -> ErrorCode: - job_id = job_helper.suggest_job_id('headless') - step_args = reduce_step_args(args) +def conditional_process() -> ErrorCode: + start_time = time() + for processor_module in get_processors_modules(state_manager.get_item('processors')): + if not processor_module.pre_process('output'): + return 2 + conditional_append_reference_faces() + if is_image(state_manager.get_item('target_path')): + return process_image(start_time) + if is_video(state_manager.get_item('target_path')): + return process_video(start_time) + return 0 - if job_manager.create_job(job_id) and job_manager.add_step(job_id, step_args) and job_manager.submit_job(job_id) and job_runner.run_job(job_id, process_step): - return 0 - return 1 + +def conditional_append_reference_faces() -> None: + if 'reference' in state_manager.get_item('face_selector_mode') and not get_reference_faces(): + source_frames = read_static_images(state_manager.get_item('source_paths')) + source_faces = get_many_faces(source_frames) + source_face = get_average_face(source_faces) + if is_video(state_manager.get_item('target_path')): + reference_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) + else: + reference_frame = read_image(state_manager.get_item('target_path')) + reference_faces = sort_and_filter_faces(get_many_faces([ reference_frame ])) + reference_face = get_one_face(reference_faces, state_manager.get_item('reference_face_position')) + append_reference_face('origin', reference_face) + + if source_face and reference_face: + for processor_module in get_processors_modules(state_manager.get_item('processors')): + abstract_reference_frame = processor_module.get_reference_frame(source_face, reference_face, reference_frame) + if numpy.any(abstract_reference_frame): + abstract_reference_faces = sort_and_filter_faces(get_many_faces([ abstract_reference_frame ])) + abstract_reference_face = get_one_face(abstract_reference_faces, state_manager.get_item('reference_face_position')) + append_reference_face(processor_module.__name__, abstract_reference_face) def 
process_image(start_time : float) -> ErrorCode: @@ -352,7 +389,8 @@ def process_image(start_time : float) -> ErrorCode: def process_video(start_time : float) -> ErrorCode: - if analyse_video(state_manager.get_item('target_path'), state_manager.get_item('trim_frame_start'), state_manager.get_item('trim_frame_end')): + trim_frame_start, trim_frame_end = restrict_trim_frame(state_manager.get_item('target_path'), state_manager.get_item('trim_frame_start'), state_manager.get_item('trim_frame_end')) + if analyse_video(state_manager.get_item('target_path'), trim_frame_start, trim_frame_end): return 3 # clear temp logger.debug(wording.get('clearing_temp'), __name__) @@ -365,7 +403,7 @@ def process_video(start_time : float) -> ErrorCode: temp_video_resolution = pack_resolution(restrict_video_resolution(state_manager.get_item('target_path'), unpack_resolution(state_manager.get_item('output_video_resolution')))) temp_video_fps = restrict_video_fps(state_manager.get_item('target_path'), state_manager.get_item('output_video_fps')) logger.info(wording.get('extracting_frames').format(resolution = temp_video_resolution, fps = temp_video_fps), __name__) - if extract_frames(state_manager.get_item('target_path'), temp_video_resolution, temp_video_fps): + if extract_frames(state_manager.get_item('target_path'), temp_video_resolution, temp_video_fps, trim_frame_start, trim_frame_end): logger.debug(wording.get('extracting_frames_succeed'), __name__) else: if is_process_stopping(): @@ -414,7 +452,7 @@ def process_video(start_time : float) -> ErrorCode: logger.warn(wording.get('replacing_audio_skipped'), __name__) move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path')) else: - if restore_audio(state_manager.get_item('target_path'), state_manager.get_item('output_path'), state_manager.get_item('output_video_fps')): + if restore_audio(state_manager.get_item('target_path'), state_manager.get_item('output_path'), state_manager.get_item('output_video_fps'), trim_frame_start, trim_frame_end): logger.debug(wording.get('restoring_audio_succeed'), __name__) else: if is_process_stopping(): diff --git a/facefusion/download.py b/facefusion/download.py index 43c92a0e..663f9ba4 100644 --- a/facefusion/download.py +++ b/facefusion/download.py @@ -1,22 +1,23 @@ import os import shutil -import ssl import subprocess -import urllib.request from functools import lru_cache -from typing import List, Tuple +from typing import List, Optional, Tuple from urllib.parse import urlparse from tqdm import tqdm +import facefusion.choices from facefusion import logger, process_manager, state_manager, wording -from facefusion.common_helper import is_macos from facefusion.filesystem import get_file_size, is_file, remove_file from facefusion.hash_helper import validate_hash -from facefusion.typing import DownloadSet +from facefusion.typing import DownloadProvider, DownloadSet -if is_macos(): - ssl._create_default_https_context = ssl._create_unverified_context + +def open_curl(args : List[str]) -> subprocess.Popen[bytes]: + commands = [ shutil.which('curl'), '--silent', '--insecure', '--location' ] + commands.extend(args) + return subprocess.Popen(commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE) def conditional_download(download_directory_path : str, urls : List[str]) -> None: @@ -24,14 +25,15 @@ def conditional_download(download_directory_path : str, urls : List[str]) -> Non download_file_name = os.path.basename(urlparse(url).path) download_file_path = os.path.join(download_directory_path, 
download_file_name) initial_size = get_file_size(download_file_path) - download_size = get_download_size(url) + download_size = get_static_download_size(url) if initial_size < download_size: with tqdm(total = download_size, initial = initial_size, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress: - subprocess.Popen([ shutil.which('curl'), '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ]) + commands = [ '--create-dirs', '--continue-at', '-', '--output', download_file_path, url ] + open_curl(commands) current_size = initial_size + progress.set_postfix(download_providers = state_manager.get_item('download_providers'), file_name = download_file_name) - progress.set_postfix(file = download_file_name) while current_size < download_size: if is_file(download_file_path): current_size = get_file_size(download_file_path) @@ -39,34 +41,42 @@ def conditional_download(download_directory_path : str, urls : List[str]) -> Non @lru_cache(maxsize = None) -def get_download_size(url : str) -> int: - try: - response = urllib.request.urlopen(url, timeout = 10) - content_length = response.headers.get('Content-Length') - return int(content_length) - except (OSError, TypeError, ValueError): - return 0 +def get_static_download_size(url : str) -> int: + commands = [ '-I', url ] + process = open_curl(commands) + lines = reversed(process.stdout.readlines()) + + for line in lines: + __line__ = line.decode().lower() + if 'content-length:' in __line__: + _, content_length = __line__.split('content-length:') + return int(content_length) + + return 0 -def is_download_done(url : str, file_path : str) -> bool: - if is_file(file_path): - return get_download_size(url) == get_file_size(file_path) - return False +@lru_cache(maxsize = None) +def ping_static_url(url : str) -> bool: + commands = [ '-I', url ] + process = open_curl(commands) + process.communicate() + return process.returncode == 0 -def conditional_download_hashes(download_directory_path : str, hashes : DownloadSet) -> bool: +def conditional_download_hashes(hashes : DownloadSet) -> bool: hash_paths = [ hashes.get(hash_key).get('path') for hash_key in hashes.keys() ] process_manager.check() - if not state_manager.get_item('skip_download'): - _, invalid_hash_paths = validate_hash_paths(hash_paths) - if invalid_hash_paths: - for index in hashes: - if hashes.get(index).get('path') in invalid_hash_paths: - invalid_hash_url = hashes.get(index).get('url') - conditional_download(download_directory_path, [ invalid_hash_url ]) + _, invalid_hash_paths = validate_hash_paths(hash_paths) + if invalid_hash_paths: + for index in hashes: + if hashes.get(index).get('path') in invalid_hash_paths: + invalid_hash_url = hashes.get(index).get('url') + download_directory_path = os.path.dirname(hashes.get(index).get('path')) + conditional_download(download_directory_path, [ invalid_hash_url ]) valid_hash_paths, invalid_hash_paths = validate_hash_paths(hash_paths) + for valid_hash_path in valid_hash_paths: valid_hash_file_name, _ = os.path.splitext(os.path.basename(valid_hash_path)) logger.debug(wording.get('validating_hash_succeed').format(hash_file_name = valid_hash_file_name), __name__) @@ -79,19 +89,20 @@ def conditional_download_hashes(download_directory_path : str, hashes : Download return not invalid_hash_paths -def conditional_download_sources(download_directory_path : str, sources : 
DownloadSet) -> bool: +def conditional_download_sources(sources : DownloadSet) -> bool: source_paths = [ sources.get(source_key).get('path') for source_key in sources.keys() ] process_manager.check() - if not state_manager.get_item('skip_download'): - _, invalid_source_paths = validate_source_paths(source_paths) - if invalid_source_paths: - for index in sources: - if sources.get(index).get('path') in invalid_source_paths: - invalid_source_url = sources.get(index).get('url') - conditional_download(download_directory_path, [ invalid_source_url ]) + _, invalid_source_paths = validate_source_paths(source_paths) + if invalid_source_paths: + for index in sources: + if sources.get(index).get('path') in invalid_source_paths: + invalid_source_url = sources.get(index).get('url') + download_directory_path = os.path.dirname(sources.get(index).get('path')) + conditional_download(download_directory_path, [ invalid_source_url ]) valid_source_paths, invalid_source_paths = validate_source_paths(source_paths) + for valid_source_path in valid_source_paths: valid_source_file_name, _ = os.path.splitext(os.path.basename(valid_source_path)) logger.debug(wording.get('validating_source_succeed').format(source_file_name = valid_source_file_name), __name__) @@ -129,3 +140,22 @@ def validate_source_paths(source_paths : List[str]) -> Tuple[List[str], List[str else: invalid_source_paths.append(source_path) return valid_source_paths, invalid_source_paths + + +def resolve_download_url(base_name : str, file_name : str) -> Optional[str]: + download_providers = state_manager.get_item('download_providers') + + for download_provider in download_providers: + if ping_download_provider(download_provider): + return resolve_download_url_by_provider(download_provider, base_name, file_name) + return None + + +def ping_download_provider(download_provider : DownloadProvider) -> bool: + download_provider_value = facefusion.choices.download_provider_set.get(download_provider) + return ping_static_url(download_provider_value.get('url')) + + +def resolve_download_url_by_provider(download_provider : DownloadProvider, base_name : str, file_name : str) -> Optional[str]: + download_provider_value = facefusion.choices.download_provider_set.get(download_provider) + return download_provider_value.get('url') + download_provider_value.get('path').format(base_name = base_name, file_name = file_name) diff --git a/facefusion/execution.py b/facefusion/execution.py index 6ce3a697..6fe73a23 100644 --- a/facefusion/execution.py +++ b/facefusion/execution.py @@ -1,46 +1,43 @@ +import shutil import subprocess import xml.etree.ElementTree as ElementTree from functools import lru_cache -from typing import Any, List +from typing import Any, List, Optional from onnxruntime import get_available_providers, set_default_logger_severity -from facefusion.choices import execution_provider_set -from facefusion.typing import ExecutionDevice, ExecutionProviderKey, ExecutionProviderSet, ValueAndUnit +import facefusion.choices +from facefusion.typing import ExecutionDevice, ExecutionProvider, ValueAndUnit set_default_logger_severity(3) -def get_execution_provider_choices() -> List[ExecutionProviderKey]: - return list(get_available_execution_provider_set().keys()) +def has_execution_provider(execution_provider : ExecutionProvider) -> bool: + return execution_provider in get_available_execution_providers() -def has_execution_provider(execution_provider_key : ExecutionProviderKey) -> bool: - return execution_provider_key in get_execution_provider_choices() +def 
get_available_execution_providers() -> List[ExecutionProvider]: + inference_execution_providers = get_available_providers() + available_execution_providers = [] + + for execution_provider, execution_provider_value in facefusion.choices.execution_provider_set.items(): + if execution_provider_value in inference_execution_providers: + available_execution_providers.append(execution_provider) + + return available_execution_providers -def get_available_execution_provider_set() -> ExecutionProviderSet: - available_execution_providers = get_available_providers() - available_execution_provider_set : ExecutionProviderSet = {} +def create_inference_execution_providers(execution_device_id : str, execution_providers : List[ExecutionProvider]) -> List[Any]: + inference_execution_providers : List[Any] = [] - for execution_provider_key, execution_provider_value in execution_provider_set.items(): - if execution_provider_value in available_execution_providers: - available_execution_provider_set[execution_provider_key] = execution_provider_value - return available_execution_provider_set - - -def create_execution_providers(execution_device_id : str, execution_provider_keys : List[ExecutionProviderKey]) -> List[Any]: - execution_providers : List[Any] = [] - - for execution_provider_key in execution_provider_keys: - if execution_provider_key == 'cuda': - execution_providers.append((execution_provider_set.get(execution_provider_key), + for execution_provider in execution_providers: + if execution_provider == 'cuda': + inference_execution_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), { - 'device_id': execution_device_id, - 'cudnn_conv_algo_search': 'EXHAUSTIVE' if use_exhaustive() else 'DEFAULT' + 'device_id': execution_device_id })) - if execution_provider_key == 'tensorrt': - execution_providers.append((execution_provider_set.get(execution_provider_key), + if execution_provider == 'tensorrt': + inference_execution_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), { 'device_id': execution_device_id, 'trt_engine_cache_enable': True, @@ -49,35 +46,28 @@ def create_execution_providers(execution_device_id : str, execution_provider_key 'trt_timing_cache_path': '.caches', 'trt_builder_optimization_level': 5 })) - if execution_provider_key == 'openvino': - execution_providers.append((execution_provider_set.get(execution_provider_key), + if execution_provider == 'openvino': + inference_execution_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), { - 'device_type': 'GPU.' + execution_device_id, + 'device_type': 'GPU' if execution_device_id == '0' else 'GPU.' 
+ execution_device_id, 'precision': 'FP32' })) - if execution_provider_key in [ 'directml', 'rocm' ]: - execution_providers.append((execution_provider_set.get(execution_provider_key), + if execution_provider in [ 'directml', 'rocm' ]: + inference_execution_providers.append((facefusion.choices.execution_provider_set.get(execution_provider), { 'device_id': execution_device_id })) - if execution_provider_key == 'coreml': - execution_providers.append(execution_provider_set.get(execution_provider_key)) + if execution_provider == 'coreml': + inference_execution_providers.append(facefusion.choices.execution_provider_set.get(execution_provider)) - if 'cpu' in execution_provider_keys: - execution_providers.append(execution_provider_set.get('cpu')) + if 'cpu' in execution_providers: + inference_execution_providers.append(facefusion.choices.execution_provider_set.get('cpu')) - return execution_providers - - -def use_exhaustive() -> bool: - execution_devices = detect_static_execution_devices() - product_names = ('GeForce GTX 1630', 'GeForce GTX 1650', 'GeForce GTX 1660') - - return any(execution_device.get('product').get('name').startswith(product_names) for execution_device in execution_devices) + return inference_execution_providers def run_nvidia_smi() -> subprocess.Popen[bytes]: - commands = [ 'nvidia-smi', '--query', '--xml-format' ] + commands = [ shutil.which('nvidia-smi'), '--query', '--xml-format' ] return subprocess.Popen(commands, stdout = subprocess.PIPE) @@ -98,37 +88,44 @@ def detect_execution_devices() -> List[ExecutionDevice]: for gpu_element in root_element.findall('gpu'): execution_devices.append( { - 'driver_version': root_element.find('driver_version').text, + 'driver_version': root_element.findtext('driver_version'), 'framework': { 'name': 'CUDA', - 'version': root_element.find('cuda_version').text + 'version': root_element.findtext('cuda_version') }, 'product': { 'vendor': 'NVIDIA', - 'name': gpu_element.find('product_name').text.replace('NVIDIA ', '') + 'name': gpu_element.findtext('product_name').replace('NVIDIA', '').strip() }, 'video_memory': { - 'total': create_value_and_unit(gpu_element.find('fb_memory_usage/total').text), - 'free': create_value_and_unit(gpu_element.find('fb_memory_usage/free').text) + 'total': create_value_and_unit(gpu_element.findtext('fb_memory_usage/total')), + 'free': create_value_and_unit(gpu_element.findtext('fb_memory_usage/free')) + }, + 'temperature': + { + 'gpu': create_value_and_unit(gpu_element.findtext('temperature/gpu_temp')), + 'memory': create_value_and_unit(gpu_element.findtext('temperature/memory_temp')) }, 'utilization': { - 'gpu': create_value_and_unit(gpu_element.find('utilization/gpu_util').text), - 'memory': create_value_and_unit(gpu_element.find('utilization/memory_util').text) + 'gpu': create_value_and_unit(gpu_element.findtext('utilization/gpu_util')), + 'memory': create_value_and_unit(gpu_element.findtext('utilization/memory_util')) } }) + return execution_devices -def create_value_and_unit(text : str) -> ValueAndUnit: - value, unit = text.split() - value_and_unit : ValueAndUnit =\ - { - 'value': int(value), - 'unit': str(unit) - } +def create_value_and_unit(text : str) -> Optional[ValueAndUnit]: + if ' ' in text: + value, unit = text.split(' ') - return value_and_unit + return\ + { + 'value': int(value), + 'unit': str(unit) + } + return None diff --git a/facefusion/exit_helper.py b/facefusion/exit_helper.py index e2991b43..5c30acab 100644 --- a/facefusion/exit_helper.py +++ b/facefusion/exit_helper.py @@ -1,3 +1,4 @@ +import 
signal import sys from time import sleep @@ -7,6 +8,7 @@ from facefusion.typing import ErrorCode def hard_exit(error_code : ErrorCode) -> None: + signal.signal(signal.SIGINT, signal.SIG_IGN) sys.exit(error_code) diff --git a/facefusion/face_classifier.py b/facefusion/face_classifier.py index 2addbe4b..186a4aeb 100644 --- a/facefusion/face_classifier.py +++ b/facefusion/face_classifier.py @@ -1,40 +1,44 @@ +from functools import lru_cache from typing import List, Tuple import numpy from facefusion import inference_manager -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_helper import warp_face_by_face_landmark_5 from facefusion.filesystem import resolve_relative_path from facefusion.thread_helper import conditional_thread_semaphore -from facefusion.typing import Age, FaceLandmark5, Gender, InferencePool, ModelOptions, ModelSet, Race, VisionFrame +from facefusion.typing import Age, DownloadScope, FaceLandmark5, Gender, InferencePool, ModelOptions, ModelSet, Race, VisionFrame -MODEL_SET : ModelSet =\ -{ - 'fairface': + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'fairface': { - 'face_classifier': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/fairface.hash', - 'path': resolve_relative_path('../.assets/models/fairface.hash') - } - }, - 'sources': - { - 'face_classifier': + 'face_classifier': + { + 'url': resolve_download_url('models-3.0.0', 'fairface.hash'), + 'path': resolve_relative_path('../.assets/models/fairface.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/fairface.onnx', - 'path': resolve_relative_path('../.assets/models/fairface.onnx') - } - }, - 'template': 'arcface_112_v2', - 'size': (224, 224), - 'mean': [ 0.485, 0.456, 0.406 ], - 'standard_deviation': [ 0.229, 0.224, 0.225 ] + 'face_classifier': + { + 'url': resolve_download_url('models-3.0.0', 'fairface.onnx'), + 'path': resolve_relative_path('../.assets/models/fairface.onnx') + } + }, + 'template': 'arcface_112_v2', + 'size': (224, 224), + 'mean': [ 0.485, 0.456, 0.406 ], + 'standard_deviation': [ 0.229, 0.224, 0.225 ] + } } -} def get_inference_pool() -> InferencePool: @@ -47,15 +51,14 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: - return MODEL_SET.get('fairface') + return create_static_model_set('full').get('fairface') def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def classify_face(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5) -> Tuple[Gender, Age, Race]: diff --git a/facefusion/face_detector.py b/facefusion/face_detector.py index 09d104b2..8787b161 100644 --- a/facefusion/face_detector.py +++ b/facefusion/face_detector.py @@ -2,109 +2,114 @@ from typing import List, Tuple import cv2 import numpy +from functools import lru_cache from facefusion import inference_manager, state_manager -from 
facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_helper import create_rotated_matrix_and_size, create_static_anchors, distance_to_bounding_box, distance_to_face_landmark_5, normalize_bounding_box, transform_bounding_box, transform_points from facefusion.filesystem import resolve_relative_path from facefusion.thread_helper import thread_semaphore -from facefusion.typing import Angle, BoundingBox, Detection, DownloadSet, FaceLandmark5, InferencePool, ModelSet, Score, VisionFrame +from facefusion.typing import Angle, BoundingBox, Detection, DownloadScope, DownloadSet, FaceLandmark5, InferencePool, ModelSet, Score, VisionFrame from facefusion.vision import resize_frame_resolution, unpack_resolution -MODEL_SET : ModelSet =\ -{ - 'retinaface': + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'retinaface': { - 'retinaface': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/retinaface_10g.hash', - 'path': resolve_relative_path('../.assets/models/retinaface_10g.hash') + 'retinaface': + { + 'url': resolve_download_url('models-3.0.0', 'retinaface_10g.hash'), + 'path': resolve_relative_path('../.assets/models/retinaface_10g.hash') + } + }, + 'sources': + { + 'retinaface': + { + 'url': resolve_download_url('models-3.0.0', 'retinaface_10g.onnx'), + 'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx') + } } }, - 'sources': + 'scrfd': { - 'retinaface': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/retinaface_10g.onnx', - 'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx') - } - } - }, - 'scrfd': - { - 'hashes': - { - 'scrfd': + 'scrfd': + { + 'url': resolve_download_url('models-3.0.0', 'scrfd_2.5g.hash'), + 'path': resolve_relative_path('../.assets/models/scrfd_2.5g.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/scrfd_2.5g.hash', - 'path': resolve_relative_path('../.assets/models/scrfd_2.5g.hash') + 'scrfd': + { + 'url': resolve_download_url('models-3.0.0', 'scrfd_2.5g.onnx'), + 'path': resolve_relative_path('../.assets/models/scrfd_2.5g.onnx') + } } }, - 'sources': + 'yoloface': { - 'scrfd': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/scrfd_2.5g.onnx', - 'path': resolve_relative_path('../.assets/models/scrfd_2.5g.onnx') - } - } - }, - 'yoloface': - { - 'hashes': - { - 'yoloface': + 'yoloface': + { + 'url': resolve_download_url('models-3.0.0', 'yoloface_8n.hash'), + 'path': resolve_relative_path('../.assets/models/yoloface_8n.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/yoloface_8n.hash', - 'path': resolve_relative_path('../.assets/models/yoloface_8n.hash') - } - }, - 'sources': - { - 'yoloface': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/yoloface_8n.onnx', - 'path': resolve_relative_path('../.assets/models/yoloface_8n.onnx') + 'yoloface': + { + 'url': resolve_download_url('models-3.0.0', 'yoloface_8n.onnx'), + 'path': resolve_relative_path('../.assets/models/yoloface_8n.onnx') + } } } } -} def get_inference_pool() -> InferencePool: _, model_sources = 
collect_model_downloads()
-	model_context = __name__ + '.' + state_manager.get_item('face_detector_model')
-	return inference_manager.get_inference_pool(model_context, model_sources)
+	return inference_manager.get_inference_pool(__name__, model_sources)


def clear_inference_pool() -> None:
-	model_context = __name__ + '.' + state_manager.get_item('face_detector_model')
-	inference_manager.clear_inference_pool(model_context)
+	inference_manager.clear_inference_pool(__name__)


def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
	model_hashes = {}
	model_sources = {}
+	model_set = create_static_model_set('full')

	if state_manager.get_item('face_detector_model') in [ 'many', 'retinaface' ]:
-		model_hashes['retinaface'] = MODEL_SET.get('retinaface').get('hashes').get('retinaface')
-		model_sources['retinaface'] = MODEL_SET.get('retinaface').get('sources').get('retinaface')
+		model_hashes['retinaface'] = model_set.get('retinaface').get('hashes').get('retinaface')
+		model_sources['retinaface'] = model_set.get('retinaface').get('sources').get('retinaface')
+
	if state_manager.get_item('face_detector_model') in [ 'many', 'scrfd' ]:
-		model_hashes['scrfd'] = MODEL_SET.get('scrfd').get('hashes').get('scrfd')
-		model_sources['scrfd'] = MODEL_SET.get('scrfd').get('sources').get('scrfd')
+		model_hashes['scrfd'] = model_set.get('scrfd').get('hashes').get('scrfd')
+		model_sources['scrfd'] = model_set.get('scrfd').get('sources').get('scrfd')
+
	if state_manager.get_item('face_detector_model') in [ 'many', 'yoloface' ]:
-		model_hashes['yoloface'] = MODEL_SET.get('yoloface').get('hashes').get('yoloface')
-		model_sources['yoloface'] = MODEL_SET.get('yoloface').get('sources').get('yoloface')
+		model_hashes['yoloface'] = model_set.get('yoloface').get('hashes').get('yoloface')
+		model_sources['yoloface'] = model_set.get('yoloface').get('sources').get('yoloface')
+
	return model_hashes, model_sources


def pre_check() -> bool:
-	download_directory_path = resolve_relative_path('../.assets/models')
	model_hashes, model_sources = collect_model_downloads()
-	return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources)
+	return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources)


def detect_faces(vision_frame : VisionFrame) -> Tuple[List[BoundingBox], List[Score], List[FaceLandmark5]]:
diff --git a/facefusion/face_helper.py b/facefusion/face_helper.py
index 1265cac6..9218fdb0 100644
--- a/facefusion/face_helper.py
+++ b/facefusion/face_helper.py
@@ -33,6 +33,14 @@ WARP_TEMPLATES : WarpTemplateSet =\
		[ 0.38710391, 0.72160547 ],
		[ 0.61507734, 0.72034453 ]
	]),
+	'dfl_whole_face': numpy.array(
+	[
+		[ 0.35342266, 0.39285716 ],
+		[ 0.62797622, 0.39285716 ],
+		[ 0.48660713, 0.54017860 ],
+		[ 0.38839287, 0.68750011 ],
+		[ 0.59821427, 0.68750011 ]
+	]),
	'ffhq_512': numpy.array(
	[
		[ 0.37691676, 0.46864664 ],
@@ -40,6 +48,22 @@ WARP_TEMPLATES : WarpTemplateSet =\
		[ 0.50123859, 0.61331904 ],
		[ 0.39308822, 0.72541100 ],
		[ 0.61150205, 0.72490465 ]
+	]),
+	'mtcnn_512': numpy.array(
+	[
+		[ 0.36562865, 0.46733799 ],
+		[ 0.63305391, 0.46585885 ],
+		[ 0.50019127, 0.61942959 ],
+		[ 0.39032951, 0.77598822 ],
+		[ 0.61178945, 0.77476328 ]
+	]),
+	'styleganex_384': numpy.array(
+	[
+		[ 0.42353745, 0.52289879 ],
+		[ 0.57725008, 0.52319972 ],
+		[ 0.50123859, 0.61331904 ],
+		[ 0.43364461, 0.68337652 ],
+		[ 0.57015325, 0.68306005 ]
	])
 }
diff --git a/facefusion/face_landmarker.py b/facefusion/face_landmarker.py
index 6760c271..4a87fe1b 100644
--- a/facefusion/face_landmarker.py
+++ b/facefusion/face_landmarker.py
@@ -1,114 +1,118 @@
+from functools import lru_cache
from typing import Tuple

import cv2
import numpy

from facefusion import inference_manager, state_manager
-from facefusion.download import conditional_download_hashes, conditional_download_sources
+from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_helper import create_rotated_matrix_and_size, estimate_matrix_by_face_landmark_5, transform_points, warp_face_by_translation
from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import conditional_thread_semaphore
-from facefusion.typing import Angle, BoundingBox, DownloadSet, FaceLandmark5, FaceLandmark68, InferencePool, ModelSet, Prediction, Score, VisionFrame
+from facefusion.typing import Angle, BoundingBox, DownloadScope, DownloadSet, FaceLandmark5, FaceLandmark68, InferencePool, ModelSet, Prediction, Score, VisionFrame

-MODEL_SET : ModelSet =\
-{
-	'2dfan4':
+
+@lru_cache(maxsize = None)
+def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
+	return\
	{
-		'hashes':
+		'2dfan4':
		{
-			'2dfan4':
+			'hashes':
			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/2dfan4.hash',
-				'path': resolve_relative_path('../.assets/models/2dfan4.hash')
-			}
+				'2dfan4':
+				{
+					'url': resolve_download_url('models-3.0.0', '2dfan4.hash'),
+					'path': resolve_relative_path('../.assets/models/2dfan4.hash')
+				}
+			},
+			'sources':
+			{
+				'2dfan4':
+				{
+					'url': resolve_download_url('models-3.0.0', '2dfan4.onnx'),
+					'path': resolve_relative_path('../.assets/models/2dfan4.onnx')
+				}
+			},
+			'size': (256, 256)
		},
-		'sources':
+		'peppa_wutz':
		{
-			'2dfan4':
+			'hashes':
			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/2dfan4.onnx',
-				'path': resolve_relative_path('../.assets/models/2dfan4.onnx')
-			}
+				'peppa_wutz':
+				{
+					'url': resolve_download_url('models-3.0.0', 'peppa_wutz.hash'),
+					'path': resolve_relative_path('../.assets/models/peppa_wutz.hash')
+				}
+			},
+			'sources':
+			{
+				'peppa_wutz':
+				{
+					'url': resolve_download_url('models-3.0.0', 'peppa_wutz.onnx'),
+					'path': resolve_relative_path('../.assets/models/peppa_wutz.onnx')
+				}
+			},
+			'size': (256, 256)
		},
-		'size': (256, 256)
-	},
-	'peppa_wutz':
-	{
-		'hashes':
+		'fan_68_5':
		{
-			'peppa_wutz':
+			'hashes':
			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/peppa_wutz.hash',
-				'path': resolve_relative_path('../.assets/models/peppa_wutz.hash')
-			}
-		},
-		'sources':
-		{
-			'peppa_wutz':
+				'fan_68_5':
+				{
+					'url': resolve_download_url('models-3.0.0', 'fan_68_5.hash'),
+					'path': resolve_relative_path('../.assets/models/fan_68_5.hash')
+				}
+			},
+			'sources':
			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/peppa_wutz.onnx',
-				'path': resolve_relative_path('../.assets/models/peppa_wutz.onnx')
-			}
-		},
-		'size': (256, 256)
-	},
-	'fan_68_5':
-	{
-		'hashes':
-		{
-			'fan_68_5':
-			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/fan_68_5.hash',
-				'path': resolve_relative_path('../.assets/models/fan_68_5.hash')
-			}
-		},
-		'sources':
-		{
-			'fan_68_5':
-			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/fan_68_5.onnx',
-				'path': resolve_relative_path('../.assets/models/fan_68_5.onnx')
+				'fan_68_5':
+				{
+					'url': resolve_download_url('models-3.0.0', 'fan_68_5.onnx'),
+					'path': resolve_relative_path('../.assets/models/fan_68_5.onnx')
+				}
			}
		}
	}
-}


def get_inference_pool() -> InferencePool:
	_, model_sources = collect_model_downloads()
-	model_context = __name__ + '.' + state_manager.get_item('face_landmarker_model')
-	return inference_manager.get_inference_pool(model_context, model_sources)
+	return inference_manager.get_inference_pool(__name__, model_sources)


def clear_inference_pool() -> None:
-	model_context = __name__ + '.' + state_manager.get_item('face_landmarker_model')
-	inference_manager.clear_inference_pool(model_context)
+	inference_manager.clear_inference_pool(__name__)


def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
+	model_set = create_static_model_set('full')
	model_hashes =\
	{
-		'fan_68_5': MODEL_SET.get('fan_68_5').get('hashes').get('fan_68_5')
+		'fan_68_5': model_set.get('fan_68_5').get('hashes').get('fan_68_5')
	}
	model_sources =\
	{
-		'fan_68_5': MODEL_SET.get('fan_68_5').get('sources').get('fan_68_5')
+		'fan_68_5': model_set.get('fan_68_5').get('sources').get('fan_68_5')
	}

	if state_manager.get_item('face_landmarker_model') in [ 'many', '2dfan4' ]:
-		model_hashes['2dfan4'] = MODEL_SET.get('2dfan4').get('hashes').get('2dfan4')
-		model_sources['2dfan4'] = MODEL_SET.get('2dfan4').get('sources').get('2dfan4')
+		model_hashes['2dfan4'] = model_set.get('2dfan4').get('hashes').get('2dfan4')
+		model_sources['2dfan4'] = model_set.get('2dfan4').get('sources').get('2dfan4')
+
	if state_manager.get_item('face_landmarker_model') in [ 'many', 'peppa_wutz' ]:
-		model_hashes['peppa_wutz'] = MODEL_SET.get('peppa_wutz').get('hashes').get('peppa_wutz')
-		model_sources['peppa_wutz'] = MODEL_SET.get('peppa_wutz').get('sources').get('peppa_wutz')
+		model_hashes['peppa_wutz'] = model_set.get('peppa_wutz').get('hashes').get('peppa_wutz')
+		model_sources['peppa_wutz'] = model_set.get('peppa_wutz').get('sources').get('peppa_wutz')
+
	return model_hashes, model_sources


def pre_check() -> bool:
-	download_directory_path = resolve_relative_path('../.assets/models')
	model_hashes, model_sources = collect_model_downloads()
-	return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources)
+	return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources)


def detect_face_landmarks(vision_frame : VisionFrame, bounding_box : BoundingBox, face_angle : Angle) -> Tuple[FaceLandmark68, Score]:
@@ -119,6 +123,7 @@ def detect_face_landmarks(vision_frame : VisionFrame, bounding_box : BoundingBox
	if state_manager.get_item('face_landmarker_model') in [ 'many', '2dfan4' ]:
		face_landmark_2dfan4, face_landmark_score_2dfan4 = detect_with_2dfan4(vision_frame, bounding_box, face_angle)
+
	if state_manager.get_item('face_landmarker_model') in [ 'many', 'peppa_wutz' ]:
		face_landmark_peppa_wutz, face_landmark_score_peppa_wutz = detect_with_peppa_wutz(vision_frame, bounding_box, face_angle)
@@ -128,7 +133,7 @@ def detect_face_landmarks(vision_frame : VisionFrame, bounding_box : BoundingBox

def detect_with_2dfan4(temp_vision_frame: VisionFrame, bounding_box: BoundingBox, face_angle: Angle) -> Tuple[FaceLandmark68, Score]:
-	model_size = MODEL_SET.get('2dfan4').get('size')
+	model_size = create_static_model_set('full').get('2dfan4').get('size')
	scale = 195 / numpy.subtract(bounding_box[2:], bounding_box[:2]).max().clip(1, None)
	translation = (model_size[0] - numpy.add(bounding_box[2:], bounding_box[:2]) * scale) * 0.5
	rotated_matrix, rotated_size = create_rotated_matrix_and_size(face_angle, model_size)
@@ -147,7 +152,7 @@ def detect_with_2dfan4(temp_vision_frame: VisionFrame, bounding_box: BoundingBox

def detect_with_peppa_wutz(temp_vision_frame : VisionFrame, bounding_box : BoundingBox, face_angle : Angle) -> Tuple[FaceLandmark68, Score]:
-	model_size = MODEL_SET.get('peppa_wutz').get('size')
+	model_size = create_static_model_set('full').get('peppa_wutz').get('size')
	scale = 195 / numpy.subtract(bounding_box[2:], bounding_box[:2]).max().clip(1, None)
	translation = (model_size[0] - numpy.add(bounding_box[2:], bounding_box[:2]) * scale) * 0.5
	rotated_matrix, rotated_size = create_rotated_matrix_and_size(face_angle, model_size)
@@ -167,7 +172,7 @@ def detect_with_peppa_wutz(temp_vision_frame : VisionFrame, bounding_box : Bound

def conditional_optimize_contrast(crop_vision_frame : VisionFrame) -> VisionFrame:
	crop_vision_frame = cv2.cvtColor(crop_vision_frame, cv2.COLOR_RGB2Lab)
-	if numpy.mean(crop_vision_frame[:, :, 0]) < 30: # type:ignore[arg-type]
+	if numpy.mean(crop_vision_frame[:, :, 0]) < 30: #type:ignore[arg-type]
		crop_vision_frame[:, :, 0] = cv2.createCLAHE(clipLimit = 2).apply(crop_vision_frame[:, :, 0])
	crop_vision_frame = cv2.cvtColor(crop_vision_frame, cv2.COLOR_Lab2RGB)
	return crop_vision_frame
diff --git a/facefusion/face_masker.py b/facefusion/face_masker.py
index 3b615b09..6c118fbd 100755
--- a/facefusion/face_masker.py
+++ b/facefusion/face_masker.py
@@ -1,72 +1,103 @@
from functools import lru_cache
-from typing import Dict, List, Tuple
+from typing import List, Tuple

import cv2
import numpy
from cv2.typing import Size

-from facefusion import inference_manager
-from facefusion.download import conditional_download_hashes, conditional_download_sources
+import facefusion.choices
+from facefusion import inference_manager, state_manager
+from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import conditional_thread_semaphore
-from facefusion.typing import DownloadSet, FaceLandmark68, FaceMaskRegion, InferencePool, Mask, ModelSet, Padding, VisionFrame
+from facefusion.typing import DownloadScope, DownloadSet, FaceLandmark68, FaceMaskRegion, InferencePool, Mask, ModelSet, Padding, VisionFrame

-MODEL_SET : ModelSet =\
-{
-	'face_occluder':
+
+@lru_cache(maxsize = None)
+def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
+	return\
	{
-		'hashes':
+		'xseg_1':
		{
-			'face_occluder':
+			'hashes':
			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/dfl_xseg.hash',
-				'path': resolve_relative_path('../.assets/models/dfl_xseg.hash')
-			}
+				'face_occluder':
+				{
+					'url': resolve_download_url('models-3.1.0', 'xseg_1.hash'),
+					'path': resolve_relative_path('../.assets/models/xseg_1.hash')
+				}
+			},
+			'sources':
+			{
+				'face_occluder':
+				{
+					'url': resolve_download_url('models-3.1.0', 'xseg_1.onnx'),
+					'path': resolve_relative_path('../.assets/models/xseg_1.onnx')
+				}
+			},
+			'size': (256, 256)
		},
-		'sources':
+		'xseg_2':
		{
-			'face_occluder':
+			'hashes':
			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/dfl_xseg.onnx',
-				'path': resolve_relative_path('../.assets/models/dfl_xseg.onnx')
-			}
+				'face_occluder':
+				{
+					'url': resolve_download_url('models-3.1.0', 'xseg_2.hash'),
+					'path': resolve_relative_path('../.assets/models/xseg_2.hash')
+				}
+			},
+			'sources':
+			{
+				'face_occluder':
+				{
+					'url': resolve_download_url('models-3.1.0', 'xseg_2.onnx'),
+					'path': resolve_relative_path('../.assets/models/xseg_2.onnx')
+				}
+			},
+			'size': (256, 256)
		},
-		'size': (256, 256)
-	},
-	'face_parser':
-	{
-		'hashes':
+		'bisenet_resnet_18':
		{
-			'face_parser':
+			'hashes':
			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/bisenet_resnet_34.hash',
-				'path': resolve_relative_path('../.assets/models/bisenet_resnet_34.hash')
-			}
+				'face_parser':
+				{
+					'url': resolve_download_url('models-3.1.0', 'bisenet_resnet_18.hash'),
+					'path': resolve_relative_path('../.assets/models/bisenet_resnet_18.hash')
+				}
+			},
+			'sources':
+			{
+				'face_parser':
+				{
+					'url': resolve_download_url('models-3.1.0', 'bisenet_resnet_18.onnx'),
+					'path': resolve_relative_path('../.assets/models/bisenet_resnet_18.onnx')
+				}
+			},
+			'size': (512, 512)
		},
-		'sources':
+		'bisenet_resnet_34':
		{
-			'face_parser':
+			'hashes':
			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/bisenet_resnet_34.onnx',
-				'path': resolve_relative_path('../.assets/models/bisenet_resnet_34.onnx')
-			}
-		},
-		'size': (512, 512)
+				'face_parser':
+				{
+					'url': resolve_download_url('models-3.0.0', 'bisenet_resnet_34.hash'),
+					'path': resolve_relative_path('../.assets/models/bisenet_resnet_34.hash')
+				}
+			},
+			'sources':
+			{
+				'face_parser':
+				{
+					'url': resolve_download_url('models-3.0.0', 'bisenet_resnet_34.onnx'),
+					'path': resolve_relative_path('../.assets/models/bisenet_resnet_34.onnx')
+				}
+			},
+			'size': (512, 512)
+		}
	}
-}

-FACE_MASK_REGIONS : Dict[FaceMaskRegion, int] =\
-{
-	'skin': 1,
-	'left-eyebrow': 2,
-	'right-eyebrow': 3,
-	'left-eye': 4,
-	'right-eye': 5,
-	'glasses': 6,
-	'nose': 10,
-	'mouth': 11,
-	'upper-lip': 12,
-	'lower-lip': 13
-}

def get_inference_pool() -> InferencePool:
@@ -79,24 +110,33 @@ def clear_inference_pool() -> None:

def collect_model_downloads() -> Tuple[DownloadSet, DownloadSet]:
-	model_hashes =\
-	{
-		'face_occluder': MODEL_SET.get('face_occluder').get('hashes').get('face_occluder'),
-		'face_parser': MODEL_SET.get('face_parser').get('hashes').get('face_parser')
-	}
-	model_sources =\
-	{
-		'face_occluder': MODEL_SET.get('face_occluder').get('sources').get('face_occluder'),
-		'face_parser': MODEL_SET.get('face_parser').get('sources').get('face_parser')
-	}
+	model_hashes = {}
+	model_sources = {}
+	model_set = create_static_model_set('full')
+
+	if state_manager.get_item('face_occluder_model') == 'xseg_1':
+		model_hashes['xseg_1'] = model_set.get('xseg_1').get('hashes').get('face_occluder')
+		model_sources['xseg_1'] = model_set.get('xseg_1').get('sources').get('face_occluder')
+
+	if state_manager.get_item('face_occluder_model') == 'xseg_2':
+		model_hashes['xseg_2'] = model_set.get('xseg_2').get('hashes').get('face_occluder')
+		model_sources['xseg_2'] = model_set.get('xseg_2').get('sources').get('face_occluder')
+
+	if state_manager.get_item('face_parser_model') == 'bisenet_resnet_18':
+		model_hashes['bisenet_resnet_18'] = model_set.get('bisenet_resnet_18').get('hashes').get('face_parser')
+		model_sources['bisenet_resnet_18'] = model_set.get('bisenet_resnet_18').get('sources').get('face_parser')
+
+	if state_manager.get_item('face_parser_model') == 'bisenet_resnet_34':
+		model_hashes['bisenet_resnet_34'] = model_set.get('bisenet_resnet_34').get('hashes').get('face_parser')
+		model_sources['bisenet_resnet_34'] = model_set.get('bisenet_resnet_34').get('sources').get('face_parser')
+
	return model_hashes, model_sources


def pre_check() -> bool:
-	download_directory_path = resolve_relative_path('../.assets/models')
	model_hashes, model_sources = collect_model_downloads()
-	return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources)
+	return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources)


@lru_cache(maxsize = None)
@@ -114,7 +154,8 @@ def create_static_box_mask(crop_size : Size, face_mask_blur : float, face_mask_p

def create_occlusion_mask(crop_vision_frame : VisionFrame) -> Mask:
-	model_size = MODEL_SET.get('face_occluder').get('size')
+	face_occluder_model = state_manager.get_item('face_occluder_model')
+	model_size = create_static_model_set('full').get(face_occluder_model).get('size')
	prepare_vision_frame = cv2.resize(crop_vision_frame, model_size)
	prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0).astype(numpy.float32) / 255
	prepare_vision_frame = prepare_vision_frame.transpose(0, 1, 2, 3)
@@ -126,7 +167,8 @@ def create_occlusion_mask(crop_vision_frame : VisionFrame) -> Mask:

def create_region_mask(crop_vision_frame : VisionFrame, face_mask_regions : List[FaceMaskRegion]) -> Mask:
-	model_size = MODEL_SET.get('face_parser').get('size')
+	face_parser_model = state_manager.get_item('face_parser_model')
+	model_size = create_static_model_set('full').get(face_parser_model).get('size')
	prepare_vision_frame = cv2.resize(crop_vision_frame, model_size)
	prepare_vision_frame = prepare_vision_frame[:, :, ::-1].astype(numpy.float32) / 255
	prepare_vision_frame = numpy.subtract(prepare_vision_frame, numpy.array([ 0.485, 0.456, 0.406 ]).astype(numpy.float32))
@@ -134,7 +176,7 @@ def create_region_mask(crop_vision_frame : VisionFrame, face_mask_regions : List
	prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0)
	prepare_vision_frame = prepare_vision_frame.transpose(0, 3, 1, 2)
	region_mask = forward_parse_face(prepare_vision_frame)
-	region_mask = numpy.isin(region_mask.argmax(0), [ FACE_MASK_REGIONS[region] for region in face_mask_regions ])
+	region_mask = numpy.isin(region_mask.argmax(0), [ facefusion.choices.face_mask_region_set.get(face_mask_region) for face_mask_region in face_mask_regions ])
	region_mask = cv2.resize(region_mask.astype(numpy.float32), crop_vision_frame.shape[:2][::-1])
	region_mask = (cv2.GaussianBlur(region_mask.clip(0, 1), (0, 0), 5).clip(0.5, 1) - 0.5) * 2
	return region_mask
@@ -150,7 +192,8 @@ def create_mouth_mask(face_landmark_68 : FaceLandmark68) -> Mask:

def forward_occlude_face(prepare_vision_frame : VisionFrame) -> Mask:
-	face_occluder = get_inference_pool().get('face_occluder')
+	face_occluder_model = state_manager.get_item('face_occluder_model')
+	face_occluder = get_inference_pool().get(face_occluder_model)

	with conditional_thread_semaphore():
		occlusion_mask : Mask = face_occluder.run(None,
@@ -162,7 +205,8 @@ def forward_occlude_face(prepare_vision_frame : VisionFrame) -> Mask:

def forward_parse_face(prepare_vision_frame : VisionFrame) -> Mask:
-	face_parser = get_inference_pool().get('face_parser')
+	face_parser_model = state_manager.get_item('face_parser_model')
+	face_parser = get_inference_pool().get(face_parser_model)

	with conditional_thread_semaphore():
		region_mask : Mask = face_parser.run(None,
diff --git a/facefusion/face_recognizer.py b/facefusion/face_recognizer.py
index 5ed85c42..9bcd703b 100644
--- a/facefusion/face_recognizer.py
+++ b/facefusion/face_recognizer.py
@@ -1,38 +1,42 @@
+from functools import lru_cache
from typing import Tuple

import numpy

from facefusion import inference_manager
-from facefusion.download import conditional_download_hashes, conditional_download_sources
+from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url
from facefusion.face_helper import warp_face_by_face_landmark_5
from facefusion.filesystem import resolve_relative_path
from facefusion.thread_helper import conditional_thread_semaphore
-from facefusion.typing import Embedding, FaceLandmark5, InferencePool, ModelOptions, ModelSet, VisionFrame
+from facefusion.typing import DownloadScope, Embedding, FaceLandmark5, InferencePool, ModelOptions, ModelSet, VisionFrame

-MODEL_SET : ModelSet =\
-{
-	'arcface':
+
+@lru_cache(maxsize = None)
+def create_static_model_set(download_scope : DownloadScope) -> ModelSet:
+	return\
	{
-		'hashes':
+		'arcface':
		{
-			'face_recognizer':
+			'hashes':
			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_w600k_r50.hash',
-				'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.hash')
-			}
-		},
-		'sources':
-		{
-			'face_recognizer':
+				'face_recognizer':
+				{
+					'url': resolve_download_url('models-3.0.0', 'arcface_w600k_r50.hash'),
+					'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.hash')
+				}
+			},
+			'sources':
			{
-				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_w600k_r50.onnx',
-				'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
-			}
-		},
-		'template': 'arcface_112_v2',
-		'size': (112, 112)
+				'face_recognizer':
+				{
+					'url': resolve_download_url('models-3.0.0', 'arcface_w600k_r50.onnx'),
+					'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
+				}
+			},
+			'template': 'arcface_112_v2',
+			'size': (112, 112)
+		}
	}
-}


def get_inference_pool() -> InferencePool:
@@ -45,15 +49,14 @@ def clear_inference_pool() -> None:

def get_model_options() -> ModelOptions:
-	return MODEL_SET.get('arcface')
+	return create_static_model_set('full').get('arcface')


def pre_check() -> bool:
-	download_directory_path = resolve_relative_path('../.assets/models')
	model_hashes = get_model_options().get('hashes')
	model_sources = get_model_options().get('sources')
-	return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources)
+	return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources)


def calc_embedding(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5) -> Tuple[Embedding, Embedding]:
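The masker now reads the occluder and parser model from the state manager and looks region indices up in facefusion.choices.face_mask_region_set instead of a module local dict. A rough sketch of that lookup, reusing the index mapping the removed FACE_MASK_REGIONS constant carried:

import numpy

# index mapping as carried by the removed FACE_MASK_REGIONS constant
face_mask_region_set = { 'skin': 1, 'left-eyebrow': 2, 'right-eyebrow': 3, 'left-eye': 4, 'right-eye': 5, 'glasses': 6, 'nose': 10, 'mouth': 11, 'upper-lip': 12, 'lower-lip': 13 }

# toy 2x2 label map standing in for the parser output after argmax(0)
region_labels = numpy.array([ [ 1, 10 ], [ 12, 4 ] ])
region_mask = numpy.isin(region_labels, [ face_mask_region_set.get(face_mask_region) for face_mask_region in [ 'skin', 'nose' ] ])
# region_mask -> [[ True, True ], [ False, False ]]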
diff --git a/facefusion/face_selector.py b/facefusion/face_selector.py
index a3385e96..8738140e 100644
--- a/facefusion/face_selector.py
+++ b/facefusion/face_selector.py
@@ -33,17 +33,17 @@ def calc_face_distance(face : Face, reference_face : Face) -> float:
def sort_and_filter_faces(faces : List[Face]) -> List[Face]:
	if faces:
		if state_manager.get_item('face_selector_order'):
-			faces = sort_by_order(faces, state_manager.get_item('face_selector_order'))
+			faces = sort_faces_by_order(faces, state_manager.get_item('face_selector_order'))
		if state_manager.get_item('face_selector_gender'):
-			faces = filter_by_gender(faces, state_manager.get_item('face_selector_gender'))
+			faces = filter_faces_by_gender(faces, state_manager.get_item('face_selector_gender'))
		if state_manager.get_item('face_selector_race'):
-			faces = filter_by_race(faces, state_manager.get_item('face_selector_race'))
+			faces = filter_faces_by_race(faces, state_manager.get_item('face_selector_race'))
		if state_manager.get_item('face_selector_age_start') or state_manager.get_item('face_selector_age_end'):
-			faces = filter_by_age(faces, state_manager.get_item('face_selector_age_start'), state_manager.get_item('face_selector_age_end'))
+			faces = filter_faces_by_age(faces, state_manager.get_item('face_selector_age_start'), state_manager.get_item('face_selector_age_end'))
	return faces


-def sort_by_order(faces : List[Face], order : FaceSelectorOrder) -> List[Face]:
+def sort_faces_by_order(faces : List[Face], order : FaceSelectorOrder) -> List[Face]:
	if order == 'left-right':
		return sorted(faces, key = lambda face: face.bounding_box[0])
	if order == 'right-left':
@@ -63,7 +63,7 @@ def sort_by_order(faces : List[Face], order : FaceSelectorOrder) -> List[Face]:
	return faces


-def filter_by_gender(faces : List[Face], gender : Gender) -> List[Face]:
+def filter_faces_by_gender(faces : List[Face], gender : Gender) -> List[Face]:
	filter_faces = []

	for face in faces:
@@ -72,7 +72,7 @@ def filter_by_gender(faces : List[Face], gender : Gender) -> List[Face]:
	return filter_faces


-def filter_by_age(faces : List[Face], face_selector_age_start : int, face_selector_age_end : int) -> List[Face]:
+def filter_faces_by_age(faces : List[Face], face_selector_age_start : int, face_selector_age_end : int) -> List[Face]:
	filter_faces = []
	age = range(face_selector_age_start, face_selector_age_end)
@@ -82,7 +82,7 @@ def filter_by_age(faces : List[Face], face_select
	return filter_faces


-def filter_by_race(faces : List[Face], race : Race) -> List[Face]:
+def filter_faces_by_race(faces : List[Face], race : Race) -> List[Face]:
	filter_faces = []

	for face in faces:
diff --git a/facefusion/ffmpeg.py b/facefusion/ffmpeg.py
index 68107c51..0cd3c1f9 100644
--- a/facefusion/ffmpeg.py
+++ b/facefusion/ffmpeg.py
@@ -5,22 +5,50 @@ import tempfile
from typing import List, Optional

import filetype
+from tqdm import tqdm

-from facefusion import logger, process_manager, state_manager
+from facefusion import logger, process_manager, state_manager, wording
from facefusion.filesystem import remove_file
-from facefusion.temp_helper import get_temp_file_path, get_temp_frames_pattern
-from facefusion.typing import AudioBuffer, Fps, OutputVideoPreset
-from facefusion.vision import restrict_video_fps
+from facefusion.temp_helper import get_temp_file_path, get_temp_frame_paths, get_temp_frames_pattern
+from facefusion.typing import AudioBuffer, Fps, OutputVideoPreset, UpdateProgress
+from facefusion.vision import count_trim_frame_total, detect_video_duration, restrict_video_fps


-def run_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]:
-	commands = [ shutil.which('ffmpeg'), '-hide_banner', '-loglevel', 'error' ]
+def run_ffmpeg_with_progress(args: List[str], update_progress : UpdateProgress) -> subprocess.Popen[bytes]:
+	log_level = state_manager.get_item('log_level')
+	commands = [ shutil.which('ffmpeg'), '-hide_banner', '-nostats', '-loglevel', 'error', '-progress', '-' ]
	commands.extend(args)
	process = subprocess.Popen(commands, stderr = subprocess.PIPE, stdout = subprocess.PIPE)

	while process_manager.is_processing():
		try:
-			if state_manager.get_item('log_level') == 'debug':
+
+			while __line__ := process.stdout.readline().decode().lower():
+				if 'frame=' in __line__:
+					_, frame_number = __line__.split('frame=')
+					update_progress(int(frame_number))
+
+			if log_level == 'debug':
+				log_debug(process)
+			process.wait(timeout = 0.5)
+		except subprocess.TimeoutExpired:
+			continue
+		return process
+
+	if process_manager.is_stopping():
+		process.terminate()
+	return process
+
+
+def run_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]:
+	log_level = state_manager.get_item('log_level')
+	commands = [ shutil.which('ffmpeg'), '-hide_banner', '-nostats', '-loglevel', 'error' ]
+	commands.extend(args)
+	process = subprocess.Popen(commands, stderr = subprocess.PIPE, stdout = subprocess.PIPE)
+
+	while process_manager.is_processing():
+		try:
+			if log_level == 'debug':
				log_debug(process)
			process.wait(timeout = 0.5)
		except subprocess.TimeoutExpired:
@@ -33,7 +61,7 @@ def run_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]:

def open_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]:
-	commands = [ shutil.which('ffmpeg'), '-hide_banner', '-loglevel', 'quiet' ]
+	commands = [ shutil.which('ffmpeg'), '-loglevel', 'quiet' ]
	commands.extend(args)
	return subprocess.Popen(commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
@@ -47,9 +75,8 @@ def log_debug(process : subprocess.Popen[bytes]) -> None:
			logger.debug(error.strip(), __name__)


-def extract_frames(target_path : str, temp_video_resolution : str, temp_video_fps : Fps) -> bool:
-	trim_frame_start = state_manager.get_item('trim_frame_start')
-	trim_frame_end = state_manager.get_item('trim_frame_end')
+def extract_frames(target_path : str, temp_video_resolution : str, temp_video_fps : Fps, trim_frame_start : int, trim_frame_end : int) -> bool:
+	extract_frame_total = count_trim_frame_total(target_path, trim_frame_start, trim_frame_end)
	temp_frames_pattern = get_temp_frames_pattern(target_path, '%08d')
	commands = [ '-i', target_path, '-s', str(temp_video_resolution), '-q:v', '0' ]
@@ -62,34 +89,48 @@ def extract_frames(target_path : str, temp_video_resolution : str, temp_video_fp
	else:
		commands.extend([ '-vf', 'fps=' + str(temp_video_fps) ])
	commands.extend([ '-vsync', '0', temp_frames_pattern ])
-	return run_ffmpeg(commands).returncode == 0
+
+	with tqdm(total = extract_frame_total, desc = wording.get('extracting'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
+		process = run_ffmpeg_with_progress(commands, lambda frame_number: progress.update(frame_number - progress.n))
+		return process.returncode == 0


-def merge_video(target_path : str, output_video_resolution : str, output_video_fps : Fps) -> bool:
+def merge_video(target_path : str, output_video_resolution : str, output_video_fps: Fps) -> bool:
+	output_video_encoder = state_manager.get_item('output_video_encoder')
+	output_video_quality = state_manager.get_item('output_video_quality')
+	output_video_preset = state_manager.get_item('output_video_preset')
+	merge_frame_total = len(get_temp_frame_paths(target_path))
	temp_video_fps = restrict_video_fps(target_path, output_video_fps)
	temp_file_path = get_temp_file_path(target_path)
	temp_frames_pattern = get_temp_frames_pattern(target_path, '%08d')
-	commands = [ '-r', str(temp_video_fps), '-i', temp_frames_pattern, '-s', str(output_video_resolution), '-c:v', state_manager.get_item('output_video_encoder') ]
+	is_webm = filetype.guess_mime(target_path) == 'video/webm'

-	if state_manager.get_item('output_video_encoder') in [ 'libx264', 'libx265' ]:
-		output_video_compression = round(51 - (state_manager.get_item('output_video_quality') * 0.51))
-		commands.extend([ '-crf', str(output_video_compression), '-preset', state_manager.get_item('output_video_preset') ])
-	if state_manager.get_item('output_video_encoder') in [ 'libvpx-vp9' ]:
-		output_video_compression = round(63 - (state_manager.get_item('output_video_quality') * 0.63))
+	if is_webm:
+		output_video_encoder = 'libvpx-vp9'
+	commands = [ '-r', str(temp_video_fps), '-i', temp_frames_pattern, '-s', str(output_video_resolution), '-c:v', output_video_encoder ]
+
+	if output_video_encoder in [ 'libx264', 'libx265' ]:
+		output_video_compression = round(51 - (output_video_quality * 0.51))
+		commands.extend([ '-crf', str(output_video_compression), '-preset', output_video_preset ])
+	if output_video_encoder in [ 'libvpx-vp9' ]:
+		output_video_compression = round(63 - (output_video_quality * 0.63))
		commands.extend([ '-crf', str(output_video_compression) ])
-	if state_manager.get_item('output_video_encoder') in [ 'h264_nvenc', 'hevc_nvenc' ]:
-		output_video_compression = round(51 - (state_manager.get_item('output_video_quality') * 0.51))
-		commands.extend([ '-cq', str(output_video_compression), '-preset', map_nvenc_preset(state_manager.get_item('output_video_preset')) ])
-	if state_manager.get_item('output_video_encoder') in [ 'h264_amf', 'hevc_amf' ]:
-		output_video_compression = round(51 - (state_manager.get_item('output_video_quality') * 0.51))
-		commands.extend([ '-qp_i', str(output_video_compression), '-qp_p', str(output_video_compression), '-quality', map_amf_preset(state_manager.get_item('output_video_preset')) ])
-	if state_manager.get_item('output_video_encoder') in [ 'h264_videotoolbox', 'hevc_videotoolbox' ]:
-		commands.extend([ '-q:v', str(state_manager.get_item('output_video_quality')) ])
+	if output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
+		output_video_compression = round(51 - (output_video_quality * 0.51))
+		commands.extend([ '-cq', str(output_video_compression), '-preset', map_nvenc_preset(output_video_preset) ])
+	if output_video_encoder in [ 'h264_amf', 'hevc_amf' ]:
+		output_video_compression = round(51 - (output_video_quality * 0.51))
+		commands.extend([ '-qp_i', str(output_video_compression), '-qp_p', str(output_video_compression), '-quality', map_amf_preset(output_video_preset) ])
+	if output_video_encoder in [ 'h264_videotoolbox', 'hevc_videotoolbox' ]:
+		commands.extend([ '-q:v', str(output_video_quality) ])
	commands.extend([ '-vf', 'framerate=fps=' + str(output_video_fps), '-pix_fmt', 'yuv420p', '-colorspace', 'bt709', '-y', temp_file_path ])
-	return run_ffmpeg(commands).returncode == 0
+
+	with tqdm(total = merge_frame_total, desc = wording.get('merging'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
+		process = run_ffmpeg_with_progress(commands, lambda frame_number: progress.update(frame_number - progress.n))
+		return process.returncode == 0


def concat_video(output_path : str, temp_output_paths : List[str]) -> bool:
+	output_audio_encoder = state_manager.get_item('output_audio_encoder')
	concat_video_path = tempfile.mktemp()

	with open(concat_video_path, 'w') as concat_video_file:
@@ -97,7 +138,7 @@ def concat_video(output_path : str, temp_output_paths : List[str]) -> bool:
			concat_video_file.write('file \'' + os.path.abspath(temp_output_path) + '\'' + os.linesep)
		concat_video_file.flush()
		concat_video_file.close()
-	commands = [ '-f', 'concat', '-safe', '0', '-i', concat_video_file.name, '-c:v', 'copy', '-c:a', state_manager.get_item('output_audio_encoder'), '-y', os.path.abspath(output_path) ]
+	commands = [ '-f', 'concat', '-safe', '0', '-i', concat_video_file.name, '-c:v', 'copy', '-c:a', output_audio_encoder, '-y', os.path.abspath(output_path) ]
	process = run_ffmpeg(commands)
	process.communicate()
	remove_file(concat_video_path)
@@ -112,8 +153,9 @@ def copy_image(target_path : str, temp_image_resolution : str) -> bool:

def finalize_image(target_path : str, output_path : str, output_image_resolution : str) -> bool:
+	output_image_quality = state_manager.get_item('output_image_quality')
	temp_file_path = get_temp_file_path(target_path)
-	output_image_compression = calc_image_compression(target_path, state_manager.get_item('output_image_quality'))
+	output_image_compression = calc_image_compression(target_path, output_image_quality)
	commands = [ '-i', temp_file_path, '-s', str(output_image_resolution), '-q:v', str(output_image_compression), '-y', output_path ]
	return run_ffmpeg(commands).returncode == 0
@@ -134,10 +176,10 @@ def read_audio_buffer(target_path : str, sample_rate : int, channel_total : int)
	return None


-def restore_audio(target_path : str, output_path : str, output_video_fps : Fps) -> bool:
-	trim_frame_start = state_manager.get_item('trim_frame_start')
-	trim_frame_end = state_manager.get_item('trim_frame_end')
+def restore_audio(target_path : str, output_path : str, output_video_fps : Fps, trim_frame_start : int, trim_frame_end : int) -> bool:
+	output_audio_encoder = state_manager.get_item('output_audio_encoder')
	temp_file_path = get_temp_file_path(target_path)
+	temp_video_duration = detect_video_duration(temp_file_path)
	commands = [ '-i', temp_file_path ]

	if isinstance(trim_frame_start, int):
@@ -146,13 +188,15 @@ def restore_audio(target_path : str, output_path : str, output_video_fps : Fps)
	if isinstance(trim_frame_end, int):
		end_time = trim_frame_end / output_video_fps
		commands.extend([ '-to', str(end_time) ])
-	commands.extend([ '-i', target_path, '-c:v', 'copy', '-c:a', state_manager.get_item('output_audio_encoder'), '-map', '0:v:0', '-map', '1:a:0', '-shortest', '-y', output_path ])
+	commands.extend([ '-i', target_path, '-c:v', 'copy', '-c:a', output_audio_encoder, '-map', '0:v:0', '-map', '1:a:0', '-t', str(temp_video_duration), '-y', output_path ])
	return run_ffmpeg(commands).returncode == 0


def replace_audio(target_path : str, audio_path : str, output_path : str) -> bool:
+	output_audio_encoder = state_manager.get_item('output_audio_encoder')
	temp_file_path = get_temp_file_path(target_path)
-	commands = [ '-i', temp_file_path, '-i', audio_path, '-c:a', state_manager.get_item('output_audio_encoder'), '-af', 'apad', '-shortest', '-y', output_path ]
+	temp_video_duration = detect_video_duration(temp_file_path)
+	commands = [ '-i', temp_file_path, '-i', audio_path, '-c:v', 'copy', '-c:a', output_audio_encoder, '-t', str(temp_video_duration), '-y', output_path ]
	return run_ffmpeg(commands).returncode == 0
@@ -174,3 +218,13 @@ def map_amf_preset(output_video_preset : OutputVideoPreset) -> Optional[str]:
	if output_video_preset in [ 'slow', 'slower', 'veryslow' ]:
		return 'quality'
	return None
+
+
+def map_qsv_preset(output_video_preset : OutputVideoPreset) -> Optional[str]:
+	if output_video_preset in [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast' ]:
+		return 'fast'
+	if output_video_preset == 'medium':
+		return 'medium'
+	if output_video_preset in [ 'slow', 'slower', 'veryslow' ]:
+		return 'slow'
+	return None
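The new run_ffmpeg_with_progress() keys off ffmpeg's -progress - flag, which streams key=value pairs such as frame=42 on stdout, so the caller can feed absolute frame counts to tqdm. A stripped-down sketch of that loop, without the process_manager integration the real function has:

import shutil
import subprocess
from typing import Callable, List


def run_with_progress(args : List[str], update_progress : Callable[[int], None]) -> int:
	# -progress - makes ffmpeg emit machine readable status lines on stdout
	commands = [ shutil.which('ffmpeg'), '-hide_banner', '-nostats', '-loglevel', 'error', '-progress', '-' ]
	commands.extend(args)
	process = subprocess.Popen(commands, stdout = subprocess.PIPE, stderr = subprocess.PIPE)

	for line in process.stdout:
		key, _, value = line.decode().strip().partition('=')
		if key == 'frame' and value.isdigit():
			update_progress(int(value))  # absolute frame number, not a delta
	return process.wait()

Because the reported frame number is absolute, the callers above convert it into a tqdm delta via lambda frame_number: progress.update(frame_number - progress.n).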
diff --git a/facefusion/filesystem.py b/facefusion/filesystem.py
index ac01c944..09974f93 100644
--- a/facefusion/filesystem.py
+++ b/facefusion/filesystem.py
@@ -1,3 +1,4 @@
+import glob
import os
import shutil
from pathlib import Path
@@ -6,6 +7,7 @@ from typing import List, Optional
import filetype

from facefusion.common_helper import is_windows
+from facefusion.typing import File

if is_windows():
	import ctypes
@@ -125,14 +127,32 @@ def create_directory(directory_path : str) -> bool:
	return False


-def list_directory(directory_path : str) -> Optional[List[str]]:
+def list_directory(directory_path : str) -> Optional[List[File]]:
	if is_directory(directory_path):
-		files = os.listdir(directory_path)
-		files = [ Path(file).stem for file in files if not Path(file).stem.startswith(('.', '__')) ]
-		return sorted(files)
+		file_paths = sorted(os.listdir(directory_path))
+		files: List[File] = []
+
+		for file_path in file_paths:
+			file_name, file_extension = os.path.splitext(file_path)
+
+			if not file_name.startswith(('.', '__')):
+				files.append(
+				{
+					'name': file_name,
+					'extension': file_extension,
+					'path': os.path.join(directory_path, file_path)
+				})
+
+		return files
	return None


+def resolve_file_pattern(file_pattern : str) -> List[str]:
+	if in_directory(file_pattern):
+		return sorted(glob.glob(file_pattern))
+	return []
+
+
def remove_directory(directory_path : str) -> bool:
	if is_directory(directory_path):
		shutil.rmtree(directory_path, ignore_errors = True)
diff --git a/facefusion/inference_manager.py b/facefusion/inference_manager.py
index 91da50c5..884a020d 100644
--- a/facefusion/inference_manager.py
+++ b/facefusion/inference_manager.py
@@ -1,20 +1,18 @@
-from functools import lru_cache
from time import sleep
from typing import List

-import onnx
from onnxruntime import InferenceSession

from facefusion import process_manager, state_manager
from facefusion.app_context import detect_app_context
-from facefusion.execution import create_execution_providers, has_execution_provider
+from facefusion.execution import create_inference_execution_providers
from facefusion.thread_helper import thread_lock
-from facefusion.typing import DownloadSet, ExecutionProviderKey, InferencePool, InferencePoolSet, ModelInitializer
+from facefusion.typing import DownloadSet, ExecutionProvider, InferencePool, InferencePoolSet

INFERENCE_POOLS : InferencePoolSet =\
{
-	'cli': {}, # type:ignore[typeddict-item]
-	'ui': {} # type:ignore[typeddict-item]
+	'cli': {}, #type:ignore[typeddict-item]
+	'ui': {} #type:ignore[typeddict-item]
}
@@ -32,17 +30,16 @@ def get_inference_pool(model_context : str, model_sources : DownloadSet) -> Infe
		if app_context == 'ui' and INFERENCE_POOLS.get('cli').get(inference_context):
			INFERENCE_POOLS['ui'][inference_context] = INFERENCE_POOLS.get('cli').get(inference_context)
		if not INFERENCE_POOLS.get(app_context).get(inference_context):
-			execution_provider_keys = resolve_execution_provider_keys(model_context)
-			INFERENCE_POOLS[app_context][inference_context] = create_inference_pool(model_sources, state_manager.get_item('execution_device_id'), execution_provider_keys)
+			INFERENCE_POOLS[app_context][inference_context] = create_inference_pool(model_sources, state_manager.get_item('execution_device_id'), state_manager.get_item('execution_providers'))

		return INFERENCE_POOLS.get(app_context).get(inference_context)


-def create_inference_pool(model_sources : DownloadSet, execution_device_id : str, execution_provider_keys : List[ExecutionProviderKey]) -> InferencePool:
+def create_inference_pool(model_sources : DownloadSet, execution_device_id : str, execution_providers : List[ExecutionProvider]) -> InferencePool:
	inference_pool : InferencePool = {}

	for model_name in model_sources.keys():
-		inference_pool[model_name] = create_inference_session(model_sources.get(model_name).get('path'), execution_device_id, execution_provider_keys)
+		inference_pool[model_name] = create_inference_session(model_sources.get(model_name).get('path'), execution_device_id, execution_providers)
	return inference_pool
@@ -56,24 +53,11 @@ def clear_inference_pool(model_context : str) -> None:
			del INFERENCE_POOLS[app_context][inference_context]


-def create_inference_session(model_path : str, execution_device_id : str, execution_provider_keys : List[ExecutionProviderKey]) -> InferenceSession:
-	execution_providers = create_execution_providers(execution_device_id, execution_provider_keys)
-	return InferenceSession(model_path, providers = execution_providers)
-
-
-@lru_cache(maxsize = None)
-def get_static_model_initializer(model_path : str) -> ModelInitializer:
-	model = onnx.load(model_path)
-	return onnx.numpy_helper.to_array(model.graph.initializer[-1])
-
-
-def resolve_execution_provider_keys(model_context : str) -> List[ExecutionProviderKey]:
-	if has_execution_provider('coreml') and (model_context.startswith('facefusion.processors.modules.age_modifier') or model_context.startswith('facefusion.processors.modules.frame_colorizer')):
-		return [ 'cpu' ]
-	return state_manager.get_item('execution_providers')
+def create_inference_session(model_path : str, execution_device_id : str, execution_providers : List[ExecutionProvider]) -> InferenceSession:
+	inference_execution_providers = create_inference_execution_providers(execution_device_id, execution_providers)
+	return InferenceSession(model_path, providers = inference_execution_providers)


def get_inference_context(model_context : str) -> str:
-	execution_provider_keys = resolve_execution_provider_keys(model_context)
-	inference_context = model_context + '.' + '_'.join(execution_provider_keys)
+	inference_context = model_context + '.' + '_'.join(state_manager.get_item('execution_providers'))
	return inference_context
diff --git a/facefusion/installer.py b/facefusion/installer.py
index 5691d45f..a18d12fb 100644
--- a/facefusion/installer.py
+++ b/facefusion/installer.py
@@ -13,11 +13,11 @@ from facefusion.common_helper import is_linux, is_macos, is_windows
ONNXRUNTIMES : Dict[str, Tuple[str, str]] = {}

if is_macos():
-	ONNXRUNTIMES['default'] = ('onnxruntime', '1.19.2')
+	ONNXRUNTIMES['default'] = ('onnxruntime', '1.20.1')
else:
-	ONNXRUNTIMES['default'] = ('onnxruntime', '1.19.2')
-	ONNXRUNTIMES['cuda'] = ('onnxruntime-gpu', '1.19.2')
-	ONNXRUNTIMES['openvino'] = ('onnxruntime-openvino', '1.19.0')
+	ONNXRUNTIMES['default'] = ('onnxruntime', '1.20.1')
+	ONNXRUNTIMES['cuda'] = ('onnxruntime-gpu', '1.20.1')
+	ONNXRUNTIMES['openvino'] = ('onnxruntime-openvino', '1.20.0')
if is_linux():
	ONNXRUNTIMES['rocm'] = ('onnxruntime-rocm', '1.18.0')
if is_windows():
@@ -72,7 +72,7 @@ def run(program : ArgumentParser) -> None:
				os.path.join(os.getenv('CONDA_PREFIX'), 'lib'),
				os.path.join(os.getenv('CONDA_PREFIX'), 'lib', python_id, 'site-packages', 'tensorrt_libs')
			])
-			library_paths = [ library_path for library_path in library_paths if os.path.exists(library_path) ]
+			library_paths = list(dict.fromkeys([ library_path for library_path in library_paths if os.path.exists(library_path) ]))

			subprocess.call([ shutil.which('conda'), 'env', 'config', 'vars', 'set', 'LD_LIBRARY_PATH=' + os.pathsep.join(library_paths) ])
@@ -85,10 +85,9 @@ def run(program : ArgumentParser) -> None:
				os.path.join(os.getenv('CONDA_PREFIX'), 'Lib'),
				os.path.join(os.getenv('CONDA_PREFIX'), 'Lib', 'site-packages', 'tensorrt_libs')
			])
-			library_paths = [ library_path for library_path in library_paths if os.path.exists(library_path) ]
+			library_paths = list(dict.fromkeys([ library_path for library_path in library_paths if os.path.exists(library_path) ]))

			subprocess.call([ shutil.which('conda'), 'env', 'config', 'vars', 'set', 'PATH=' + os.pathsep.join(library_paths) ])

		if onnxruntime_version < '1.19.0':
			subprocess.call([ shutil.which('pip'), 'install', 'numpy==1.26.4', '--force-reinstall' ])
-		subprocess.call([ shutil.which('pip'), 'install', 'python-multipart==0.0.12', '--force-reinstall' ])
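With resolve_execution_provider_keys() gone, every session is now built from the user-selected execution providers, and the CoreML special case moves into the age modifier itself (see its diff below). A minimal sketch of session creation under that scheme; the mapping shown is a hypothetical subset of facefusion.choices.execution_provider_set, and device-id handling via provider options is omitted:

from typing import List

from onnxruntime import InferenceSession

# hypothetical subset of facefusion.choices.execution_provider_set
execution_provider_set = { 'cpu': 'CPUExecutionProvider', 'cuda': 'CUDAExecutionProvider' }


def create_session(model_path : str, execution_providers : List[str]) -> InferenceSession:
	providers = [ execution_provider_set.get(execution_provider) for execution_provider in execution_providers ]
	return InferenceSession(model_path, providers = providers)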
job_status_paths:
		create_directory(job_status_path)
	return all(is_directory(status_path) for status_path in job_status_paths)
@@ -88,12 +85,12 @@ def find_jobs(job_status : JobStatus) -> JobSet:

def find_job_ids(job_status : JobStatus) -> List[str]:
	job_pattern = os.path.join(JOBS_PATH, job_status, '*.json')
-	job_files = glob.glob(job_pattern)
-	job_files.sort(key = os.path.getmtime)
+	job_paths = resolve_file_pattern(job_pattern)
+	job_paths.sort(key = os.path.getmtime)
	job_ids = []

-	for job_file in job_files:
-		job_id, _ = os.path.splitext(os.path.basename(job_file))
+	for job_path in job_paths:
+		job_id, _ = os.path.splitext(os.path.basename(job_path))
		job_ids.append(job_id)
	return job_ids
@@ -248,9 +245,9 @@ def find_job_path(job_id : str) -> Optional[str]:
	job_file_name = get_job_file_name(job_id)

	if job_file_name:
-		for job_status in job_statuses:
+		for job_status in facefusion.choices.job_statuses:
			job_pattern = os.path.join(JOBS_PATH, job_status, job_file_name)
-			job_paths = glob.glob(job_pattern)
+			job_paths = resolve_file_pattern(job_pattern)

			for job_path in job_paths:
				return job_path
diff --git a/facefusion/logger.py b/facefusion/logger.py
index c951c0b0..641d6b23 100644
--- a/facefusion/logger.py
+++ b/facefusion/logger.py
@@ -1,14 +1,14 @@
from logging import Logger, basicConfig, getLogger
from typing import Tuple

-from facefusion.choices import log_level_set
+import facefusion.choices
from facefusion.common_helper import get_first, get_last
from facefusion.typing import LogLevel, TableContents, TableHeaders


def init(log_level : LogLevel) -> None:
	basicConfig(format = '%(message)s')
-	get_package_logger().setLevel(log_level_set.get(log_level))
+	get_package_logger().setLevel(facefusion.choices.log_level_set.get(log_level))


def get_package_logger() -> Logger:
diff --git a/facefusion/metadata.py b/facefusion/metadata.py
index bb584233..eea6629f 100644
--- a/facefusion/metadata.py
+++ b/facefusion/metadata.py
@@ -4,7 +4,7 @@ METADATA =\
{
	'name': 'FaceFusion',
	'description': 'Industry leading face manipulation platform',
-	'version': '3.0.1',
+	'version': '3.1.0',
	'license': 'MIT',
	'author': 'Henry Ruhs',
	'url': 'https://facefusion.io'
diff --git a/facefusion/model_helper.py b/facefusion/model_helper.py
new file mode 100644
index 00000000..f5bbfad3
--- /dev/null
+++ b/facefusion/model_helper.py
@@ -0,0 +1,11 @@
+from functools import lru_cache
+
+import onnx
+
+from facefusion.typing import ModelInitializer
+
+
+@lru_cache(maxsize = None)
+def get_static_model_initializer(model_path : str) -> ModelInitializer:
+	model = onnx.load(model_path)
+	return onnx.numpy_helper.to_array(model.graph.initializer[-1])
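get_static_model_initializer() moves into its own module but keeps its lru_cache, so repeated lookups for the same path reuse the decoded array instead of reloading the ONNX graph. Usage stays a one-liner; the path below assumes the inswapper model has already been downloaded:

from facefusion.model_helper import get_static_model_initializer

model_initializer = get_static_model_initializer('.assets/models/inswapper_128.onnx')
print(model_initializer.shape)  # the last graph initializer as a numpy array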
diff --git a/facefusion/processors/choices.py b/facefusion/processors/choices.py
index 094e1f72..e0008afc 100755
--- a/facefusion/processors/choices.py
+++ b/facefusion/processors/choices.py
@@ -1,9 +1,170 @@
from typing import List, Sequence

from facefusion.common_helper import create_float_range, create_int_range
-from facefusion.processors.typing import AgeModifierModel, ExpressionRestorerModel, FaceDebuggerItem, FaceEditorModel, FaceEnhancerModel, FaceSwapperSet, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel
+from facefusion.filesystem import list_directory, resolve_relative_path
+from facefusion.processors.typing import AgeModifierModel, DeepSwapperModel, ExpressionRestorerModel, FaceDebuggerItem, FaceEditorModel, FaceEnhancerModel, FaceSwapperModel, FaceSwapperSet, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel

age_modifier_models : List[AgeModifierModel] = [ 'styleganex_age' ]
+deep_swapper_models : List[DeepSwapperModel] =\
+[
+	'druuzil/adrianne_palicki_384',
+	'druuzil/agnetha_falskog_224',
+	'druuzil/alan_ritchson_320',
+	'druuzil/alicia_vikander_320',
+	'druuzil/amber_midthunder_320',
+	'druuzil/andras_arato_384',
+	'druuzil/andrew_tate_320',
+	'druuzil/anne_hathaway_320',
+	'druuzil/anya_chalotra_320',
+	'druuzil/arnold_schwarzenegger_320',
+	'druuzil/benjamin_affleck_320',
+	'druuzil/benjamin_stiller_384',
+	'druuzil/bradley_pitt_224',
+	'druuzil/brie_larson_384',
+	'druuzil/bryan_cranston_320',
+	'druuzil/catherine_blanchett_352',
+	'druuzil/christian_bale_320',
+	'druuzil/christopher_hemsworth_320',
+	'druuzil/christoph_waltz_384',
+	'druuzil/cillian_murphy_320',
+	'druuzil/cobie_smulders_256',
+	'druuzil/dwayne_johnson_384',
+	'druuzil/edward_norton_320',
+	'druuzil/elisabeth_shue_320',
+	'druuzil/elizabeth_olsen_384',
+	'druuzil/elon_musk_320',
+	'druuzil/emily_blunt_320',
+	'druuzil/emma_stone_384',
+	'druuzil/emma_watson_320',
+	'druuzil/erin_moriarty_384',
+	'druuzil/eva_green_320',
+	'druuzil/ewan_mcgregor_320',
+	'druuzil/florence_pugh_320',
+	'druuzil/freya_allan_320',
+	'druuzil/gary_cole_224',
+	'druuzil/gigi_hadid_224',
+	'druuzil/harrison_ford_384',
+	'druuzil/hayden_christensen_320',
+	'druuzil/heath_ledger_320',
+	'druuzil/henry_cavill_448',
+	'druuzil/hugh_jackman_384',
+	'druuzil/idris_elba_320',
+	'druuzil/jack_nicholson_320',
+	'druuzil/james_mcavoy_320',
+	'druuzil/james_varney_320',
+	'druuzil/jason_momoa_320',
+	'druuzil/jason_statham_320',
+	'druuzil/jennifer_connelly_384',
+	'druuzil/jimmy_donaldson_320',
+	'druuzil/jordan_peterson_384',
+	'druuzil/karl_urban_224',
+	'druuzil/kate_beckinsale_384',
+	'druuzil/laurence_fishburne_384',
+	'druuzil/lili_reinhart_320',
+	'druuzil/mads_mikkelsen_384',
+	'druuzil/mary_winstead_320',
+	'druuzil/margaret_qualley_384',
+	'druuzil/melina_juergens_320',
+	'druuzil/michael_fassbender_320',
+	'druuzil/michael_fox_320',
+	'druuzil/millie_bobby_brown_320',
+	'druuzil/morgan_freeman_320',
+	'druuzil/patrick_stewart_320',
+	'druuzil/rebecca_ferguson_320',
+	'druuzil/scarlett_johansson_320',
+	'druuzil/seth_macfarlane_384',
+	'druuzil/thomas_cruise_320',
+	'druuzil/thomas_hanks_384',
+	'edel/emma_roberts_224',
+	'edel/ivanka_trump_224',
+	'edel/lize_dzjabrailova_224',
+	'edel/sidney_sweeney_224',
+	'edel/winona_ryder_224',
+	'iperov/alexandra_daddario_224',
+	'iperov/alexei_navalny_224',
+	'iperov/amber_heard_224',
+	'iperov/dilraba_dilmurat_224',
+	'iperov/elon_musk_224',
+	'iperov/emilia_clarke_224',
+	'iperov/emma_watson_224',
+	'iperov/erin_moriarty_224',
+	'iperov/jackie_chan_224',
+	'iperov/james_carrey_224',
+	'iperov/jason_statham_320',
+	'iperov/keanu_reeves_320',
+	'iperov/margot_robbie_224',
+	'iperov/natalie_dormer_224',
+	'iperov/nicolas_coppola_224',
+	'iperov/robert_downey_224',
+	'iperov/rowan_atkinson_224',
+	'iperov/ryan_reynolds_224',
+	'iperov/scarlett_johansson_224',
+	'iperov/sylvester_stallone_224',
+	'iperov/thomas_cruise_224',
+	'iperov/thomas_holland_224',
+	'iperov/vin_diesel_224',
+	'iperov/vladimir_putin_224',
+	'jen/angelica_trae_288',
+	'jen/ella_freya_224',
+	'jen/emma_myers_320',
+	'jen/evie_pickerill_224',
+	'jen/kang_hyewon_320',
+	'jen/maddie_mead_224',
+	'jen/nicole_turnbull_288',
+	'mats/alica_schmidt_320',
+	'mats/ashley_alexiss_224',
+	'mats/billie_eilish_224',
+	'mats/brie_larson_224',
+	'mats/cara_delevingne_224',
+	'mats/carolin_kebekus_224',
+	'mats/chelsea_clinton_224',
+	'mats/claire_boucher_224',
+	'mats/corinna_kopf_224',
+	'mats/florence_pugh_224',
+	'mats/hillary_clinton_224',
+	'mats/jenna_fischer_224',
+	'mats/kim_jisoo_320',
+	'mats/mica_suarez_320',
+	'mats/shailene_woodley_224',
+	'mats/shraddha_kapoor_320',
+	'mats/yu_jimin_352',
+	'rumateus/alison_brie_224',
+	'rumateus/amber_heard_224',
+	'rumateus/angelina_jolie_224',
+	'rumateus/aubrey_plaza_224',
+	'rumateus/bridget_regan_224',
+	'rumateus/cobie_smulders_224',
+	'rumateus/deborah_woll_224',
+	'rumateus/dua_lipa_224',
+	'rumateus/emma_stone_224',
+	'rumateus/hailee_steinfeld_224',
+	'rumateus/hilary_duff_224',
+	'rumateus/jessica_alba_224',
+	'rumateus/jessica_biel_224',
+	'rumateus/john_cena_224',
+	'rumateus/kim_kardashian_224',
+	'rumateus/kristen_bell_224',
+	'rumateus/lucy_liu_224',
+	'rumateus/margot_robbie_224',
+	'rumateus/megan_fox_224',
+	'rumateus/meghan_markle_224',
+	'rumateus/millie_bobby_brown_224',
+	'rumateus/natalie_portman_224',
+	'rumateus/nicki_minaj_224',
+	'rumateus/olivia_wilde_224',
+	'rumateus/shay_mitchell_224',
+	'rumateus/sophie_turner_224',
+	'rumateus/taylor_swift_224'
+]
+
+custom_model_files = list_directory(resolve_relative_path('../.assets/models/custom'))
+
+if custom_model_files:
+
+	for model_file in custom_model_files:
+		model_id = '/'.join([ 'custom', model_file.get('name') ])
+		deep_swapper_models.append(model_id)
+
expression_restorer_models : List[ExpressionRestorerModel] = [ 'live_portrait' ]
face_debugger_items : List[FaceDebuggerItem] = [ 'bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender', 'race' ]
face_editor_models : List[FaceEditorModel] = [ 'live_portrait' ]
@@ -14,18 +175,21 @@ face_swapper_set : FaceSwapperSet =\
	'ghost_1_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
	'ghost_2_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
	'ghost_3_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
+	'hififace_unofficial_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
	'inswapper_128': [ '128x128', '256x256', '384x384', '512x512', '768x768', '1024x1024' ],
	'inswapper_128_fp16': [ '128x128', '256x256', '384x384', '512x512', '768x768', '1024x1024' ],
	'simswap_256': [ '256x256', '512x512', '768x768', '1024x1024' ],
	'simswap_unofficial_512': [ '512x512', '768x768', '1024x1024' ],
	'uniface_256': [ '256x256', '512x512', '768x768', '1024x1024' ]
}
+face_swapper_models : List[FaceSwapperModel] = list(face_swapper_set.keys())
frame_colorizer_models : List[FrameColorizerModel] = [ 'ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldify_artistic', 'deoldify_stable' ]
frame_colorizer_sizes : List[str] = [ '192x192', '256x256', '384x384', '512x512' ]
-frame_enhancer_models : List[FrameEnhancerModel] = [ 'clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'span_kendata_x4', 'ultra_sharp_x4' ]
+frame_enhancer_models : List[FrameEnhancerModel] = [ 'clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'real_web_photo_x4', 'realistic_rescaler_x4', 'remacri_x4', 'siax_x4', 'span_kendata_x4', 'swin2_sr_x4', 'ultra_sharp_x4' ]
lip_syncer_models : List[LipSyncerModel] = [ 'wav2lip_96', 'wav2lip_gan_96' ]

age_modifier_direction_range : Sequence[int] = create_int_range(-100, 100, 1)
+deep_swapper_morph_range : Sequence[int] = create_int_range(0, 100, 1)
expression_restorer_factor_range : Sequence[int] = create_int_range(0, 100, 1)
face_editor_eyebrow_direction_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_eye_gaze_horizontal_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
@@ -42,5 +206,6 @@ face_editor_head_pitch_range : Sequence[float] = create_float_range(-1.0, 1.0, 0
face_editor_head_yaw_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_editor_head_roll_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
face_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
+face_enhancer_weight_range : Sequence[float] = create_float_range(0.0, 1.0, 0.05)
frame_colorizer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
frame_enhancer_blend_range : Sequence[int] = create_int_range(0, 100, 1)
diff --git a/facefusion/processors/core.py b/facefusion/processors/core.py
index 7cff5ebf..af65b210 100644
--- a/facefusion/processors/core.py
+++ b/facefusion/processors/core.py
@@ -53,21 +53,10 @@ def get_processors_modules(processors : List[str]) -> List[ModuleType]:
	return processor_modules


-def clear_processors_modules(processors : List[str]) -> None:
-	for processor in processors:
-		processor_module = load_processor_module(processor)
-		processor_module.clear_inference_pool()
-
-
def multi_process_frames(source_paths : List[str], temp_frame_paths : List[str], process_frames : ProcessFrames) -> None:
	queue_payloads = create_queue_payloads(temp_frame_paths)

	with tqdm(total = len(queue_payloads), desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress:
-		progress.set_postfix(
-		{
-			'execution_providers': state_manager.get_item('execution_providers'),
-			'execution_thread_count': state_manager.get_item('execution_thread_count'),
-			'execution_queue_count': state_manager.get_item('execution_queue_count')
-		})
+		progress.set_postfix(execution_providers = state_manager.get_item('execution_providers'))
		with ThreadPoolExecutor(max_workers = state_manager.get_item('execution_thread_count')) as executor:
			futures = []
			queue : Queue[QueuePayload] = create_queue(queue_payloads)
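Because list_directory() now returns name, extension and path per file, the choices module can register every file dropped into .assets/models/custom as a custom/<name> deep swapper model at import time. For example, a hypothetical .assets/models/custom/jane_doe_224.dfm would surface like this:

from facefusion.filesystem import list_directory, resolve_relative_path

custom_model_files = list_directory(resolve_relative_path('../.assets/models/custom'))

if custom_model_files:
	for model_file in custom_model_files:
		print('/'.join([ 'custom', model_file.get('name') ]))  # -> custom/jane_doe_224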
scale_face_landmark_5, warp_face_by_face_landmark_5 from facefusion.face_masker import create_occlusion_mask, create_static_box_mask @@ -19,52 +20,61 @@ from facefusion.face_selector import find_similar_faces, sort_and_filter_faces from facefusion.face_store import get_reference_faces from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.processors import choices as processors_choices -from facefusion.processors.typing import AgeModifierInputs +from facefusion.processors.typing import AgeModifierDirection, AgeModifierInputs from facefusion.program_helper import find_argument_group from facefusion.thread_helper import thread_semaphore -from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, Mask, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame -from facefusion.vision import read_image, read_static_image, write_image +from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.vision import match_frame_color, read_image, read_static_image, write_image -MODEL_SET : ModelSet =\ -{ - 'styleganex_age': + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'styleganex_age': { - 'age_modifier': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/styleganex_age.hash', - 'path': resolve_relative_path('../.assets/models/styleganex_age.hash') - } - }, - 'sources': - { - 'age_modifier': + 'age_modifier': + { + 'url': resolve_download_url('models-3.1.0', 'styleganex_age.hash'), + 'path': resolve_relative_path('../.assets/models/styleganex_age.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/styleganex_age.onnx', - 'path': resolve_relative_path('../.assets/models/styleganex_age.onnx') + 'age_modifier': + { + 'url': resolve_download_url('models-3.1.0', 'styleganex_age.onnx'), + 'path': resolve_relative_path('../.assets/models/styleganex_age.onnx') + } + }, + 'templates': + { + 'target': 'ffhq_512', + 'target_with_background': 'styleganex_384' + }, + 'sizes': + { + 'target': (256, 256), + 'target_with_background': (384, 384) } - }, - 'template': 'ffhq_512', - 'size': (512, 512) + } } -} def get_inference_pool() -> InferencePool: model_sources = get_model_options().get('sources') - model_context = __name__ + '.' + state_manager.get_item('age_modifier_model') - return inference_manager.get_inference_pool(model_context, model_sources) + return inference_manager.get_inference_pool(__name__, model_sources) def clear_inference_pool() -> None: - model_context = __name__ + '.' 
+ state_manager.get_item('age_modifier_model') - inference_manager.clear_inference_pool(model_context) + inference_manager.clear_inference_pool(__name__) def get_model_options() -> ModelOptions: age_modifier_model = state_manager.get_item('age_modifier_model') - return MODEL_SET.get(age_modifier_model) + return create_static_model_set('full').get(age_modifier_model) def register_args(program : ArgumentParser) -> None: @@ -81,11 +91,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def pre_process(mode : ProcessMode) -> bool: @@ -115,15 +124,14 @@ def post_process() -> None: def modify_age(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: - model_template = get_model_options().get('template') - model_size = get_model_options().get('size') - crop_size = (model_size[0] // 2, model_size[1] // 2) + model_templates = get_model_options().get('templates') + model_sizes = get_model_options().get('sizes') face_landmark_5 = target_face.landmark_set.get('5/68').copy() - extend_face_landmark_5 = scale_face_landmark_5(face_landmark_5, 2.0) - crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_template, crop_size) - extend_vision_frame, extend_affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, extend_face_landmark_5, model_template, model_size) + crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_templates.get('target'), model_sizes.get('target')) + extend_face_landmark_5 = scale_face_landmark_5(face_landmark_5, 0.875) + extend_vision_frame, extend_affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, extend_face_landmark_5, model_templates.get('target_with_background'), model_sizes.get('target_with_background')) extend_vision_frame_raw = extend_vision_frame.copy() - box_mask = create_static_box_mask(model_size, state_manager.get_item('face_mask_blur'), (0, 0, 0, 0)) + box_mask = create_static_box_mask(model_sizes.get('target_with_background'), state_manager.get_item('face_mask_blur'), (0, 0, 0, 0)) crop_masks =\ [ box_mask @@ -132,31 +140,36 @@ def modify_age(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFra if 'occlusion' in state_manager.get_item('face_mask_types'): occlusion_mask = create_occlusion_mask(crop_vision_frame) combined_matrix = merge_matrix([ extend_affine_matrix, cv2.invertAffineTransform(affine_matrix) ]) - occlusion_mask = cv2.warpAffine(occlusion_mask, combined_matrix, model_size) + occlusion_mask = cv2.warpAffine(occlusion_mask, combined_matrix, model_sizes.get('target_with_background')) crop_masks.append(occlusion_mask) crop_vision_frame = prepare_vision_frame(crop_vision_frame) extend_vision_frame = prepare_vision_frame(extend_vision_frame) - extend_vision_frame = forward(crop_vision_frame, extend_vision_frame) + age_modifier_direction = numpy.array(numpy.interp(state_manager.get_item('age_modifier_direction'), [-100, 100], [2.5, -2.5])).astype(numpy.float32) + extend_vision_frame = forward(crop_vision_frame, extend_vision_frame, age_modifier_direction) 
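# Reference sketch, not part of this diff: the inlined direction tensor above reproduces
# the prepare_direction() helper that this commit removes further down in the hunk.
# numpy.interp linearly rescales the CLI value from [ -100, 100 ] into the model's
# direction range [ 2.5, -2.5 ]; note the inverted sign, so a positive slider value maps
# onto a negative direction value. Assumes plain numpy only; the function name is illustrative:
import numpy

def prepare_direction_sketch(direction : int) -> numpy.ndarray:
	value = numpy.interp(direction, [ -100, 100 ], [ 2.5, -2.5 ])
	return numpy.array(value).astype(numpy.float32)

assert prepare_direction_sketch(-100) == numpy.float32(2.5)
assert prepare_direction_sketch(0) == numpy.float32(0.0)
assert prepare_direction_sketch(100) == numpy.float32(-2.5)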
extend_vision_frame = normalize_extend_frame(extend_vision_frame) - extend_vision_frame = fix_color(extend_vision_frame_raw, extend_vision_frame) - extend_crop_mask = cv2.pyrUp(numpy.minimum.reduce(crop_masks).clip(0, 1)) - extend_affine_matrix *= extend_vision_frame.shape[0] / 512 - paste_vision_frame = paste_back(temp_vision_frame, extend_vision_frame, extend_crop_mask, extend_affine_matrix) + extend_vision_frame = match_frame_color(extend_vision_frame_raw, extend_vision_frame) + extend_affine_matrix *= (model_sizes.get('target')[0] * 4) / model_sizes.get('target_with_background')[0] + crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1) + crop_mask = cv2.resize(crop_mask, (model_sizes.get('target')[0] * 4, model_sizes.get('target')[1] * 4)) + paste_vision_frame = paste_back(temp_vision_frame, extend_vision_frame, crop_mask, extend_affine_matrix) return paste_vision_frame -def forward(crop_vision_frame : VisionFrame, extend_vision_frame : VisionFrame) -> VisionFrame: +def forward(crop_vision_frame : VisionFrame, extend_vision_frame : VisionFrame, age_modifier_direction : AgeModifierDirection) -> VisionFrame: age_modifier = get_inference_pool().get('age_modifier') age_modifier_inputs = {} + if has_execution_provider('coreml'): + age_modifier.set_providers([ facefusion.choices.execution_provider_set.get('cpu') ]) + for age_modifier_input in age_modifier.get_inputs(): if age_modifier_input.name == 'target': age_modifier_inputs[age_modifier_input.name] = crop_vision_frame if age_modifier_input.name == 'target_with_background': age_modifier_inputs[age_modifier_input.name] = extend_vision_frame if age_modifier_input.name == 'direction': - age_modifier_inputs[age_modifier_input.name] = prepare_direction(state_manager.get_item('age_modifier_direction')) + age_modifier_inputs[age_modifier_input.name] = age_modifier_direction with thread_semaphore(): crop_vision_frame = age_modifier.run(None, age_modifier_inputs)[0][0] @@ -164,38 +177,6 @@ def forward(crop_vision_frame : VisionFrame, extend_vision_frame : VisionFrame) return crop_vision_frame -def fix_color(extend_vision_frame_raw : VisionFrame, extend_vision_frame : VisionFrame) -> VisionFrame: - color_difference = compute_color_difference(extend_vision_frame_raw, extend_vision_frame, (48, 48)) - color_difference_mask = create_static_box_mask(extend_vision_frame.shape[:2][::-1], 1.0, (0, 0, 0, 0)) - color_difference_mask = numpy.stack((color_difference_mask, ) * 3, axis = -1) - extend_vision_frame = normalize_color_difference(color_difference, color_difference_mask, extend_vision_frame) - return extend_vision_frame - - -def compute_color_difference(extend_vision_frame_raw : VisionFrame, extend_vision_frame : VisionFrame, size : Size) -> VisionFrame: - extend_vision_frame_raw = extend_vision_frame_raw.astype(numpy.float32) / 255 - extend_vision_frame_raw = cv2.resize(extend_vision_frame_raw, size, interpolation = cv2.INTER_AREA) - extend_vision_frame = extend_vision_frame.astype(numpy.float32) / 255 - extend_vision_frame = cv2.resize(extend_vision_frame, size, interpolation = cv2.INTER_AREA) - color_difference = extend_vision_frame_raw - extend_vision_frame - return color_difference - - -def normalize_color_difference(color_difference : VisionFrame, color_difference_mask : Mask, extend_vision_frame : VisionFrame) -> VisionFrame: - color_difference = cv2.resize(color_difference, extend_vision_frame.shape[:2][::-1], interpolation = cv2.INTER_CUBIC) - color_difference_mask = 1 - color_difference_mask.clip(0, 0.75) - extend_vision_frame = 
extend_vision_frame.astype(numpy.float32) / 255 - extend_vision_frame += color_difference * color_difference_mask - extend_vision_frame = extend_vision_frame.clip(0, 1) - extend_vision_frame = numpy.multiply(extend_vision_frame, 255).astype(numpy.uint8) - return extend_vision_frame - - -def prepare_direction(direction : int) -> NDArray[Any]: - direction = numpy.interp(float(direction), [ -100, 100 ], [ 2.5, -2.5 ]) #type:ignore[assignment] - return numpy.array(direction).astype(numpy.float32) - - def prepare_vision_frame(vision_frame : VisionFrame) -> VisionFrame: vision_frame = vision_frame[:, :, ::-1] / 255.0 vision_frame = (vision_frame - 0.5) / 0.5 @@ -204,12 +185,13 @@ def prepare_vision_frame(vision_frame : VisionFrame) -> VisionFrame: def normalize_extend_frame(extend_vision_frame : VisionFrame) -> VisionFrame: + model_sizes = get_model_options().get('sizes') extend_vision_frame = numpy.clip(extend_vision_frame, -1, 1) extend_vision_frame = (extend_vision_frame + 1) / 2 extend_vision_frame = extend_vision_frame.transpose(1, 2, 0).clip(0, 255) extend_vision_frame = (extend_vision_frame * 255.0) extend_vision_frame = extend_vision_frame.astype(numpy.uint8)[:, :, ::-1] - extend_vision_frame = cv2.pyrDown(extend_vision_frame) + extend_vision_frame = cv2.resize(extend_vision_frame, (model_sizes.get('target')[0] * 4, model_sizes.get('target')[1] * 4), interpolation = cv2.INTER_AREA) return extend_vision_frame diff --git a/facefusion/processors/modules/deep_swapper.py b/facefusion/processors/modules/deep_swapper.py new file mode 100755 index 00000000..71706cbd --- /dev/null +++ b/facefusion/processors/modules/deep_swapper.py @@ -0,0 +1,444 @@ +from argparse import ArgumentParser +from functools import lru_cache +from typing import List, Tuple + +import cv2 +import numpy +from cv2.typing import Size + +import facefusion.jobs.job_manager +import facefusion.jobs.job_store +import facefusion.processors.core as processors +from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording +from facefusion.common_helper import create_int_metavar +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url_by_provider +from facefusion.face_analyser import get_many_faces, get_one_face +from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 +from facefusion.face_masker import create_occlusion_mask, create_region_mask, create_static_box_mask +from facefusion.face_selector import find_similar_faces, sort_and_filter_faces +from facefusion.face_store import get_reference_faces +from facefusion.filesystem import in_directory, is_image, is_video, list_directory, resolve_relative_path, same_file_extension +from facefusion.processors import choices as processors_choices +from facefusion.processors.typing import DeepSwapperInputs, DeepSwapperMorph +from facefusion.program_helper import find_argument_group +from facefusion.thread_helper import thread_semaphore +from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, Mask, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.vision import conditional_match_frame_color, read_image, read_static_image, write_image + + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + model_config = [] + + if download_scope == 'full': + 
model_config.extend( + [ + ('druuzil', 'adrianne_palicki_384'), + ('druuzil', 'agnetha_falskog_224'), + ('druuzil', 'alan_ritchson_320'), + ('druuzil', 'alicia_vikander_320'), + ('druuzil', 'amber_midthunder_320'), + ('druuzil', 'andras_arato_384'), + ('druuzil', 'andrew_tate_320'), + ('druuzil', 'anne_hathaway_320'), + ('druuzil', 'anya_chalotra_320'), + ('druuzil', 'arnold_schwarzenegger_320'), + ('druuzil', 'benjamin_affleck_320'), + ('druuzil', 'benjamin_stiller_384'), + ('druuzil', 'bradley_pitt_224'), + ('druuzil', 'brie_larson_384'), + ('druuzil', 'bryan_cranston_320'), + ('druuzil', 'catherine_blanchett_352'), + ('druuzil', 'christian_bale_320'), + ('druuzil', 'christopher_hemsworth_320'), + ('druuzil', 'christoph_waltz_384'), + ('druuzil', 'cillian_murphy_320'), + ('druuzil', 'cobie_smulders_256'), + ('druuzil', 'dwayne_johnson_384'), + ('druuzil', 'edward_norton_320'), + ('druuzil', 'elisabeth_shue_320'), + ('druuzil', 'elizabeth_olsen_384'), + ('druuzil', 'elon_musk_320'), + ('druuzil', 'emily_blunt_320'), + ('druuzil', 'emma_stone_384'), + ('druuzil', 'emma_watson_320'), + ('druuzil', 'erin_moriarty_384'), + ('druuzil', 'eva_green_320'), + ('druuzil', 'ewan_mcgregor_320'), + ('druuzil', 'florence_pugh_320'), + ('druuzil', 'freya_allan_320'), + ('druuzil', 'gary_cole_224'), + ('druuzil', 'gigi_hadid_224'), + ('druuzil', 'harrison_ford_384'), + ('druuzil', 'hayden_christensen_320'), + ('druuzil', 'heath_ledger_320'), + ('druuzil', 'henry_cavill_448'), + ('druuzil', 'hugh_jackman_384'), + ('druuzil', 'idris_elba_320'), + ('druuzil', 'jack_nicholson_320'), + ('druuzil', 'james_mcavoy_320'), + ('druuzil', 'james_varney_320'), + ('druuzil', 'jason_momoa_320'), + ('druuzil', 'jason_statham_320'), + ('druuzil', 'jennifer_connelly_384'), + ('druuzil', 'jimmy_donaldson_320'), + ('druuzil', 'jordan_peterson_384'), + ('druuzil', 'karl_urban_224'), + ('druuzil', 'kate_beckinsale_384'), + ('druuzil', 'laurence_fishburne_384'), + ('druuzil', 'lili_reinhart_320'), + ('druuzil', 'mads_mikkelsen_384'), + ('druuzil', 'mary_winstead_320'), + ('druuzil', 'margaret_qualley_384'), + ('druuzil', 'melina_juergens_320'), + ('druuzil', 'michael_fassbender_320'), + ('druuzil', 'michael_fox_320'), + ('druuzil', 'millie_bobby_brown_320'), + ('druuzil', 'morgan_freeman_320'), + ('druuzil', 'patrick_stewart_320'), + ('druuzil', 'rebecca_ferguson_320'), + ('druuzil', 'scarlett_johansson_320'), + ('druuzil', 'seth_macfarlane_384'), + ('druuzil', 'thomas_cruise_320'), + ('druuzil', 'thomas_hanks_384'), + ('edel', 'emma_roberts_224'), + ('edel', 'ivanka_trump_224'), + ('edel', 'lize_dzjabrailova_224'), + ('edel', 'sidney_sweeney_224'), + ('edel', 'winona_ryder_224') + ]) + if download_scope in [ 'lite', 'full' ]: + model_config.extend( + [ + ('iperov', 'alexandra_daddario_224'), + ('iperov', 'alexei_navalny_224'), + ('iperov', 'amber_heard_224'), + ('iperov', 'dilraba_dilmurat_224'), + ('iperov', 'elon_musk_224'), + ('iperov', 'emilia_clarke_224'), + ('iperov', 'emma_watson_224'), + ('iperov', 'erin_moriarty_224'), + ('iperov', 'jackie_chan_224'), + ('iperov', 'james_carrey_224'), + ('iperov', 'jason_statham_320'), + ('iperov', 'keanu_reeves_320'), + ('iperov', 'margot_robbie_224'), + ('iperov', 'natalie_dormer_224'), + ('iperov', 'nicolas_coppola_224'), + ('iperov', 'robert_downey_224'), + ('iperov', 'rowan_atkinson_224'), + ('iperov', 'ryan_reynolds_224'), + ('iperov', 'scarlett_johansson_224'), + ('iperov', 'sylvester_stallone_224'), + ('iperov', 'thomas_cruise_224'), + ('iperov', 'thomas_holland_224'), + 
('iperov', 'vin_diesel_224'), + ('iperov', 'vladimir_putin_224') + ]) + if download_scope == 'full': + model_config.extend( + [ + ('jen', 'angelica_trae_288'), + ('jen', 'ella_freya_224'), + ('jen', 'emma_myers_320'), + ('jen', 'evie_pickerill_224'), + ('jen', 'kang_hyewon_320'), + ('jen', 'maddie_mead_224'), + ('jen', 'nicole_turnbull_288'), + ('mats', 'alica_schmidt_320'), + ('mats', 'ashley_alexiss_224'), + ('mats', 'billie_eilish_224'), + ('mats', 'brie_larson_224'), + ('mats', 'cara_delevingne_224'), + ('mats', 'carolin_kebekus_224'), + ('mats', 'chelsea_clinton_224'), + ('mats', 'claire_boucher_224'), + ('mats', 'corinna_kopf_224'), + ('mats', 'florence_pugh_224'), + ('mats', 'hillary_clinton_224'), + ('mats', 'jenna_fischer_224'), + ('mats', 'kim_jisoo_320'), + ('mats', 'mica_suarez_320'), + ('mats', 'shailene_woodley_224'), + ('mats', 'shraddha_kapoor_320'), + ('mats', 'yu_jimin_352'), + ('rumateus', 'alison_brie_224'), + ('rumateus', 'amber_heard_224'), + ('rumateus', 'angelina_jolie_224'), + ('rumateus', 'aubrey_plaza_224'), + ('rumateus', 'bridget_regan_224'), + ('rumateus', 'cobie_smulders_224'), + ('rumateus', 'deborah_woll_224'), + ('rumateus', 'dua_lipa_224'), + ('rumateus', 'emma_stone_224'), + ('rumateus', 'hailee_steinfeld_224'), + ('rumateus', 'hilary_duff_224'), + ('rumateus', 'jessica_alba_224'), + ('rumateus', 'jessica_biel_224'), + ('rumateus', 'john_cena_224'), + ('rumateus', 'kim_kardashian_224'), + ('rumateus', 'kristen_bell_224'), + ('rumateus', 'lucy_liu_224'), + ('rumateus', 'margot_robbie_224'), + ('rumateus', 'megan_fox_224'), + ('rumateus', 'meghan_markle_224'), + ('rumateus', 'millie_bobby_brown_224'), + ('rumateus', 'natalie_portman_224'), + ('rumateus', 'nicki_minaj_224'), + ('rumateus', 'olivia_wilde_224'), + ('rumateus', 'shay_mitchell_224'), + ('rumateus', 'sophie_turner_224'), + ('rumateus', 'taylor_swift_224') + ]) + model_set : ModelSet = {} + + for model_scope, model_name in model_config: + model_id = '/'.join([ model_scope, model_name ]) + + model_set[model_id] =\ + { + 'hashes': + { + 'deep_swapper': + { + 'url': resolve_download_url_by_provider('huggingface', 'deepfacelive-models-' + model_scope, model_name + '.hash'), + 'path': resolve_relative_path('../.assets/models/' + model_scope + '/' + model_name + '.hash') + } + }, + 'sources': + { + 'deep_swapper': + { + 'url': resolve_download_url_by_provider('huggingface', 'deepfacelive-models-' + model_scope, model_name + '.dfm'), + 'path': resolve_relative_path('../.assets/models/' + model_scope + '/' + model_name + '.dfm') + } + }, + 'template': 'dfl_whole_face' + } + + custom_model_files = list_directory(resolve_relative_path('../.assets/models/custom')) + + if custom_model_files: + + for model_file in custom_model_files: + model_id = '/'.join([ 'custom', model_file.get('name') ]) + + model_set[model_id] =\ + { + 'sources': + { + 'deep_swapper': + { + 'path': resolve_relative_path(model_file.get('path')) + } + }, + 'template': 'dfl_whole_face' + } + + return model_set + + +def get_inference_pool() -> InferencePool: + model_sources = get_model_options().get('sources') + return inference_manager.get_inference_pool(__name__, model_sources) + + +def clear_inference_pool() -> None: + inference_manager.clear_inference_pool(__name__) + + +def get_model_options() -> ModelOptions: + deep_swapper_model = state_manager.get_item('deep_swapper_model') + return create_static_model_set('full').get(deep_swapper_model) + + +def get_model_size() -> Size: + deep_swapper = get_inference_pool().get('deep_swapper') + 
deep_swapper_outputs = deep_swapper.get_outputs() + + for deep_swapper_output in deep_swapper_outputs: + return deep_swapper_output.shape[1:3] + return 0, 0 + + +def register_args(program : ArgumentParser) -> None: + group_processors = find_argument_group(program, 'processors') + if group_processors: + group_processors.add_argument('--deep-swapper-model', help = wording.get('help.deep_swapper_model'), default = config.get_str_value('processors.deep_swapper_model', 'iperov/elon_musk_224'), choices = processors_choices.deep_swapper_models) + group_processors.add_argument('--deep-swapper-morph', help = wording.get('help.deep_swapper_morph'), type = int, default = config.get_int_value('processors.deep_swapper_morph', '80'), choices = processors_choices.deep_swapper_morph_range, metavar = create_int_metavar(processors_choices.deep_swapper_morph_range)) + facefusion.jobs.job_store.register_step_keys([ 'deep_swapper_model', 'deep_swapper_morph' ]) + + +def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: + apply_state_item('deep_swapper_model', args.get('deep_swapper_model')) + apply_state_item('deep_swapper_morph', args.get('deep_swapper_morph')) + + +def pre_check() -> bool: + model_hashes = get_model_options().get('hashes') + model_sources = get_model_options().get('sources') + + if model_hashes and model_sources: + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) + return True + + +def pre_process(mode : ProcessMode) -> bool: + if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): + logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__) + return False + if mode == 'output' and not in_directory(state_manager.get_item('output_path')): + logger.error(wording.get('specify_image_or_video_output') + wording.get('exclamation_mark'), __name__) + return False + if mode == 'output' and not same_file_extension([ state_manager.get_item('target_path'), state_manager.get_item('output_path') ]): + logger.error(wording.get('match_target_and_output_extension') + wording.get('exclamation_mark'), __name__) + return False + return True + + +def post_process() -> None: + read_static_image.cache_clear() + if state_manager.get_item('video_memory_strategy') in [ 'strict', 'moderate' ]: + clear_inference_pool() + if state_manager.get_item('video_memory_strategy') == 'strict': + content_analyser.clear_inference_pool() + face_classifier.clear_inference_pool() + face_detector.clear_inference_pool() + face_landmarker.clear_inference_pool() + face_masker.clear_inference_pool() + face_recognizer.clear_inference_pool() + + +def swap_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: + model_template = get_model_options().get('template') + model_size = get_model_size() + crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark_set.get('5/68'), model_template, model_size) + crop_vision_frame_raw = crop_vision_frame.copy() + box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], state_manager.get_item('face_mask_blur'), state_manager.get_item('face_mask_padding')) + crop_masks =\ + [ + box_mask + ] + + if 'occlusion' in state_manager.get_item('face_mask_types'): + occlusion_mask = create_occlusion_mask(crop_vision_frame) + crop_masks.append(occlusion_mask) + + crop_vision_frame = prepare_crop_frame(crop_vision_frame) + deep_swapper_morph = 
numpy.array([ numpy.interp(state_manager.get_item('deep_swapper_morph'), [ 0, 100 ], [ 0, 1 ]) ]).astype(numpy.float32) + crop_vision_frame, crop_source_mask, crop_target_mask = forward(crop_vision_frame, deep_swapper_morph) + crop_vision_frame = normalize_crop_frame(crop_vision_frame) + crop_vision_frame = conditional_match_frame_color(crop_vision_frame_raw, crop_vision_frame) + crop_masks.append(prepare_crop_mask(crop_source_mask, crop_target_mask)) + + if 'region' in state_manager.get_item('face_mask_types'): + region_mask = create_region_mask(crop_vision_frame, state_manager.get_item('face_mask_regions')) + crop_masks.append(region_mask) + + crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1) + paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix) + return paste_vision_frame + + +def forward(crop_vision_frame : VisionFrame, deep_swapper_morph : DeepSwapperMorph) -> Tuple[VisionFrame, Mask, Mask]: + deep_swapper = get_inference_pool().get('deep_swapper') + deep_swapper_inputs = {} + + for deep_swapper_input in deep_swapper.get_inputs(): + if deep_swapper_input.name == 'in_face:0': + deep_swapper_inputs[deep_swapper_input.name] = crop_vision_frame + if deep_swapper_input.name == 'morph_value:0': + deep_swapper_inputs[deep_swapper_input.name] = deep_swapper_morph + + with thread_semaphore(): + crop_target_mask, crop_vision_frame, crop_source_mask = deep_swapper.run(None, deep_swapper_inputs) + + return crop_vision_frame[0], crop_source_mask[0], crop_target_mask[0] + + +def has_morph_input() -> bool: + deep_swapper = get_inference_pool().get('deep_swapper') + + for deep_swapper_input in deep_swapper.get_inputs(): + if deep_swapper_input.name == 'morph_value:0': + return True + return False + + +def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: + crop_vision_frame = cv2.addWeighted(crop_vision_frame, 1.75, cv2.GaussianBlur(crop_vision_frame, (0, 0), 2), -0.75, 0) + crop_vision_frame = crop_vision_frame / 255.0 + crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0).astype(numpy.float32) + return crop_vision_frame + + +def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: + crop_vision_frame = (crop_vision_frame * 255.0).clip(0, 255) + crop_vision_frame = crop_vision_frame.astype(numpy.uint8) + return crop_vision_frame + + +def prepare_crop_mask(crop_source_mask : Mask, crop_target_mask : Mask) -> Mask: + model_size = get_model_size() + blur_size = 6.25 + kernel_size = 3 + crop_mask = numpy.minimum.reduce([ crop_source_mask, crop_target_mask ]) + crop_mask = crop_mask.reshape(model_size).clip(0, 1) + crop_mask = cv2.erode(crop_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size)), iterations = 2) + crop_mask = cv2.GaussianBlur(crop_mask, (0, 0), blur_size) + return crop_mask + + +def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: + return swap_face(target_face, temp_vision_frame) + + +def process_frame(inputs : DeepSwapperInputs) -> VisionFrame: + reference_faces = inputs.get('reference_faces') + target_vision_frame = inputs.get('target_vision_frame') + many_faces = sort_and_filter_faces(get_many_faces([ target_vision_frame ])) + + if state_manager.get_item('face_selector_mode') == 'many': + if many_faces: + for target_face in many_faces: + target_vision_frame = swap_face(target_face, target_vision_frame) + if state_manager.get_item('face_selector_mode') == 'one': + target_face = get_one_face(many_faces) + 
if target_face: + target_vision_frame = swap_face(target_face, target_vision_frame) + if state_manager.get_item('face_selector_mode') == 'reference': + similar_faces = find_similar_faces(many_faces, reference_faces, state_manager.get_item('reference_face_distance')) + if similar_faces: + for similar_face in similar_faces: + target_vision_frame = swap_face(similar_face, target_vision_frame) + return target_vision_frame + + +def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: + reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None + + for queue_payload in process_manager.manage(queue_payloads): + target_vision_path = queue_payload['frame_path'] + target_vision_frame = read_image(target_vision_path) + output_vision_frame = process_frame( + { + 'reference_faces': reference_faces, + 'target_vision_frame': target_vision_frame + }) + write_image(target_vision_path, output_vision_frame) + update_progress(1) + + +def process_image(source_path : str, target_path : str, output_path : str) -> None: + reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None + target_vision_frame = read_static_image(target_path) + output_vision_frame = process_frame( + { + 'reference_faces': reference_faces, + 'target_vision_frame': target_vision_frame + }) + write_image(output_path, output_vision_frame) + + +def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: + processors.multi_process_frames(None, temp_frame_paths, process_frames) diff --git a/facefusion/processors/modules/expression_restorer.py b/facefusion/processors/modules/expression_restorer.py index 1068bccc..2dd0547e 100755 --- a/facefusion/processors/modules/expression_restorer.py +++ b/facefusion/processors/modules/expression_restorer.py @@ -1,4 +1,5 @@ from argparse import ArgumentParser +from functools import lru_cache from typing import List, Tuple import cv2 @@ -9,7 +10,7 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import create_int_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 from facefusion.face_masker import create_occlusion_mask, create_static_box_mask @@ -22,59 +23,61 @@ from facefusion.processors.typing import ExpressionRestorerInputs from facefusion.processors.typing import LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw from facefusion.program_helper import find_argument_group from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore -from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, 
UpdateProgress, VisionFrame from facefusion.vision import get_video_frame, read_image, read_static_image, write_image -MODEL_SET : ModelSet =\ -{ - 'live_portrait': + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'live_portrait': { - 'feature_extractor': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_feature_extractor.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash') + 'feature_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash') + }, + 'motion_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash') + }, + 'generator': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash') + } }, - 'motion_extractor': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_motion_extractor.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash') + 'feature_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx') + }, + 'motion_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx') + }, + 'generator': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') + } }, - 'generator': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_generator.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash') - } - }, - 'sources': - { - 'feature_extractor': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_feature_extractor.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx') - }, - 'motion_extractor': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_motion_extractor.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx') - }, - 'generator': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_generator.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') - } - }, - 'template': 'arcface_128_v2', - 'size': (512, 512) + 'template': 'arcface_128_v2', + 'size': (512, 512) + } } -} def get_inference_pool() -> InferencePool: model_sources = get_model_options().get('sources') - model_context = __name__ + '.' 
+ state_manager.get_item('expression_restorer_model') - return inference_manager.get_inference_pool(model_context, model_sources) + return inference_manager.get_inference_pool(__name__, model_sources) def clear_inference_pool() -> None: @@ -83,7 +86,7 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: expression_restorer_model = state_manager.get_item('expression_restorer_model') - return MODEL_SET.get(expression_restorer_model) + return create_static_model_set('full').get(expression_restorer_model) def register_args(program : ArgumentParser) -> None: @@ -100,14 +103,16 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def pre_process(mode : ProcessMode) -> bool: + if mode == 'stream': + logger.error(wording.get('stream_not_supported') + wording.get('exclamation_mark'), __name__) + return False if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): logger.error(wording.get('choose_image_or_video_target') + wording.get('exclamation_mark'), __name__) return False @@ -222,7 +227,7 @@ def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: crop_vision_frame = crop_vision_frame.transpose(1, 2, 0).clip(0, 1) - crop_vision_frame = (crop_vision_frame * 255.0) + crop_vision_frame = crop_vision_frame * 255.0 crop_vision_frame = crop_vision_frame.astype(numpy.uint8)[:, :, ::-1] return crop_vision_frame diff --git a/facefusion/processors/modules/face_debugger.py b/facefusion/processors/modules/face_debugger.py index 2dd11e46..d1f1f3f8 100755 --- a/facefusion/processors/modules/face_debugger.py +++ b/facefusion/processors/modules/face_debugger.py @@ -17,11 +17,11 @@ from facefusion.filesystem import in_directory, same_file_extension from facefusion.processors import choices as processors_choices from facefusion.processors.typing import FaceDebuggerInputs from facefusion.program_helper import find_argument_group -from facefusion.typing import ApplyStateItem, Args, Face, ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, write_image -def get_inference_pool() -> None: +def get_inference_pool() -> InferencePool: pass diff --git a/facefusion/processors/modules/face_editor.py b/facefusion/processors/modules/face_editor.py index 22682bec..e55b9420 100755 --- a/facefusion/processors/modules/face_editor.py +++ b/facefusion/processors/modules/face_editor.py @@ -1,4 +1,5 @@ from argparse import ArgumentParser +from functools import lru_cache from typing import List, Tuple import cv2 @@ -9,7 +10,7 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, 
state_manager, wording from facefusion.common_helper import create_float_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5 from facefusion.face_masker import create_static_box_mask @@ -21,99 +22,100 @@ from facefusion.processors.live_portrait import create_rotation, limit_euler_ang from facefusion.processors.typing import FaceEditorInputs, LivePortraitExpression, LivePortraitFeatureVolume, LivePortraitMotionPoints, LivePortraitPitch, LivePortraitRoll, LivePortraitRotation, LivePortraitScale, LivePortraitTranslation, LivePortraitYaw from facefusion.program_helper import find_argument_group from facefusion.thread_helper import conditional_thread_semaphore, thread_semaphore -from facefusion.typing import ApplyStateItem, Args, Face, FaceLandmark68, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, FaceLandmark68, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, write_image -MODEL_SET : ModelSet =\ -{ - 'live_portrait': + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'live_portrait': { - 'feature_extractor': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_feature_extractor.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash') + 'feature_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash') + }, + 'motion_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash') + }, + 'eye_retargeter': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_eye_retargeter.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.hash') + }, + 'lip_retargeter': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_lip_retargeter.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.hash') + }, + 'stitcher': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_stitcher.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.hash') + }, + 'generator': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash') + } }, - 'motion_extractor': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_motion_extractor.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash') + 'feature_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx') + }, + 'motion_extractor': + 
{ + 'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx') + }, + 'eye_retargeter': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_eye_retargeter.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.onnx') + }, + 'lip_retargeter': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_lip_retargeter.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.onnx') + }, + 'stitcher': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_stitcher.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.onnx') + }, + 'generator': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') + } }, - 'eye_retargeter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_eye_retargeter.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.hash') - }, - 'lip_retargeter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_lip_retargeter.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.hash') - }, - 'stitcher': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_stitcher.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.hash') - }, - 'generator': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_generator.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash') - } - }, - 'sources': - { - 'feature_extractor': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_feature_extractor.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx') - }, - 'motion_extractor': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_motion_extractor.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx') - }, - 'eye_retargeter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_eye_retargeter.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.onnx') - }, - 'lip_retargeter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_lip_retargeter.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.onnx') - }, - 'stitcher': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_stitcher.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.onnx') - }, - 'generator': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_generator.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') - } - }, - 'template': 'ffhq_512', - 'size': (512, 512) + 'template': 'ffhq_512', + 'size': (512, 512) + } } -} def get_inference_pool() -> InferencePool: model_sources = get_model_options().get('sources') - model_context = 
__name__ + '.' + state_manager.get_item('face_editor_model') - return inference_manager.get_inference_pool(model_context, model_sources) + return inference_manager.get_inference_pool(__name__, model_sources) def clear_inference_pool() -> None: - model_context = __name__ + '.' + state_manager.get_item('face_editor_model') - inference_manager.clear_inference_pool(model_context) + inference_manager.clear_inference_pool(__name__) def get_model_options() -> ModelOptions: face_editor_model = state_manager.get_item('face_editor_model') - return MODEL_SET.get(face_editor_model) + return create_static_model_set('full').get(face_editor_model) def register_args(program : ArgumentParser) -> None: @@ -156,11 +158,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def pre_process(mode : ProcessMode) -> bool: diff --git a/facefusion/processors/modules/face_enhancer.py b/facefusion/processors/modules/face_enhancer.py index da8d6498..ce8de8a3 100755 --- a/facefusion/processors/modules/face_enhancer.py +++ b/facefusion/processors/modules/face_enhancer.py @@ -1,4 +1,5 @@ from argparse import ArgumentParser +from functools import lru_cache from typing import List import cv2 @@ -8,8 +9,8 @@ import facefusion.jobs.job_manager import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording -from facefusion.common_helper import create_int_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.common_helper import create_float_metavar, create_int_metavar +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 from facefusion.face_masker import create_occlusion_mask, create_static_box_mask @@ -17,220 +18,221 @@ from facefusion.face_selector import find_similar_faces, sort_and_filter_faces from facefusion.face_store import get_reference_faces from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.processors import choices as processors_choices -from facefusion.processors.typing import FaceEnhancerInputs +from facefusion.processors.typing import FaceEnhancerInputs, FaceEnhancerWeight from facefusion.program_helper import find_argument_group from facefusion.thread_helper import thread_semaphore -from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, write_image -MODEL_SET : ModelSet =\ -{ - 
'codeformer': + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'codeformer': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/codeformer.hash', - 'path': resolve_relative_path('../.assets/models/codeformer.hash') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'codeformer.hash'), + 'path': resolve_relative_path('../.assets/models/codeformer.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'codeformer.onnx'), + 'path': resolve_relative_path('../.assets/models/codeformer.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) }, - 'sources': + 'gfpgan_1.2': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/codeformer.onnx', - 'path': resolve_relative_path('../.assets/models/codeformer.onnx') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.2.hash'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.2.onnx'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) }, - 'template': 'ffhq_512', - 'size': (512, 512) - }, - 'gfpgan_1.2': - { - 'hashes': + 'gfpgan_1.3': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.2.hash', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.hash') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.3.hash'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.3.onnx'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) }, - 'sources': + 'gfpgan_1.4': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.2.onnx', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.4.hash'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.4.onnx'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) }, - 'template': 'ffhq_512', - 'size': (512, 512) - }, - 'gfpgan_1.3': - { - 'hashes': + 'gpen_bfr_256': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.3.hash', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.hash') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_256.hash'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_256.onnx'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx') + } + }, + 'template': 'arcface_128_v2', + 'size': (256, 256) }, - 'sources': + 'gpen_bfr_512': { - 
'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.3.onnx', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_512.hash'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_512.onnx'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) }, - 'template': 'ffhq_512', - 'size': (512, 512) - }, - 'gfpgan_1.4': - { - 'hashes': + 'gpen_bfr_1024': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.4.hash', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.hash') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_1024.hash'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_1024.onnx'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (1024, 1024) }, - 'sources': + 'gpen_bfr_2048': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.4.onnx', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_2048.hash'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_2048.onnx'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (2048, 2048) }, - 'template': 'ffhq_512', - 'size': (512, 512) - }, - 'gpen_bfr_256': - { - 'hashes': + 'restoreformer_plus_plus': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_256.hash', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.hash') - } - }, - 'sources': - { - 'face_enhancer': + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'restoreformer_plus_plus.hash'), + 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_256.onnx', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx') - } - }, - 'template': 'arcface_128_v2', - 'size': (256, 256) - }, - 'gpen_bfr_512': - { - 'hashes': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_512.hash', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.hash') - } - }, - 'sources': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_512.onnx', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx') - } - }, - 'template': 'ffhq_512', - 'size': (512, 512) - }, - 'gpen_bfr_1024': - { - 'hashes': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_1024.hash', - 
'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.hash') - } - }, - 'sources': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_1024.onnx', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.onnx') - } - }, - 'template': 'ffhq_512', - 'size': (1024, 1024) - }, - 'gpen_bfr_2048': - { - 'hashes': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_2048.hash', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.hash') - } - }, - 'sources': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_2048.onnx', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.onnx') - } - }, - 'template': 'ffhq_512', - 'size': (2048, 2048) - }, - 'restoreformer_plus_plus': - { - 'hashes': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/restoreformer_plus_plus.hash', - 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.hash') - } - }, - 'sources': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/restoreformer_plus_plus.onnx', - 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.onnx') - } - }, - 'template': 'ffhq_512', - 'size': (512, 512) + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'restoreformer_plus_plus.onnx'), + 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) + } } -} def get_inference_pool() -> InferencePool: model_sources = get_model_options().get('sources') - model_context = __name__ + '.' + state_manager.get_item('face_enhancer_model') - return inference_manager.get_inference_pool(model_context, model_sources) + return inference_manager.get_inference_pool(__name__, model_sources) def clear_inference_pool() -> None: - model_context = __name__ + '.' 
+ state_manager.get_item('face_enhancer_model') - inference_manager.clear_inference_pool(model_context) + inference_manager.clear_inference_pool(__name__) def get_model_options() -> ModelOptions: face_enhancer_model = state_manager.get_item('face_enhancer_model') - return MODEL_SET.get(face_enhancer_model) + return create_static_model_set('full').get(face_enhancer_model) def register_args(program : ArgumentParser) -> None: @@ -238,20 +240,21 @@ def register_args(program : ArgumentParser) -> None: if group_processors: group_processors.add_argument('--face-enhancer-model', help = wording.get('help.face_enhancer_model'), default = config.get_str_value('processors.face_enhancer_model', 'gfpgan_1.4'), choices = processors_choices.face_enhancer_models) group_processors.add_argument('--face-enhancer-blend', help = wording.get('help.face_enhancer_blend'), type = int, default = config.get_int_value('processors.face_enhancer_blend', '80'), choices = processors_choices.face_enhancer_blend_range, metavar = create_int_metavar(processors_choices.face_enhancer_blend_range)) - facefusion.jobs.job_store.register_step_keys([ 'face_enhancer_model', 'face_enhancer_blend' ]) + group_processors.add_argument('--face-enhancer-weight', help = wording.get('help.face_enhancer_weight'), type = float, default = config.get_float_value('processors.face_enhancer_weight', '1.0'), choices = processors_choices.face_enhancer_weight_range, metavar = create_float_metavar(processors_choices.face_enhancer_weight_range)) + facefusion.jobs.job_store.register_step_keys([ 'face_enhancer_model', 'face_enhancer_blend', 'face_enhancer_weight' ]) def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: apply_state_item('face_enhancer_model', args.get('face_enhancer_model')) apply_state_item('face_enhancer_blend', args.get('face_enhancer_blend')) + apply_state_item('face_enhancer_weight', args.get('face_enhancer_weight')) def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def pre_process(mode : ProcessMode) -> bool: @@ -295,7 +298,8 @@ def enhance_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionF crop_masks.append(occlusion_mask) crop_vision_frame = prepare_crop_frame(crop_vision_frame) - crop_vision_frame = forward(crop_vision_frame) + face_enhancer_weight = numpy.array([ state_manager.get_item('face_enhancer_weight') ]).astype(numpy.double) + crop_vision_frame = forward(crop_vision_frame, face_enhancer_weight) crop_vision_frame = normalize_crop_frame(crop_vision_frame) crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1) paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix) @@ -303,7 +307,7 @@ def enhance_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionF return temp_vision_frame -def forward(crop_vision_frame : VisionFrame) -> VisionFrame: +def forward(crop_vision_frame : VisionFrame, face_enhancer_weight : FaceEnhancerWeight) -> VisionFrame: face_enhancer = get_inference_pool().get('face_enhancer') face_enhancer_inputs = {} @@ -311,8 +315,7 @@ def forward(crop_vision_frame : VisionFrame) -> VisionFrame: if face_enhancer_input.name == 'input': 
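# Reference sketch, not part of this diff: the hunk continues below by replacing the
# hard-coded weight of 1.0 with the new face_enhancer_weight tensor, so the CLI value in
# [ 0.0, 1.0 ] reaches the optional 'weight' input as a double tensor. Illustrative wiring
# only, assuming an onnxruntime session; run_face_enhancer_sketch is a hypothetical name,
# not facefusion API, and which models expose 'weight' (presumably codeformer) is an
# assumption here:
import numpy
import onnxruntime

def run_face_enhancer_sketch(session : onnxruntime.InferenceSession, crop_vision_frame : numpy.ndarray, face_enhancer_weight : float) -> numpy.ndarray:
	session_inputs = { 'input': crop_vision_frame }
	session_input_names = [ session_input.name for session_input in session.get_inputs() ]

	# only feed the weight tensor when the model actually declares the input
	if 'weight' in session_input_names:
		session_inputs['weight'] = numpy.array([ face_enhancer_weight ]).astype(numpy.double)
	return session.run(None, session_inputs)[0][0]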
face_enhancer_inputs[face_enhancer_input.name] = crop_vision_frame if face_enhancer_input.name == 'weight': - weight = numpy.array([ 1 ]).astype(numpy.double) - face_enhancer_inputs[face_enhancer_input.name] = weight + face_enhancer_inputs[face_enhancer_input.name] = face_enhancer_weight with thread_semaphore(): crop_vision_frame = face_enhancer.run(None, face_enhancer_inputs)[0][0] @@ -320,6 +323,15 @@ def forward(crop_vision_frame : VisionFrame) -> VisionFrame: return crop_vision_frame +def has_weight_input() -> bool: + face_enhancer = get_inference_pool().get('face_enhancer') + + for face_enhancer_input in face_enhancer.get_inputs(): + if face_enhancer_input.name == 'weight': + return True + return False + + def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: crop_vision_frame = crop_vision_frame[:, :, ::-1] / 255.0 crop_vision_frame = (crop_vision_frame - 0.5) / 0.5 diff --git a/facefusion/processors/modules/face_swapper.py b/facefusion/processors/modules/face_swapper.py index 72aa8e03..bcd2b4e8 100755 --- a/facefusion/processors/modules/face_swapper.py +++ b/facefusion/processors/modules/face_swapper.py @@ -1,323 +1,360 @@ from argparse import ArgumentParser +from functools import lru_cache from typing import List, Tuple import numpy +import facefusion.choices import facefusion.jobs.job_manager import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import get_first -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.execution import has_execution_provider from facefusion.face_analyser import get_average_face, get_many_faces, get_one_face from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 from facefusion.face_masker import create_occlusion_mask, create_region_mask, create_static_box_mask -from facefusion.face_selector import find_similar_faces, sort_and_filter_faces +from facefusion.face_selector import find_similar_faces, sort_and_filter_faces, sort_faces_by_order from facefusion.face_store import get_reference_faces from facefusion.filesystem import filter_image_paths, has_image, in_directory, is_image, is_video, resolve_relative_path, same_file_extension -from facefusion.inference_manager import get_static_model_initializer +from facefusion.model_helper import get_static_model_initializer from facefusion.processors import choices as processors_choices from facefusion.processors.pixel_boost import explode_pixel_boost, implode_pixel_boost from facefusion.processors.typing import FaceSwapperInputs -from facefusion.program_helper import find_argument_group, suggest_face_swapper_pixel_boost_choices +from facefusion.program_helper import find_argument_group from facefusion.thread_helper import conditional_thread_semaphore -from facefusion.typing import ApplyStateItem, Args, Embedding, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.typing import ApplyStateItem, Args, DownloadScope, Embedding, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, 
read_static_images, unpack_resolution, write_image -MODEL_SET : ModelSet =\ -{ - 'blendswap_256': + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'blendswap_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/blendswap_256.hash', - 'path': resolve_relative_path('../.assets/models/blendswap_256.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/blendswap_256.onnx', - 'path': resolve_relative_path('../.assets/models/blendswap_256.onnx') - } - }, - 'type': 'blendswap', - 'template': 'ffhq_512', - 'size': (256, 256), - 'mean': [ 0.0, 0.0, 0.0 ], - 'standard_deviation': [ 1.0, 1.0, 1.0 ] - }, - 'ghost_1_256': - { - 'hashes': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_1_256.hash', - 'path': resolve_relative_path('../.assets/models/ghost_1_256.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'blendswap_256.hash'), + 'path': resolve_relative_path('../.assets/models/blendswap_256.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_1_256.onnx', - 'path': resolve_relative_path('../.assets/models/ghost_1_256.onnx') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'blendswap_256.onnx'), + 'path': resolve_relative_path('../.assets/models/blendswap_256.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') - } + 'type': 'blendswap', + 'template': 'ffhq_512', + 'size': (256, 256), + 'mean': [ 0.0, 0.0, 0.0 ], + 'standard_deviation': [ 1.0, 1.0, 1.0 ] }, - 'type': 'ghost', - 'template': 'arcface_112_v1', - 'size': (256, 256), - 'mean': [ 0.5, 0.5, 0.5 ], - 'standard_deviation': [ 0.5, 0.5, 0.5 ] - }, - 'ghost_2_256': - { - 'hashes': + 'ghost_1_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_2_256.hash', - 'path': resolve_relative_path('../.assets/models/ghost_2_256.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_1_256.hash'), + 'path': resolve_relative_path('../.assets/models/ghost_1_256.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_2_256.onnx', - 'path': resolve_relative_path('../.assets/models/ghost_2_256.onnx') + 'face_swapper': + { + 'url': 
resolve_download_url('models-3.0.0', 'ghost_1_256.onnx'), + 'path': resolve_relative_path('../.assets/models/ghost_1_256.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') - } + 'type': 'ghost', + 'template': 'arcface_112_v1', + 'size': (256, 256), + 'mean': [ 0.5, 0.5, 0.5 ], + 'standard_deviation': [ 0.5, 0.5, 0.5 ] }, - 'type': 'ghost', - 'template': 'arcface_112_v1', - 'size': (256, 256), - 'mean': [ 0.5, 0.5, 0.5 ], - 'standard_deviation': [ 0.5, 0.5, 0.5 ] - }, - 'ghost_3_256': - { - 'hashes': + 'ghost_2_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_3_256.hash', - 'path': resolve_relative_path('../.assets/models/ghost_3_256.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_2_256.hash'), + 'path': resolve_relative_path('../.assets/models/ghost_2_256.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_3_256.onnx', - 'path': resolve_relative_path('../.assets/models/ghost_3_256.onnx') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_2_256.onnx'), + 'path': resolve_relative_path('../.assets/models/ghost_2_256.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') - } + 'type': 'ghost', + 'template': 'arcface_112_v1', + 'size': (256, 256), + 'mean': [ 0.5, 0.5, 0.5 ], + 'standard_deviation': [ 0.5, 0.5, 0.5 ] }, - 'type': 'ghost', - 'template': 'arcface_112_v1', - 'size': (256, 256), - 'mean': [ 0.5, 0.5, 0.5 ], - 'standard_deviation': [ 0.5, 0.5, 0.5 ] - }, - 'inswapper_128': - { - 'hashes': + 'ghost_3_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/inswapper_128.hash', - 'path': resolve_relative_path('../.assets/models/inswapper_128.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/inswapper_128.onnx', - 'path': resolve_relative_path('../.assets/models/inswapper_128.onnx') - } - }, - 'type': 'inswapper', - 'template': 'arcface_128_v2', - 'size': (128, 128), - 'mean': [ 0.0, 0.0, 0.0 ], - 'standard_deviation': [ 1.0, 1.0, 1.0 ] - }, - 'inswapper_128_fp16': - { - 'hashes': - { - 
'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/inswapper_128_fp16.hash', - 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/inswapper_128_fp16.onnx', - 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx') - } - }, - 'type': 'inswapper', - 'template': 'arcface_128_v2', - 'size': (128, 128), - 'mean': [ 0.0, 0.0, 0.0 ], - 'standard_deviation': [ 1.0, 1.0, 1.0 ] - }, - 'simswap_256': - { - 'hashes': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/simswap_256.hash', - 'path': resolve_relative_path('../.assets/models/simswap_256.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_3_256.hash'), + 'path': resolve_relative_path('../.assets/models/ghost_3_256.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_simswap.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/simswap_256.onnx', - 'path': resolve_relative_path('../.assets/models/simswap_256.onnx') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_3_256.onnx'), + 'path': resolve_relative_path('../.assets/models/ghost_3_256.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_simswap.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.onnx') - } + 'type': 'ghost', + 'template': 'arcface_112_v1', + 'size': (256, 256), + 'mean': [ 0.5, 0.5, 0.5 ], + 'standard_deviation': [ 0.5, 0.5, 0.5 ] }, - 'type': 'simswap', - 'template': 'arcface_112_v1', - 'size': (256, 256), - 'mean': [ 0.485, 0.456, 0.406 ], - 'standard_deviation': [ 0.229, 0.224, 0.225 ] - }, - 'simswap_unofficial_512': - { - 'hashes': + 'hififace_unofficial_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/simswap_unofficial_512.hash', - 'path': resolve_relative_path('../.assets/models/simswap_unofficial_512.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.1.0', 'hififace_unofficial_256.hash'), + 'path': resolve_relative_path('../.assets/models/hififace_unofficial_256.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.1.0', 'arcface_converter_hififace.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_hififace.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_simswap.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.hash') - } - }, - 
'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/simswap_unofficial_512.onnx', - 'path': resolve_relative_path('../.assets/models/simswap_unofficial_512.onnx') + 'face_swapper': + { + 'url': resolve_download_url('models-3.1.0', 'hififace_unofficial_256.onnx'), + 'path': resolve_relative_path('../.assets/models/hififace_unofficial_256.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.1.0', 'arcface_converter_hififace.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_hififace.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_simswap.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.onnx') - } + 'type': 'hififace', + 'template': 'mtcnn_512', + 'size': (256, 256), + 'mean': [ 0.5, 0.5, 0.5 ], + 'standard_deviation': [ 0.5, 0.5, 0.5 ] }, - 'type': 'simswap', - 'template': 'arcface_112_v1', - 'size': (512, 512), - 'mean': [ 0.0, 0.0, 0.0 ], - 'standard_deviation': [ 1.0, 1.0, 1.0 ] - }, - 'uniface_256': - { - 'hashes': + 'inswapper_128': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/uniface_256.hash', - 'path': resolve_relative_path('../.assets/models/uniface_256.hash') - } + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'inswapper_128.hash'), + 'path': resolve_relative_path('../.assets/models/inswapper_128.hash') + } + }, + 'sources': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'inswapper_128.onnx'), + 'path': resolve_relative_path('../.assets/models/inswapper_128.onnx') + } + }, + 'type': 'inswapper', + 'template': 'arcface_128_v2', + 'size': (128, 128), + 'mean': [ 0.0, 0.0, 0.0 ], + 'standard_deviation': [ 1.0, 1.0, 1.0 ] }, - 'sources': + 'inswapper_128_fp16': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/uniface_256.onnx', - 'path': resolve_relative_path('../.assets/models/uniface_256.onnx') - } + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'inswapper_128_fp16.hash'), + 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.hash') + } + }, + 'sources': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'inswapper_128_fp16.onnx'), + 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx') + } + }, + 'type': 'inswapper', + 'template': 'arcface_128_v2', + 'size': (128, 128), + 'mean': [ 0.0, 0.0, 0.0 ], + 'standard_deviation': [ 1.0, 1.0, 1.0 ] }, - 'type': 'uniface', - 'template': 'ffhq_512', - 'size': (256, 256), - 'mean': [ 0.5, 0.5, 0.5 ], - 'standard_deviation': [ 0.5, 0.5, 0.5 ] + 'simswap_256': + { + 'hashes': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'simswap_256.hash'), + 'path': resolve_relative_path('../.assets/models/simswap_256.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_simswap.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.hash') + } + }, + 'sources': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'simswap_256.onnx'), + 'path': resolve_relative_path('../.assets/models/simswap_256.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 
'arcface_converter_simswap.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.onnx') + } + }, + 'type': 'simswap', + 'template': 'arcface_112_v1', + 'size': (256, 256), + 'mean': [ 0.485, 0.456, 0.406 ], + 'standard_deviation': [ 0.229, 0.224, 0.225 ] + }, + 'simswap_unofficial_512': + { + 'hashes': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'simswap_unofficial_512.hash'), + 'path': resolve_relative_path('../.assets/models/simswap_unofficial_512.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_simswap.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.hash') + } + }, + 'sources': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'simswap_unofficial_512.onnx'), + 'path': resolve_relative_path('../.assets/models/simswap_unofficial_512.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_simswap.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.onnx') + } + }, + 'type': 'simswap', + 'template': 'arcface_112_v1', + 'size': (512, 512), + 'mean': [ 0.0, 0.0, 0.0 ], + 'standard_deviation': [ 1.0, 1.0, 1.0 ] + }, + 'uniface_256': + { + 'hashes': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'uniface_256.hash'), + 'path': resolve_relative_path('../.assets/models/uniface_256.hash') + } + }, + 'sources': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'uniface_256.onnx'), + 'path': resolve_relative_path('../.assets/models/uniface_256.onnx') + } + }, + 'type': 'uniface', + 'template': 'ffhq_512', + 'size': (256, 256), + 'mean': [ 0.5, 0.5, 0.5 ], + 'standard_deviation': [ 0.5, 0.5, 0.5 ] + } } -} def get_inference_pool() -> InferencePool: model_sources = get_model_options().get('sources') - model_context = __name__ + '.' + state_manager.get_item('face_swapper_model') - return inference_manager.get_inference_pool(model_context, model_sources) + return inference_manager.get_inference_pool(__name__, model_sources) def clear_inference_pool() -> None: - model_context = __name__ + '.' 
+ state_manager.get_item('face_swapper_model') - inference_manager.clear_inference_pool(model_context) + inference_manager.clear_inference_pool(__name__) def get_model_options() -> ModelOptions: face_swapper_model = state_manager.get_item('face_swapper_model') - face_swapper_model = 'inswapper_128' if has_execution_provider('coreml') and face_swapper_model == 'inswapper_128_fp16' else face_swapper_model - return MODEL_SET.get(face_swapper_model) + return create_static_model_set('full').get(face_swapper_model) def register_args(program : ArgumentParser) -> None: group_processors = find_argument_group(program, 'processors') if group_processors: - group_processors.add_argument('--face-swapper-model', help = wording.get('help.face_swapper_model'), default = config.get_str_value('processors.face_swapper_model', 'inswapper_128_fp16'), choices = processors_choices.face_swapper_set.keys()) - face_swapper_pixel_boost_choices = suggest_face_swapper_pixel_boost_choices(program) + group_processors.add_argument('--face-swapper-model', help = wording.get('help.face_swapper_model'), default = config.get_str_value('processors.face_swapper_model', 'inswapper_128_fp16'), choices = processors_choices.face_swapper_models) + known_args, _ = program.parse_known_args() + face_swapper_pixel_boost_choices = processors_choices.face_swapper_set.get(known_args.face_swapper_model) group_processors.add_argument('--face-swapper-pixel-boost', help = wording.get('help.face_swapper_pixel_boost'), default = config.get_str_value('processors.face_swapper_pixel_boost', get_first(face_swapper_pixel_boost_choices)), choices = face_swapper_pixel_boost_choices) facefusion.jobs.job_store.register_step_keys([ 'face_swapper_model', 'face_swapper_pixel_boost' ]) @@ -328,11 +365,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def pre_process(mode : ProcessMode) -> bool: @@ -410,9 +446,12 @@ def forward_swap_face(source_face : Face, crop_vision_frame : VisionFrame) -> Vi model_type = get_model_options().get('type') face_swapper_inputs = {} + if has_execution_provider('coreml') and model_type in [ 'ghost', 'uniface' ]: + face_swapper.set_providers([ facefusion.choices.execution_provider_set.get('cpu') ]) + for face_swapper_input in face_swapper.get_inputs(): if face_swapper_input.name == 'source': - if model_type == 'blendswap' or model_type == 'uniface': + if model_type in [ 'blendswap', 'uniface' ]: face_swapper_inputs[face_swapper_input.name] = prepare_source_frame(source_face) else: face_swapper_inputs[face_swapper_input.name] = prepare_source_embedding(source_face) @@ -493,7 +532,7 @@ def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: model_standard_deviation = get_model_options().get('standard_deviation') crop_vision_frame = crop_vision_frame.transpose(1, 2, 0) - if model_type == 'ghost' or model_type == 'uniface': + if model_type in [ 'ghost', 'hififace', 'uniface' ]: crop_vision_frame = crop_vision_frame * model_standard_deviation + model_mean crop_vision_frame = crop_vision_frame.clip(0, 1) crop_vision_frame = crop_vision_frame[:, :, ::-1] * 
255 @@ -529,7 +568,13 @@ def process_frame(inputs : FaceSwapperInputs) -> VisionFrame: def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None source_frames = read_static_images(source_paths) - source_faces = get_many_faces(source_frames) + source_faces = [] + + for source_frame in source_frames: + temp_faces = get_many_faces([ source_frame ]) + temp_faces = sort_faces_by_order(temp_faces, 'large-small') + if temp_faces: + source_faces.append(get_first(temp_faces)) source_face = get_average_face(source_faces) for queue_payload in process_manager.manage(queue_payloads): @@ -548,7 +593,13 @@ def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload] def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None source_frames = read_static_images(source_paths) - source_faces = get_many_faces(source_frames) + source_faces = [] + + for source_frame in source_frames: + temp_faces = get_many_faces([ source_frame ]) + temp_faces = sort_faces_by_order(temp_faces, 'large-small') + if temp_faces: + source_faces.append(get_first(temp_faces)) source_face = get_average_face(source_faces) target_vision_frame = read_static_image(target_path) output_vision_frame = process_frame( diff --git a/facefusion/processors/modules/frame_colorizer.py b/facefusion/processors/modules/frame_colorizer.py index 256d8594..f61cc275 100644 --- a/facefusion/processors/modules/frame_colorizer.py +++ b/facefusion/processors/modules/frame_colorizer.py @@ -1,4 +1,5 @@ from argparse import ArgumentParser +from functools import lru_cache from typing import List import cv2 @@ -9,134 +10,135 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import create_int_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.processors import choices as processors_choices from facefusion.processors.typing import FrameColorizerInputs from facefusion.program_helper import find_argument_group from facefusion.thread_helper import thread_semaphore -from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, unpack_resolution, write_image -MODEL_SET : ModelSet =\ -{ - 'ddcolor': + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'ddcolor': { - 'frame_colorizer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ddcolor.hash', - 'path': resolve_relative_path('../.assets/models/ddcolor.hash') - } + 'frame_colorizer': 
+ { + 'url': resolve_download_url('models-3.0.0', 'ddcolor.hash'), + 'path': resolve_relative_path('../.assets/models/ddcolor.hash') + } + }, + 'sources': + { + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'ddcolor.onnx'), + 'path': resolve_relative_path('../.assets/models/ddcolor.onnx') + } + }, + 'type': 'ddcolor' }, - 'sources': + 'ddcolor_artistic': { - 'frame_colorizer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ddcolor.onnx', - 'path': resolve_relative_path('../.assets/models/ddcolor.onnx') - } + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'ddcolor_artistic.hash'), + 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.hash') + } + }, + 'sources': + { + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'ddcolor_artistic.onnx'), + 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.onnx') + } + }, + 'type': 'ddcolor' }, - 'type': 'ddcolor' - }, - 'ddcolor_artistic': - { - 'hashes': + 'deoldify': { - 'frame_colorizer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ddcolor_artistic.hash', - 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.hash') - } + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify.hash'), + 'path': resolve_relative_path('../.assets/models/deoldify.hash') + } + }, + 'sources': + { + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify.onnx'), + 'path': resolve_relative_path('../.assets/models/deoldify.onnx') + } + }, + 'type': 'deoldify' }, - 'sources': + 'deoldify_artistic': { - 'frame_colorizer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ddcolor_artistic.onnx', - 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.onnx') - } + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify_artistic.hash'), + 'path': resolve_relative_path('../.assets/models/deoldify_artistic.hash') + } + }, + 'sources': + { + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify_artistic.onnx'), + 'path': resolve_relative_path('../.assets/models/deoldify_artistic.onnx') + } + }, + 'type': 'deoldify' }, - 'type': 'ddcolor' - }, - 'deoldify': - { - 'hashes': + 'deoldify_stable': { - 'frame_colorizer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify.hash', - 'path': resolve_relative_path('../.assets/models/deoldify.hash') - } - }, - 'sources': - { - 'frame_colorizer': + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify_stable.hash'), + 'path': resolve_relative_path('../.assets/models/deoldify_stable.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify.onnx', - 'path': resolve_relative_path('../.assets/models/deoldify.onnx') - } - }, - 'type': 'deoldify' - }, - 'deoldify_artistic': - { - 'hashes': - { - 'frame_colorizer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify_artistic.hash', - 'path': resolve_relative_path('../.assets/models/deoldify_artistic.hash') - } - }, - 'sources': - { - 'frame_colorizer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify_artistic.onnx', - 'path': 
resolve_relative_path('../.assets/models/deoldify_artistic.onnx') - } - }, - 'type': 'deoldify' - }, - 'deoldify_stable': - { - 'hashes': - { - 'frame_colorizer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify_stable.hash', - 'path': resolve_relative_path('../.assets/models/deoldify_stable.hash') - } - }, - 'sources': - { - 'frame_colorizer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify_stable.onnx', - 'path': resolve_relative_path('../.assets/models/deoldify_stable.onnx') - } - }, - 'type': 'deoldify' + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify_stable.onnx'), + 'path': resolve_relative_path('../.assets/models/deoldify_stable.onnx') + } + }, + 'type': 'deoldify' + } } -} def get_inference_pool() -> InferencePool: model_sources = get_model_options().get('sources') - model_context = __name__ + '.' + state_manager.get_item('frame_colorizer_model') - return inference_manager.get_inference_pool(model_context, model_sources) + return inference_manager.get_inference_pool(__name__, model_sources) def clear_inference_pool() -> None: - model_context = __name__ + '.' + state_manager.get_item('frame_colorizer_model') - inference_manager.clear_inference_pool(model_context) + inference_manager.clear_inference_pool(__name__) def get_model_options() -> ModelOptions: frame_colorizer_model = state_manager.get_item('frame_colorizer_model') - return MODEL_SET.get(frame_colorizer_model) + return create_static_model_set('full').get(frame_colorizer_model) def register_args(program : ArgumentParser) -> None: @@ -155,11 +157,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def pre_process(mode : ProcessMode) -> bool: diff --git a/facefusion/processors/modules/frame_enhancer.py b/facefusion/processors/modules/frame_enhancer.py index 8e9c9df2..33df5f16 100644 --- a/facefusion/processors/modules/frame_enhancer.py +++ b/facefusion/processors/modules/frame_enhancer.py @@ -1,4 +1,5 @@ from argparse import ArgumentParser +from functools import lru_cache from typing import List import cv2 @@ -9,286 +10,392 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import create_int_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.processors import choices as processors_choices from facefusion.processors.typing import FrameEnhancerInputs from facefusion.program_helper import find_argument_group from facefusion.thread_helper import conditional_thread_semaphore -from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, 
ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.typing import ApplyStateItem, Args, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import create_tile_frames, merge_tile_frames, read_image, read_static_image, write_image -MODEL_SET : ModelSet =\ -{ - 'clear_reality_x4': + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'clear_reality_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/clear_reality_x4.hash', - 'path': resolve_relative_path('../.assets/models/clear_reality_x4.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'clear_reality_x4.hash'), + 'path': resolve_relative_path('../.assets/models/clear_reality_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'clear_reality_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/clear_reality_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'sources': + 'lsdir_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/clear_reality_x4.onnx', - 'path': resolve_relative_path('../.assets/models/clear_reality_x4.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'lsdir_x4.hash'), + 'path': resolve_relative_path('../.assets/models/lsdir_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'lsdir_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/lsdir_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'lsdir_x4': - { - 'hashes': + 'nomos8k_sc_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/lsdir_x4.hash', - 'path': resolve_relative_path('../.assets/models/lsdir_x4.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'nomos8k_sc_x4.hash'), + 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'nomos8k_sc_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'sources': + 'real_esrgan_x2': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/lsdir_x4.onnx', - 'path': resolve_relative_path('../.assets/models/lsdir_x4.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x2.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x2.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 2 }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'nomos8k_sc_x4': - { - 'hashes': + 'real_esrgan_x2_fp16': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/nomos8k_sc_x4.hash', - 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.hash') - } + 'frame_enhancer': + { + 'url': 
resolve_download_url('models-3.0.0', 'real_esrgan_x2_fp16.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x2_fp16.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 2 }, - 'sources': + 'real_esrgan_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/nomos8k_sc_x4.onnx', - 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x4.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 4 }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'real_esrgan_x2': - { - 'hashes': + 'real_esrgan_x4_fp16': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x2.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x4_fp16.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x4_fp16.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 4 }, - 'sources': + 'real_esrgan_x8': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x2.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x8.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x8.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x8.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x8.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 8 }, - 'size': (256, 16, 8), - 'scale': 2 - }, - 'real_esrgan_x2_fp16': - { - 'hashes': + 'real_esrgan_x8_fp16': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x2_fp16.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x8_fp16.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x8_fp16.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x8_fp16.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x8_fp16.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 8 }, - 'sources': + 'real_hatgan_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x2_fp16.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.onnx') - } + 'frame_enhancer': + { + 'url': 
resolve_download_url('models-3.0.0', 'real_hatgan_x4.hash'), + 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_hatgan_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 4 }, - 'size': (256, 16, 8), - 'scale': 2 - }, - 'real_esrgan_x4': - { - 'hashes': + 'real_web_photo_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x4.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'real_web_photo_x4.hash'), + 'path': resolve_relative_path('../.assets/models/real_web_photo_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'real_web_photo_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/real_web_photo_x4.onnx') + } + }, + 'size': (64, 4, 2), + 'scale': 4 }, - 'sources': + 'realistic_rescaler_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x4.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'realistic_rescaler_x4.hash'), + 'path': resolve_relative_path('../.assets/models/realistic_rescaler_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'realistic_rescaler_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/realistic_rescaler_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'size': (256, 16, 8), - 'scale': 4 - }, - 'real_esrgan_x4_fp16': - { - 'hashes': + 'remacri_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x4_fp16.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'remacri_x4.hash'), + 'path': resolve_relative_path('../.assets/models/remacri_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'remacri_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/remacri_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'sources': + 'siax_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x4_fp16.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'siax_x4.hash'), + 'path': resolve_relative_path('../.assets/models/siax_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'siax_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/siax_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'size': (256, 16, 8), - 'scale': 4 - }, - 'real_esrgan_x8': - { - 'hashes': + 'span_kendata_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x8.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x8.hash') - } + 'frame_enhancer': + { + 'url': 
resolve_download_url('models-3.0.0', 'span_kendata_x4.hash'), + 'path': resolve_relative_path('../.assets/models/span_kendata_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'span_kendata_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/span_kendata_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'sources': + 'swin2_sr_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x8.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x8.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'swin2_sr_x4.hash'), + 'path': resolve_relative_path('../.assets/models/swin2_sr_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'swin2_sr_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/swin2_sr_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'size': (256, 16, 8), - 'scale': 8 - }, - 'real_esrgan_x8_fp16': - { - 'hashes': + 'ultra_sharp_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x8_fp16.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x8_fp16.hash') - } - }, - 'sources': - { - 'frame_enhancer': + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'ultra_sharp_x4.hash'), + 'path': resolve_relative_path('../.assets/models/ultra_sharp_x4.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x8_fp16.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x8_fp16.onnx') - } - }, - 'size': (256, 16, 8), - 'scale': 8 - }, - 'real_hatgan_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_hatgan_x4.hash', - 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_hatgan_x4.onnx', - 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.onnx') - } - }, - 'size': (256, 16, 8), - 'scale': 4 - }, - 'span_kendata_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/span_kendata_x4.hash', - 'path': resolve_relative_path('../.assets/models/span_kendata_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/span_kendata_x4.onnx', - 'path': resolve_relative_path('../.assets/models/span_kendata_x4.onnx') - } - }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'ultra_sharp_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ultra_sharp_x4.hash', - 'path': resolve_relative_path('../.assets/models/ultra_sharp_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ultra_sharp_x4.onnx', - 'path': resolve_relative_path('../.assets/models/ultra_sharp_x4.onnx') - } - }, - 'size': (128, 8, 4), - 'scale': 4 + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'ultra_sharp_x4.onnx'), + 'path': 
resolve_relative_path('../.assets/models/ultra_sharp_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 + } } -} def get_inference_pool() -> InferencePool: model_sources = get_model_options().get('sources') - model_context = __name__ + '.' + state_manager.get_item('frame_enhancer_model') - return inference_manager.get_inference_pool(model_context, model_sources) + return inference_manager.get_inference_pool(__name__, model_sources) def clear_inference_pool() -> None: - model_context = __name__ + '.' + state_manager.get_item('frame_enhancer_model') - inference_manager.clear_inference_pool(model_context) + inference_manager.clear_inference_pool(__name__) def get_model_options() -> ModelOptions: frame_enhancer_model = state_manager.get_item('frame_enhancer_model') - return MODEL_SET.get(frame_enhancer_model) + return create_static_model_set('full').get(frame_enhancer_model) def register_args(program : ArgumentParser) -> None: @@ -305,11 +412,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def pre_process(mode : ProcessMode) -> bool: diff --git a/facefusion/processors/modules/lip_syncer.py b/facefusion/processors/modules/lip_syncer.py index 6be93294..27794566 100755 --- a/facefusion/processors/modules/lip_syncer.py +++ b/facefusion/processors/modules/lip_syncer.py @@ -1,4 +1,5 @@ from argparse import ArgumentParser +from functools import lru_cache from typing import List import cv2 @@ -10,7 +11,7 @@ import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, voice_extractor, wording from facefusion.audio import create_empty_audio_frame, get_voice_frame, read_static_voice from facefusion.common_helper import get_first -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import create_bounding_box, paste_back, warp_face_by_bounding_box, warp_face_by_face_landmark_5 from facefusion.face_masker import create_mouth_mask, create_occlusion_mask, create_static_box_mask @@ -21,68 +22,69 @@ from facefusion.processors import choices as processors_choices from facefusion.processors.typing import LipSyncerInputs from facefusion.program_helper import find_argument_group from facefusion.thread_helper import conditional_thread_semaphore -from facefusion.typing import ApplyStateItem, Args, AudioFrame, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame +from facefusion.typing import ApplyStateItem, Args, AudioFrame, DownloadScope, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, restrict_video_fps, write_image -MODEL_SET : ModelSet =\ -{ - 'wav2lip_96': + 
+@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'wav2lip_96': { - 'lip_syncer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/wav2lip_96.hash', - 'path': resolve_relative_path('../.assets/models/wav2lip_96.hash') - } + 'lip_syncer': + { + 'url': resolve_download_url('models-3.0.0', 'wav2lip_96.hash'), + 'path': resolve_relative_path('../.assets/models/wav2lip_96.hash') + } + }, + 'sources': + { + 'lip_syncer': + { + 'url': resolve_download_url('models-3.0.0', 'wav2lip_96.onnx'), + 'path': resolve_relative_path('../.assets/models/wav2lip_96.onnx') + } + }, + 'size': (96, 96) }, - 'sources': + 'wav2lip_gan_96': { - 'lip_syncer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/wav2lip_96.onnx', - 'path': resolve_relative_path('../.assets/models/wav2lip_96.onnx') - } - }, - 'size': (96, 96) - }, - 'wav2lip_gan_96': - { - 'hashes': - { - 'lip_syncer': + 'lip_syncer': + { + 'url': resolve_download_url('models-3.0.0', 'wav2lip_gan_96.hash'), + 'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/wav2lip_gan_96.hash', - 'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.hash') - } - }, - 'sources': - { - 'lip_syncer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/wav2lip_gan_96.onnx', - 'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.onnx') - } - }, - 'size': (96, 96) + 'lip_syncer': + { + 'url': resolve_download_url('models-3.0.0', 'wav2lip_gan_96.onnx'), + 'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.onnx') + } + }, + 'size': (96, 96) + } } -} def get_inference_pool() -> InferencePool: model_sources = get_model_options().get('sources') - model_context = __name__ + '.' + state_manager.get_item('lip_syncer_model') - return inference_manager.get_inference_pool(model_context, model_sources) + return inference_manager.get_inference_pool(__name__, model_sources) def clear_inference_pool() -> None: - model_context = __name__ + '.' 
+ state_manager.get_item('lip_syncer_model') - inference_manager.clear_inference_pool(model_context) + inference_manager.clear_inference_pool(__name__) def get_model_options() -> ModelOptions: lip_syncer_model = state_manager.get_item('lip_syncer_model') - return MODEL_SET.get(lip_syncer_model) + return create_static_model_set('full').get(lip_syncer_model) def register_args(program : ArgumentParser) -> None: @@ -97,11 +99,10 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def pre_process(mode : ProcessMode) -> bool: diff --git a/facefusion/processors/typing.py b/facefusion/processors/typing.py index 16dcfc53..79e9bfef 100644 --- a/facefusion/processors/typing.py +++ b/facefusion/processors/typing.py @@ -5,13 +5,14 @@ from numpy._typing import NDArray from facefusion.typing import AppContext, AudioFrame, Face, FaceSet, VisionFrame AgeModifierModel = Literal['styleganex_age'] +DeepSwapperModel = str ExpressionRestorerModel = Literal['live_portrait'] FaceDebuggerItem = Literal['bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender', 'race'] FaceEditorModel = Literal['live_portrait'] FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus'] -FaceSwapperModel = Literal['blendswap_256', 'ghost_1_256', 'ghost_2_256', 'ghost_3_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_unofficial_512', 'uniface_256'] +FaceSwapperModel = Literal['blendswap_256', 'ghost_1_256', 'ghost_2_256', 'ghost_3_256', 'hififace_unofficial_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_unofficial_512', 'uniface_256'] FrameColorizerModel = Literal['ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldify_artistic', 'deoldify_stable'] -FrameEnhancerModel = Literal['clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_hatgan_x4', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'span_kendata_x4', 'ultra_sharp_x4'] +FrameEnhancerModel = Literal['clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_esrgan_x8', 'real_esrgan_x8_fp16', 'real_hatgan_x4', 'real_web_photo_x4', 'realistic_rescaler_x4', 'remacri_x4', 'siax_x4', 'span_kendata_x4', 'swin2_sr_x4', 'ultra_sharp_x4'] LipSyncerModel = Literal['wav2lip_96', 'wav2lip_gan_96'] FaceSwapperSet = Dict[FaceSwapperModel, List[str]] @@ -21,6 +22,11 @@ AgeModifierInputs = TypedDict('AgeModifierInputs', 'reference_faces' : FaceSet, 'target_vision_frame' : VisionFrame }) +DeepSwapperInputs = TypedDict('DeepSwapperInputs', +{ + 'reference_faces' : FaceSet, + 'target_vision_frame' : VisionFrame +}) ExpressionRestorerInputs = TypedDict('ExpressionRestorerInputs', { 'reference_faces' : FaceSet, @@ -67,6 +73,8 @@ ProcessorStateKey = Literal\ [ 'age_modifier_model', 'age_modifier_direction', + 
diff --git a/facefusion/program.py b/facefusion/program.py
index 314cb794..1a25aab2 100755
--- a/facefusion/program.py
+++ b/facefusion/program.py
@@ -1,13 +1,13 @@
+import tempfile
 from argparse import ArgumentParser, HelpFormatter

 import facefusion.choices
 from facefusion import config, metadata, state_manager, wording
-from facefusion.common_helper import create_float_metavar, create_int_metavar
-from facefusion.execution import get_execution_provider_choices
+from facefusion.common_helper import create_float_metavar, create_int_metavar, get_last
+from facefusion.execution import get_available_execution_providers
 from facefusion.filesystem import list_directory
 from facefusion.jobs import job_store
 from facefusion.processors.core import get_processors_modules
-from facefusion.program_helper import remove_args, suggest_face_detector_choices


 def create_help_formatter_small(prog : str) -> HelpFormatter:
@@ -18,38 +18,86 @@ def create_help_formatter_large(prog : str) -> HelpFormatter:
     return HelpFormatter(prog, max_help_position = 300)


-def create_config_program() -> ArgumentParser:
+def create_config_path_program() -> ArgumentParser:
     program = ArgumentParser(add_help = False)
     group_paths = program.add_argument_group('paths')
-    group_paths.add_argument('-c', '--config-path', help = wording.get('help.config_path'), default = 'facefusion.ini')
-    job_store.register_job_keys([ 'config-path' ])
+    group_paths.add_argument('--config-path', help = wording.get('help.config_path'), default = 'facefusion.ini')
+    job_store.register_job_keys([ 'config_path' ])
     apply_config_path(program)
     return program


+def create_temp_path_program() -> ArgumentParser:
+    program = ArgumentParser(add_help = False)
+    group_paths = program.add_argument_group('paths')
+    group_paths.add_argument('--temp-path', help = wording.get('help.temp_path'), default = config.get_str_value('paths.temp_path', tempfile.gettempdir()))
+    job_store.register_job_keys([ 'temp_path' ])
+    return program
+
+
 def create_jobs_path_program() -> ArgumentParser:
     program = ArgumentParser(add_help = False)
     group_paths = program.add_argument_group('paths')
-    group_paths.add_argument('-j', '--jobs-path', help = wording.get('help.jobs_path'), default = config.get_str_value('paths.jobs_path', '.jobs'))
+    group_paths.add_argument('--jobs-path', help = wording.get('help.jobs_path'), default = config.get_str_value('paths.jobs_path', '.jobs'))
     job_store.register_job_keys([ 'jobs_path' ])
     return program


-def create_paths_program() -> ArgumentParser:
+def create_source_paths_program() -> ArgumentParser:
+    program = ArgumentParser(add_help = False)
+    group_paths = program.add_argument_group('paths')
+    group_paths.add_argument('-s', '--source-paths', help = wording.get('help.source_paths'), default = config.get_str_list('paths.source_paths'), nargs = '+')
+    job_store.register_step_keys([ 'source_paths' ])
+    return program
+
+
+def create_target_path_program() -> ArgumentParser:
     program = ArgumentParser(add_help = False)
     group_paths = program.add_argument_group('paths')
-    group_paths.add_argument('-s', '--source-paths', help = wording.get('help.source_paths'), action = 'append', default = config.get_str_list('paths.source_paths'))
     group_paths.add_argument('-t', '--target-path', help = wording.get('help.target_path'), default = config.get_str_value('paths.target_path'))
+    job_store.register_step_keys([ 'target_path' ])
+    return program
+
+
+def create_output_path_program() -> ArgumentParser:
+    program = ArgumentParser(add_help = False)
+    group_paths = program.add_argument_group('paths')
     group_paths.add_argument('-o', '--output-path', help = wording.get('help.output_path'), default = config.get_str_value('paths.output_path'))
-    job_store.register_step_keys([ 'source_paths', 'target_path', 'output_path' ])
+    job_store.register_step_keys([ 'output_path' ])
+    return program
+
+
+def create_source_pattern_program() -> ArgumentParser:
+    program = ArgumentParser(add_help = False)
+    group_patterns = program.add_argument_group('patterns')
+    group_patterns.add_argument('-s', '--source-pattern', help = wording.get('help.source_pattern'), default = config.get_str_value('patterns.source_pattern'))
+    job_store.register_job_keys([ 'source_pattern' ])
+    return program
+
+
+def create_target_pattern_program() -> ArgumentParser:
+    program = ArgumentParser(add_help = False)
+    group_patterns = program.add_argument_group('patterns')
+    group_patterns.add_argument('-t', '--target-pattern', help = wording.get('help.target_pattern'), default = config.get_str_value('patterns.target_pattern'))
+    job_store.register_job_keys([ 'target_pattern' ])
+    return program
+
+
+def create_output_pattern_program() -> ArgumentParser:
+    program = ArgumentParser(add_help = False)
+    group_patterns = program.add_argument_group('patterns')
+    group_patterns.add_argument('-o', '--output-pattern', help = wording.get('help.output_pattern'), default = config.get_str_value('patterns.output_pattern'))
+    job_store.register_job_keys([ 'output_pattern' ])
     return program


 def create_face_detector_program() -> ArgumentParser:
     program = ArgumentParser(add_help = False)
     group_face_detector = program.add_argument_group('face detector')
-    group_face_detector.add_argument('--face-detector-model', help = wording.get('help.face_detector_model'), default = config.get_str_value('face_detector.face_detector_model', 'yoloface'), choices = facefusion.choices.face_detector_set.keys())
-    group_face_detector.add_argument('--face-detector-size', help = wording.get('help.face_detector_size'), default = config.get_str_value('face_detector.face_detector_size', '640x640'), choices = suggest_face_detector_choices(program))
+    group_face_detector.add_argument('--face-detector-model', help = wording.get('help.face_detector_model'), default = config.get_str_value('face_detector.face_detector_model', 'yoloface'), choices = facefusion.choices.face_detector_models)
+    known_args, _ = program.parse_known_args()
+    face_detector_size_choices = facefusion.choices.face_detector_set.get(known_args.face_detector_model)
+    group_face_detector.add_argument('--face-detector-size', help = wording.get('help.face_detector_size'), default = config.get_str_value('face_detector.face_detector_size', get_last(face_detector_size_choices)), choices = face_detector_size_choices)
     group_face_detector.add_argument('--face-detector-angles', help = wording.get('help.face_detector_angles'), type = int, default = config.get_int_list('face_detector.face_detector_angles', '0'), choices = facefusion.choices.face_detector_angles, nargs = '+', metavar = 'FACE_DETECTOR_ANGLES')
     group_face_detector.add_argument('--face-detector-score', help = wording.get('help.face_detector_score'), type = float, default = config.get_float_value('face_detector.face_detector_score', '0.5'), choices = facefusion.choices.face_detector_score_range, metavar = create_float_metavar(facefusion.choices.face_detector_score_range))
     job_store.register_step_keys([ 'face_detector_model', 'face_detector_angles', 'face_detector_size', 'face_detector_score' ])
@@ -84,11 +132,13 @@ def create_face_selector_program() -> ArgumentParser:

 def create_face_masker_program() -> ArgumentParser:
     program = ArgumentParser(add_help = False)
     group_face_masker = program.add_argument_group('face masker')
+    group_face_masker.add_argument('--face-occluder-model', help = wording.get('help.face_occluder_model'), default = config.get_str_value('face_masker.face_occluder_model', 'xseg_1'), choices = facefusion.choices.face_occluder_models)
+    group_face_masker.add_argument('--face-parser-model', help = wording.get('help.face_parser_model'), default = config.get_str_value('face_masker.face_parser_model', 'bisenet_resnet_34'), choices = facefusion.choices.face_parser_models)
     group_face_masker.add_argument('--face-mask-types', help = wording.get('help.face_mask_types').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = config.get_str_list('face_masker.face_mask_types', 'box'), choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
     group_face_masker.add_argument('--face-mask-blur', help = wording.get('help.face_mask_blur'), type = float, default = config.get_float_value('face_masker.face_mask_blur', '0.3'), choices = facefusion.choices.face_mask_blur_range, metavar = create_float_metavar(facefusion.choices.face_mask_blur_range))
     group_face_masker.add_argument('--face-mask-padding', help = wording.get('help.face_mask_padding'), type = int, default = config.get_int_list('face_masker.face_mask_padding', '0 0 0 0'), nargs = '+')
     group_face_masker.add_argument('--face-mask-regions', help = wording.get('help.face_mask_regions').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_masker.face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
-    job_store.register_step_keys([ 'face_mask_types', 'face_mask_blur', 'face_mask_padding', 'face_mask_regions' ])
+    job_store.register_step_keys([ 'face_occluder_model', 'face_parser_model', 'face_mask_types', 'face_mask_blur', 'face_mask_padding', 'face_mask_regions' ])
     return program
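The size choices are now derived inline instead of through the removed suggest_face_detector_choices helper: the half-built parser is probed with parse_known_args, which tolerates flags it has not registered yet, and the result scopes --face-detector-size to the selected model. Distilled to its core (detector table abridged):

from argparse import ArgumentParser

face_detector_set =\
{
    'retinaface': [ '160x160', '320x320', '480x480', '512x512', '640x640' ],
    'yoloface': [ '640x640' ]
}

program = ArgumentParser(add_help = False)
program.add_argument('--face-detector-model', default = 'yoloface', choices = face_detector_set.keys())
# parse_known_args() ignores flags it does not know, so probing works before the parser is complete
known_args, _ = program.parse_known_args()
face_detector_size_choices = face_detector_set.get(known_args.face_detector_model)
program.add_argument('--face-detector-size', default = face_detector_size_choices[-1], choices = face_detector_size_choices)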
@@ -121,7 +171,7 @@ def create_output_creation_program() -> ArgumentParser:

 def create_processors_program() -> ArgumentParser:
     program = ArgumentParser(add_help = False)
-    available_processors = list_directory('facefusion/processors/modules')
+    available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ]
     group_processors = program.add_argument_group('processors')
     group_processors.add_argument('--processors', help = wording.get('help.processors').format(choices = ', '.join(available_processors)), default = config.get_str_list('processors.processors', 'face_swapper'), nargs = '+')
     job_store.register_step_keys([ 'processors' ])
@@ -132,7 +182,7 @@ def create_processors_program() -> ArgumentParser:

 def create_uis_program() -> ArgumentParser:
     program = ArgumentParser(add_help = False)
-    available_ui_layouts = list_directory('facefusion/uis/layouts')
+    available_ui_layouts = [ file.get('name') for file in list_directory('facefusion/uis/layouts') ]
     group_uis = program.add_argument_group('uis')
     group_uis.add_argument('--open-browser', help = wording.get('help.open_browser'), action = 'store_true', default = config.get_bool_value('uis.open_browser'))
     group_uis.add_argument('--ui-layouts', help = wording.get('help.ui_layouts').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis.ui_layouts', 'default'), nargs = '+')
@@ -142,16 +192,33 @@ def create_uis_program() -> ArgumentParser:

 def create_execution_program() -> ArgumentParser:
     program = ArgumentParser(add_help = False)
-    execution_providers = get_execution_provider_choices()
+    available_execution_providers = get_available_execution_providers()
     group_execution = program.add_argument_group('execution')
     group_execution.add_argument('--execution-device-id', help = wording.get('help.execution_device_id'), default = config.get_str_value('execution.execution_device_id', '0'))
-    group_execution.add_argument('--execution-providers', help = wording.get('help.execution_providers').format(choices = ', '.join(execution_providers)), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
+    group_execution.add_argument('--execution-providers', help = wording.get('help.execution_providers').format(choices = ', '.join(available_execution_providers)), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = available_execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
     group_execution.add_argument('--execution-thread-count', help = wording.get('help.execution_thread_count'), type = int, default = config.get_int_value('execution.execution_thread_count', '4'), choices = facefusion.choices.execution_thread_count_range, metavar = create_int_metavar(facefusion.choices.execution_thread_count_range))
     group_execution.add_argument('--execution-queue-count', help = wording.get('help.execution_queue_count'), type = int, default = config.get_int_value('execution.execution_queue_count', '1'), choices = facefusion.choices.execution_queue_count_range, metavar = create_int_metavar(facefusion.choices.execution_queue_count_range))
     job_store.register_job_keys([ 'execution_device_id', 'execution_providers', 'execution_thread_count', 'execution_queue_count' ])
     return program


+def create_download_providers_program() -> ArgumentParser:
+    program = ArgumentParser(add_help = False)
+    download_providers = list(facefusion.choices.download_provider_set.keys())
+    group_download = program.add_argument_group('download')
+    group_download.add_argument('--download-providers', help = wording.get('help.download_providers').format(choices = ', '.join(download_providers)), default = config.get_str_list('download.download_providers', ' '.join(facefusion.choices.download_providers)), choices = download_providers, nargs = '+', metavar = 'DOWNLOAD_PROVIDERS')
+    job_store.register_job_keys([ 'download_providers' ])
+    return program
+
+
+def create_download_scope_program() -> ArgumentParser:
+    program = ArgumentParser(add_help = False)
+    group_download = program.add_argument_group('download')
+    group_download.add_argument('--download-scope', help = wording.get('help.download_scope'), default = config.get_str_value('download.download_scope', 'lite'), choices = facefusion.choices.download_scopes)
+    job_store.register_job_keys([ 'download_scope' ])
+    return program
+
+
 def create_memory_program() -> ArgumentParser:
     program = ArgumentParser(add_help = False)
     group_memory = program.add_argument_group('memory')
@@ -161,18 +228,11 @@ def create_memory_program() -> ArgumentParser:
     return program


-def create_skip_download_program() -> ArgumentParser:
+def create_misc_program() -> ArgumentParser:
     program = ArgumentParser(add_help = False)
+    log_level_keys = list(facefusion.choices.log_level_set.keys())
     group_misc = program.add_argument_group('misc')
-    group_misc.add_argument('--skip-download', help = wording.get('help.skip_download'), action = 'store_true', default = config.get_bool_value('misc.skip_download'))
-    job_store.register_job_keys([ 'skip_download' ])
-    return program
-
-
-def create_log_level_program() -> ArgumentParser:
-    program = ArgumentParser(add_help = False)
-    group_misc = program.add_argument_group('misc')
-    group_misc.add_argument('--log-level', help = wording.get('help.log_level'), default = config.get_str_value('misc.log_level', 'info'), choices = facefusion.choices.log_level_set.keys())
+    group_misc.add_argument('--log-level', help = wording.get('help.log_level'), default = config.get_str_value('misc.log_level', 'info'), choices = log_level_keys)
     job_store.register_job_keys([ 'log_level' ])
     return program
@@ -197,11 +257,11 @@ def create_step_index_program() -> ArgumentParser:

 def collect_step_program() -> ArgumentParser:
-    return ArgumentParser(parents= [ create_config_program(), create_jobs_path_program(), create_paths_program(), create_face_detector_program(), create_face_landmarker_program(), create_face_selector_program(), create_face_masker_program(), create_frame_extraction_program(), create_output_creation_program(), create_processors_program() ], add_help = False)
+    return ArgumentParser(parents= [ create_face_detector_program(), create_face_landmarker_program(), create_face_selector_program(), create_face_masker_program(), create_frame_extraction_program(), create_output_creation_program(), create_processors_program() ], add_help = False)


 def collect_job_program() -> ArgumentParser:
-    return ArgumentParser(parents= [ create_execution_program(), create_memory_program(), create_skip_download_program(), create_log_level_program() ], add_help = False)
+    return ArgumentParser(parents= [ create_execution_program(), create_download_providers_program(), create_memory_program(), create_misc_program() ], add_help = False)


 def create_program() -> ArgumentParser:
@@ -210,25 +270,26 @@
     program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
     sub_program = program.add_subparsers(dest = 'command')
     # general
-    sub_program.add_parser('run', help = wording.get('help.run'), parents = [ collect_step_program(), create_uis_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('headless-run', help = wording.get('help.headless_run'), parents = [ collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('force-download', help = wording.get('help.force_download'), parents = [ create_log_level_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('run', help = wording.get('help.run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_uis_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('headless-run', help = wording.get('help.headless_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('batch-run', help = wording.get('help.batch_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_pattern_program(), create_target_pattern_program(), create_output_pattern_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('force-download', help = wording.get('help.force_download'), parents = [ create_download_providers_program(), create_download_scope_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
     # job manager
-    sub_program.add_parser('job-list', help = wording.get('help.job_list'), parents = [ create_job_status_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-create', help = wording.get('help.job_create'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-submit', help = wording.get('help.job_submit'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-submit-all', help = wording.get('help.job_submit_all'), parents = [ create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-delete', help = wording.get('help.job_delete'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-delete-all', help = wording.get('help.job_delete_all'), parents = [ create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-add-step', help = wording.get('help.job_add_step'), parents = [ create_job_id_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-remix-step', help = wording.get('help.job_remix_step'), parents = [ create_job_id_program(), create_step_index_program(), remove_args(collect_step_program(), [ 'target_path' ]), create_log_level_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-insert-step', help = wording.get('help.job_insert_step'), parents = [ create_job_id_program(), create_step_index_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-remove-step', help = wording.get('help.job_remove_step'), parents = [ create_job_id_program(), create_step_index_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-list', help = wording.get('help.job_list'), parents = [ create_job_status_program(), create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-create', help = wording.get('help.job_create'), parents = [ create_job_id_program(), create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-submit', help = wording.get('help.job_submit'), parents = [ create_job_id_program(), create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-submit-all', help = wording.get('help.job_submit_all'), parents = [ create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-delete', help = wording.get('help.job_delete'), parents = [ create_job_id_program(), create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-delete-all', help = wording.get('help.job_delete_all'), parents = [ create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-add-step', help = wording.get('help.job_add_step'), parents = [ create_job_id_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-remix-step', help = wording.get('help.job_remix_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_output_path_program(), collect_step_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-insert-step', help = wording.get('help.job_insert_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-remove-step', help = wording.get('help.job_remove_step'), parents = [ create_job_id_program(), create_step_index_program(), create_jobs_path_program(), create_misc_program() ], formatter_class = create_help_formatter_large)
     # job runner
-    sub_program.add_parser('job-run', help = wording.get('help.job_run'), parents = [ create_job_id_program(), create_config_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-run-all', help = wording.get('help.job_run_all'), parents = [ create_config_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-retry', help = wording.get('help.job_retry'), parents = [ create_job_id_program(), create_config_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
-    sub_program.add_parser('job-retry-all', help = wording.get('help.job_retry_all'), parents = [ create_config_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-run', help = wording.get('help.job_run'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-run-all', help = wording.get('help.job_run_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-retry', help = wording.get('help.job_retry'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
+    sub_program.add_parser('job-retry-all', help = wording.get('help.job_retry_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large)
     return ArgumentParser(parents = [ program ], formatter_class = create_help_formatter_small, add_help = True)
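The subcommand table above leans on argparse's parents mechanism: each create_*_program() returns a parser built with add_help = False, and every subcommand composes exactly the option groups it needs, so --target-path exists on run but not on job-list. In miniature:

from argparse import ArgumentParser


def create_target_path_program() -> ArgumentParser:
    program = ArgumentParser(add_help = False)  # parents must not register -h themselves
    program.add_argument('-t', '--target-path')
    return program


def create_misc_program() -> ArgumentParser:
    program = ArgumentParser(add_help = False)
    program.add_argument('--log-level', default = 'info')
    return program


program = ArgumentParser()
sub_program = program.add_subparsers(dest = 'command')
sub_program.add_parser('run', parents = [ create_target_path_program(), create_misc_program() ])
sub_program.add_parser('job-list', parents = [ create_misc_program() ])  # no path options here

print(program.parse_args([ 'run', '-t', 'target.mp4' ]))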
diff --git a/facefusion/program_helper.py b/facefusion/program_helper.py
index fb7738de..bec9bf1e 100644
--- a/facefusion/program_helper.py
+++ b/facefusion/program_helper.py
@@ -1,8 +1,5 @@
 from argparse import ArgumentParser, _ArgumentGroup, _SubParsersAction
-from typing import List, Optional
-
-import facefusion.choices
-from facefusion.processors import choices as processors_choices
+from typing import Optional


 def find_argument_group(program : ArgumentParser, group_name : str) -> Optional[_ArgumentGroup]:
@@ -32,21 +29,3 @@ def validate_actions(program : ArgumentParser) -> bool:
             elif action.default not in action.choices:
                 return False
     return True
-
-
-def remove_args(program : ArgumentParser, remove_names : List[str]) -> ArgumentParser:
-    actions = [ action for action in program._actions if action.dest in remove_names ]
-
-    for action in actions:
-        program._actions.remove(action)
-    return program
-
-
-def suggest_face_detector_choices(program : ArgumentParser) -> List[str]:
-    known_args, _ = program.parse_known_args()
-    return facefusion.choices.face_detector_set.get(known_args.face_detector_model) #type:ignore[call-overload]
-
-
-def suggest_face_swapper_pixel_boost_choices(program : ArgumentParser) -> List[str]:
-    known_args, _ = program.parse_known_args()
-    return processors_choices.face_swapper_set.get(known_args.face_swapper_model) #type:ignore[call-overload]
diff --git a/facefusion/temp_helper.py b/facefusion/temp_helper.py
index c1798366..16e9c605 100644
--- a/facefusion/temp_helper.py
+++ b/facefusion/temp_helper.py
@@ -1,10 +1,8 @@
-import glob
 import os
-import tempfile
 from typing import List

 from facefusion import state_manager
-from facefusion.filesystem import create_directory, move_file, remove_directory
+from facefusion.filesystem import create_directory, move_file, remove_directory, resolve_file_pattern


 def get_temp_file_path(file_path : str) -> str:
@@ -18,34 +16,9 @@ def move_temp_file(file_path : str, move_path : str) -> bool:
     return move_file(temp_file_path, move_path)


-def get_temp_frame_paths(target_path : str) -> List[str]:
-    temp_frames_pattern = get_temp_frames_pattern(target_path, '*')
-    return sorted(glob.glob(temp_frames_pattern))
-
-
-def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str:
-    temp_directory_path = get_temp_directory_path(target_path)
-    return os.path.join(temp_directory_path, temp_frame_prefix + '.' + state_manager.get_item('temp_frame_format'))
-
-
-def get_base_directory_path() -> str:
-    return os.path.join(tempfile.gettempdir(), 'facefusion')
-
-
-def create_base_directory() -> bool:
-    base_directory_path = get_base_directory_path()
-    return create_directory(base_directory_path)
-
-
-def clear_base_directory() -> bool:
-    base_directory_path = get_base_directory_path()
-    return remove_directory(base_directory_path)
-
-
 def get_temp_directory_path(file_path : str) -> str:
     temp_file_name, _ = os.path.splitext(os.path.basename(file_path))
-    base_directory_path = get_base_directory_path()
-    return os.path.join(base_directory_path, temp_file_name)
+    return os.path.join(state_manager.get_item('temp_path'), 'facefusion', temp_file_name)


 def create_temp_directory(file_path : str) -> bool:
@@ -58,3 +31,13 @@ def clear_temp_directory(file_path : str) -> bool:
         temp_directory_path = get_temp_directory_path(file_path)
         return remove_directory(temp_directory_path)
     return True
+
+
+def get_temp_frame_paths(target_path : str) -> List[str]:
+    temp_frames_pattern = get_temp_frames_pattern(target_path, '*')
+    return resolve_file_pattern(temp_frames_pattern)
+
+
+def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str:
+    temp_directory_path = get_temp_directory_path(target_path)
+    return os.path.join(temp_directory_path, temp_frame_prefix + '.' + state_manager.get_item('temp_frame_format'))
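With the base-directory helpers gone, the temp location is derived on demand from the new temp_path state item (defaulting to tempfile.gettempdir() via --temp-path), with a fixed facefusion subfolder and one directory per target file. The resulting layout, traced with plain os.path calls:

import os

temp_path = '/tmp'  # stand-in for state_manager.get_item('temp_path')


def get_temp_directory_path(file_path : str) -> str:
    temp_file_name, _ = os.path.splitext(os.path.basename(file_path))
    return os.path.join(temp_path, 'facefusion', temp_file_name)


print(get_temp_directory_path('/videos/target.mp4'))  # /tmp/facefusion/target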
diff --git a/facefusion/typing.py b/facefusion/typing.py
index 5ce43ad5..4b2f441a 100755
--- a/facefusion/typing.py
+++ b/facefusion/typing.py
@@ -67,6 +67,7 @@
 Mel = NDArray[Any]
 MelFilterBank = NDArray[Any]

 Fps = float
+Duration = float
 Padding = Tuple[int, int, int, int]
 Orientation = Literal['landscape', 'portrait']
 Resolution = Tuple[int, int]
@@ -84,7 +85,7 @@ ProcessStep = Callable[[str, int, Args], bool]

 Content = Dict[str, Any]

-WarpTemplate = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512']
+WarpTemplate = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'dfl_whole_face', 'ffhq_512', 'mtcnn_512', 'styleganex_384']
 WarpTemplateSet = Dict[WarpTemplate, NDArray[Any]]
 ProcessMode = Literal['output', 'preview', 'stream']
@@ -95,34 +96,28 @@ LogLevelSet = Dict[LogLevel, int]

 TableHeaders = List[str]
 TableContents = List[List[Any]]

-VideoMemoryStrategy = Literal['strict', 'moderate', 'tolerant']
 FaceDetectorModel = Literal['many', 'retinaface', 'scrfd', 'yoloface']
 FaceLandmarkerModel = Literal['many', '2dfan4', 'peppa_wutz']
 FaceDetectorSet = Dict[FaceDetectorModel, List[str]]
 FaceSelectorMode = Literal['many', 'one', 'reference']
 FaceSelectorOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best']
+FaceOccluderModel = Literal['xseg_1', 'xseg_2']
+FaceParserModel = Literal['bisenet_resnet_18', 'bisenet_resnet_34']
 FaceMaskType = Literal['box', 'occlusion', 'region']
 FaceMaskRegion = Literal['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip']
-TempFrameFormat = Literal['jpg', 'png', 'bmp']
+FaceMaskRegionSet = Dict[FaceMaskRegion, int]
+TempFrameFormat = Literal['bmp', 'jpg', 'png']
 OutputAudioEncoder = Literal['aac', 'libmp3lame', 'libopus', 'libvorbis']
-OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf', 'h264_videotoolbox', 'hevc_videotoolbox']
+OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf', 'h264_qsv', 'hevc_qsv', 'h264_videotoolbox', 'hevc_videotoolbox']
 OutputVideoPreset = Literal['ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow']

-Download = TypedDict('Download',
-{
-    'url' : str,
-    'path' : str
-})
-DownloadSet = Dict[str, Download]
-
 ModelOptions = Dict[str, Any]
 ModelSet = Dict[str, ModelOptions]
 ModelInitializer = NDArray[Any]

-ExecutionProviderKey = Literal['cpu', 'coreml', 'cuda', 'directml', 'openvino', 'rocm', 'tensorrt']
+ExecutionProvider = Literal['cpu', 'coreml', 'cuda', 'directml', 'openvino', 'rocm', 'tensorrt']
 ExecutionProviderValue = Literal['CPUExecutionProvider', 'CoreMLExecutionProvider', 'CUDAExecutionProvider', 'DmlExecutionProvider', 'OpenVINOExecutionProvider', 'ROCMExecutionProvider', 'TensorrtExecutionProvider']
-ExecutionProviderSet = Dict[ExecutionProviderKey, ExecutionProviderValue]
-
+ExecutionProviderSet = Dict[ExecutionProvider, ExecutionProviderValue]
 ValueAndUnit = TypedDict('ValueAndUnit',
 {
     'value' : int,
@@ -140,13 +135,18 @@ ExecutionDeviceProduct = TypedDict('ExecutionDeviceProduct',
 })
 ExecutionDeviceVideoMemory = TypedDict('ExecutionDeviceVideoMemory',
 {
-    'total' : ValueAndUnit,
-    'free' : ValueAndUnit
+    'total' : Optional[ValueAndUnit],
+    'free' : Optional[ValueAndUnit]
+})
+ExecutionDeviceTemperature = TypedDict('ExecutionDeviceTemperature',
+{
+    'gpu' : Optional[ValueAndUnit],
+    'memory' : Optional[ValueAndUnit]
 })
 ExecutionDeviceUtilization = TypedDict('ExecutionDeviceUtilization',
 {
-    'gpu' : ValueAndUnit,
-    'memory' : ValueAndUnit
+    'gpu' : Optional[ValueAndUnit],
+    'memory' : Optional[ValueAndUnit]
 })
 ExecutionDevice = TypedDict('ExecutionDevice',
 {
@@ -154,9 +154,34 @@ ExecutionDevice = TypedDict('ExecutionDevice',
     'framework' : ExecutionDeviceFramework,
     'product' : ExecutionDeviceProduct,
     'video_memory' : ExecutionDeviceVideoMemory,
+    'temperature' : ExecutionDeviceTemperature,
     'utilization' : ExecutionDeviceUtilization
 })

+DownloadProvider = Literal['github', 'huggingface']
+DownloadProviderValue = TypedDict('DownloadProviderValue',
+{
+    'url' : str,
+    'path' : str
+})
+DownloadProviderSet = Dict[DownloadProvider, DownloadProviderValue]
+DownloadScope = Literal['lite', 'full']
+Download = TypedDict('Download',
+{
+    'url' : str,
+    'path' : str
+})
+DownloadSet = Dict[str, Download]
+
+VideoMemoryStrategy = Literal['strict', 'moderate', 'tolerant']
+
+File = TypedDict('File',
+{
+    'name' : str,
+    'extension' : str,
+    'path' : str
+})
+
 AppContext = Literal['cli', 'ui']

 InferencePool = Dict[str, InferenceSession]
@@ -191,10 +216,14 @@ StateKey = Literal\
 [
     'command',
     'config_path',
+    'temp_path',
     'jobs_path',
     'source_paths',
     'target_path',
     'output_path',
+    'source_pattern',
+    'target_pattern',
+    'output_pattern',
     'face_detector_model',
     'face_detector_size',
     'face_detector_angles',
@@ -210,6 +239,8 @@ StateKey = Literal\
     'reference_face_position',
     'reference_face_distance',
     'reference_frame_number',
+    'face_occluder_model',
+    'face_parser_model',
     'face_mask_types',
     'face_mask_blur',
     'face_mask_padding',
@@ -235,9 +266,10 @@ StateKey = Literal\
     'execution_providers',
     'execution_thread_count',
     'execution_queue_count',
+    'download_providers',
+    'download_scope',
     'video_memory_strategy',
     'system_memory_limit',
-    'skip_download',
     'log_level',
     'job_id',
     'job_status',
@@ -247,10 +279,14 @@ State = TypedDict('State',
 {
     'command' : str,
     'config_path' : str,
+    'temp_path' : str,
     'jobs_path' : str,
     'source_paths' : List[str],
     'target_path' : str,
     'output_path' : str,
+    'source_pattern' : str,
+    'target_pattern' : str,
+    'output_pattern' : str,
     'face_detector_model' : FaceDetectorModel,
     'face_detector_size' : str,
     'face_detector_angles' : List[Angle],
@@ -266,6 +302,8 @@ State = TypedDict('State',
     'reference_face_position' : int,
     'reference_face_distance' : float,
     'reference_frame_number' : int,
+    'face_occluder_model' : FaceOccluderModel,
+    'face_parser_model' : FaceParserModel,
     'face_mask_types' : List[FaceMaskType],
     'face_mask_blur' : float,
     'face_mask_padding' : Padding,
@@ -288,12 +326,13 @@ State = TypedDict('State',
     'ui_layouts' : List[str],
     'ui_workflow' : UiWorkflow,
     'execution_device_id' : str,
-    'execution_providers' : List[ExecutionProviderKey],
+    'execution_providers' : List[ExecutionProvider],
     'execution_thread_count' : int,
     'execution_queue_count' : int,
+    'download_providers' : List[DownloadProvider],
+    'download_scope' : DownloadScope,
     'video_memory_strategy' : VideoMemoryStrategy,
     'system_memory_limit' : int,
-    'skip_download' : bool,
     'log_level' : LogLevel,
     'job_id' : str,
     'job_status' : JobStatus,
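The device-introspection TypedDicts now wrap every metric in Optional[ValueAndUnit], acknowledging that not every execution backend reports video memory, temperature or utilization; consumers are expected to null-check rather than assume a reading. For instance (simplified, with a hypothetical formatter):

from typing import Optional, TypedDict

ValueAndUnit = TypedDict('ValueAndUnit',
{
    'value' : int,
    'unit' : str
})


def format_value_and_unit(value_and_unit : Optional[ValueAndUnit]) -> str:
    # None signals that the backend could not report this sensor
    if value_and_unit:
        return str(value_and_unit.get('value')) + ' ' + value_and_unit.get('unit')
    return 'n/a'


print(format_value_and_unit({ 'value': 74, 'unit': 'celsius' }))  # 74 celsius
print(format_value_and_unit(None))  # n/a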
diff --git a/facefusion/uis/assets/overrides.css b/facefusion/uis/assets/overrides.css
index 639c9870..b044ac62 100644
--- a/facefusion/uis/assets/overrides.css
+++ b/facefusion/uis/assets/overrides.css
@@ -6,7 +6,29 @@

 :root:root:root:root input[type="number"]
 {
-	max-width: 6rem;
+	appearance: textfield;
+	border-radius: unset;
+	text-align: center;
+	order: 1;
+	padding: unset;
+}
+
+:root:root:root:root input[type="number"]::-webkit-inner-spin-button
+{
+	appearance: none;
+}
+
+:root:root:root:root input[type="number"]:focus
+{
+	outline: unset;
+}
+
+:root:root:root:root .reset-button
+{
+	background: var(--background-fill-secondary);
+	border: unset;
+	font-size: unset;
+	padding: unset;
 }

 :root:root:root:root [type="checkbox"],
@@ -17,39 +39,25 @@
 	width: 1.125rem;
 }

-:root:root:root:root input[type="range"],
-:root:root:root:root .range-slider div
+:root:root:root:root input[type="range"]
 {
-	height: 0.5rem;
-	border-radius: 0.5rem;
+	background: transparent;
 }

 :root:root:root:root input[type="range"]::-moz-range-thumb,
 :root:root:root:root input[type="range"]::-webkit-slider-thumb
 {
 	background: var(--neutral-300);
-	border: unset;
+	box-shadow: unset;
 	border-radius: 50%;
 	height: 1.125rem;
 	width: 1.125rem;
 }

-:root:root:root:root input[type="range"]::-webkit-slider-thumb
+:root:root:root:root .thumbnail-item
 {
-	margin-top: 0.375rem;
-}
-
-:root:root:root:root .range-slider input[type="range"]::-webkit-slider-thumb
-{
-	margin-top: 0.125rem;
-}
-
-:root:root:root:root .range-slider div,
-:root:root:root:root .range-slider input[type="range"]
-{
-	bottom: 50%;
-	margin-top: -0.25rem;
-	top: 50%;
+	border: unset;
+	box-shadow: unset;
 }

 :root:root:root:root .grid-wrap.fixed-height
@@ -57,27 +65,61 @@
 	min-height: unset;
 }

-:root:root:root:root .generating,
-:root:root:root:root .thumbnail-item
+:root:root:root:root .tab-wrapper
 {
-	border: unset;
+	padding: 0 0.625rem;
 }

+:root:root:root:root .tab-container
+{
+	gap: 0.5em;
+}
+
+:root:root:root:root .tab-container button
+{
+	background: unset;
+	border-bottom: 0.125rem solid;
+}
+
+:root:root:root:root .tab-container button.selected
+{
+	color: var(--primary-500);
+}
+
+:root:root:root:root .toast-body
+{
+	background: white;
+	color: var(--primary-500);
+	border: unset;
+	border-radius: unset;
+}
+
+:root:root:root:root .dark .toast-body
+{
+	background: var(--neutral-900);
+	color: var(--primary-600);
+}
+
+:root:root:root:root .toast-icon,
+:root:root:root:root .toast-title,
+:root:root:root:root .toast-text,
+:root:root:root:root .toast-close
+{
+	color: unset;
+}
+
+:root:root:root:root .toast-body .timer
+{
+	background: currentColor;
+}
+
+:root:root:root:root .slider_input_container > span,
 :root:root:root:root .feather-upload,
 :root:root:root:root footer
 {
 	display: none;
 }

-:root:root:root:root .tab-nav > button
-{
-	border: unset;
-	box-shadow: 0 0.125rem;
-	font-size: 1.125em;
-	margin: 0.5rem 0.75rem;
-	padding: unset;
-}
-
 :root:root:root:root .image-frame
 {
 	width: 100%;
@@ -94,3 +136,11 @@
 	top: 0;
 	z-index: 100;
 }
+
+:root:root:root:root .block .error
+{
+	border: 0.125rem solid;
+	padding: 0.375rem 0.75rem;
+	font-size: 0.75rem;
+	text-transform: uppercase;
+}
diff --git a/facefusion/uis/choices.py b/facefusion/uis/choices.py
index 3650365e..da0aa267 100644
--- a/facefusion/uis/choices.py
+++ b/facefusion/uis/choices.py
@@ -5,7 +5,7 @@ from facefusion.uis.typing import JobManagerAction, JobRunnerAction, WebcamMode

 job_manager_actions : List[JobManagerAction] = [ 'job-create', 'job-submit', 'job-delete', 'job-add-step', 'job-remix-step', 'job-insert-step', 'job-remove-step' ]
 job_runner_actions : List[JobRunnerAction] = [ 'job-run', 'job-run-all', 'job-retry', 'job-retry-all' ]
-common_options : List[str] = [ 'keep-temp', 'skip-audio', 'skip-download' ]
+common_options : List[str] = [ 'keep-temp', 'skip-audio' ]
 webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
 webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ]
diff --git a/facefusion/uis/components/age_modifier_options.py b/facefusion/uis/components/age_modifier_options.py
index 73b1de23..ef80b78a 100755
--- a/facefusion/uis/components/age_modifier_options.py
+++ b/facefusion/uis/components/age_modifier_options.py
@@ -17,11 +17,12 @@ def render() -> None:
     global AGE_MODIFIER_MODEL_DROPDOWN
     global AGE_MODIFIER_DIRECTION_SLIDER

+    has_age_modifier = 'age_modifier' in state_manager.get_item('processors')
     AGE_MODIFIER_MODEL_DROPDOWN = gradio.Dropdown(
         label = wording.get('uis.age_modifier_model_dropdown'),
         choices = processors_choices.age_modifier_models,
         value = state_manager.get_item('age_modifier_model'),
-        visible = 'age_modifier' in state_manager.get_item('processors')
+        visible = has_age_modifier
     )
     AGE_MODIFIER_DIRECTION_SLIDER = gradio.Slider(
         label = wording.get('uis.age_modifier_direction_slider'),
@@ -29,7 +30,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.age_modifier_direction_range),
         minimum = processors_choices.age_modifier_direction_range[0],
         maximum = processors_choices.age_modifier_direction_range[-1],
-        visible = 'age_modifier' in state_manager.get_item('processors')
+        visible = has_age_modifier
     )
     register_ui_component('age_modifier_model_dropdown', AGE_MODIFIER_MODEL_DROPDOWN)
     register_ui_component('age_modifier_direction_slider', AGE_MODIFIER_DIRECTION_SLIDER)
diff --git a/facefusion/uis/components/benchmark_options.py b/facefusion/uis/components/benchmark_options.py
index 5b5cda02..c087487b 100644
--- a/facefusion/uis/components/benchmark_options.py
+++ b/facefusion/uis/components/benchmark_options.py
@@ -16,8 +16,8 @@ def render() -> None:

     BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup(
         label = wording.get('uis.benchmark_runs_checkbox_group'),
-        value = list(BENCHMARKS.keys()),
-        choices = list(BENCHMARKS.keys())
+        choices = list(BENCHMARKS.keys()),
+        value = list(BENCHMARKS.keys())
     )
     BENCHMARK_CYCLES_SLIDER = gradio.Slider(
         label = wording.get('uis.benchmark_cycles_slider'),
diff --git a/facefusion/uis/components/common_options.py b/facefusion/uis/components/common_options.py
index 0352ff34..b44b60c3 100644
--- a/facefusion/uis/components/common_options.py
+++ b/facefusion/uis/components/common_options.py
@@ -13,8 +13,6 @@ def render() -> None:

     common_options = []

-    if state_manager.get_item('skip_download'):
-        common_options.append('skip-download')
     if state_manager.get_item('keep_temp'):
         common_options.append('keep-temp')
     if state_manager.get_item('skip_audio'):
@@ -32,9 +30,7 @@ def listen() -> None:


 def update(common_options : List[str]) -> None:
-    skip_temp = 'skip-download' in common_options
     keep_temp = 'keep-temp' in common_options
     skip_audio = 'skip-audio' in common_options
-    state_manager.set_item('skip_download', skip_temp)
     state_manager.set_item('keep_temp', keep_temp)
     state_manager.set_item('skip_audio', skip_audio)
diff --git a/facefusion/uis/components/deep_swapper_options.py b/facefusion/uis/components/deep_swapper_options.py
new file mode 100755
index 00000000..21f34670
--- /dev/null
+++ b/facefusion/uis/components/deep_swapper_options.py
@@ -0,0 +1,65 @@
+from typing import List, Optional, Tuple
+
+import gradio
+
+from facefusion import state_manager, wording
+from facefusion.common_helper import calc_int_step
+from facefusion.processors import choices as processors_choices
+from facefusion.processors.core import load_processor_module
+from facefusion.processors.modules.deep_swapper import has_morph_input
+from facefusion.processors.typing import DeepSwapperModel
+from facefusion.uis.core import get_ui_component, register_ui_component
+
+DEEP_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+DEEP_SWAPPER_MORPH_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+    global DEEP_SWAPPER_MODEL_DROPDOWN
+    global DEEP_SWAPPER_MORPH_SLIDER
+
+    has_deep_swapper = 'deep_swapper' in state_manager.get_item('processors')
+    DEEP_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
+        label = wording.get('uis.deep_swapper_model_dropdown'),
+        choices = processors_choices.deep_swapper_models,
+        value = state_manager.get_item('deep_swapper_model'),
+        visible = has_deep_swapper
+    )
+    DEEP_SWAPPER_MORPH_SLIDER = gradio.Slider(
+        label = wording.get('uis.deep_swapper_morph_slider'),
+        value = state_manager.get_item('deep_swapper_morph'),
+        step = calc_int_step(processors_choices.deep_swapper_morph_range),
+        minimum = processors_choices.deep_swapper_morph_range[0],
+        maximum = processors_choices.deep_swapper_morph_range[-1],
+        visible = has_deep_swapper and has_morph_input()
+    )
+    register_ui_component('deep_swapper_model_dropdown', DEEP_SWAPPER_MODEL_DROPDOWN)
+    register_ui_component('deep_swapper_morph_slider', DEEP_SWAPPER_MORPH_SLIDER)
+
+
+def listen() -> None:
+    DEEP_SWAPPER_MODEL_DROPDOWN.change(update_deep_swapper_model, inputs = DEEP_SWAPPER_MODEL_DROPDOWN, outputs = [ DEEP_SWAPPER_MODEL_DROPDOWN, DEEP_SWAPPER_MORPH_SLIDER ])
+    DEEP_SWAPPER_MORPH_SLIDER.release(update_deep_swapper_morph, inputs = DEEP_SWAPPER_MORPH_SLIDER)
+
+    processors_checkbox_group = get_ui_component('processors_checkbox_group')
+    if processors_checkbox_group:
+        processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ DEEP_SWAPPER_MODEL_DROPDOWN, DEEP_SWAPPER_MORPH_SLIDER ])
+
+
+def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider]:
+    has_deep_swapper = 'deep_swapper' in processors
+    return gradio.Dropdown(visible = has_deep_swapper), gradio.Slider(visible = has_deep_swapper and has_morph_input())
+
+
+def update_deep_swapper_model(deep_swapper_model : DeepSwapperModel) -> Tuple[gradio.Dropdown, gradio.Slider]:
+    deep_swapper_module = load_processor_module('deep_swapper')
+    deep_swapper_module.clear_inference_pool()
+    state_manager.set_item('deep_swapper_model', deep_swapper_model)
+
+    if deep_swapper_module.pre_check():
+        return gradio.Dropdown(value = state_manager.get_item('deep_swapper_model')), gradio.Slider(visible = has_morph_input())
+    return gradio.Dropdown(), gradio.Slider()
+
+
+def update_deep_swapper_morph(deep_swapper_morph : int) -> None:
+    state_manager.set_item('deep_swapper_morph', deep_swapper_morph)
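A detail of the component above that is easy to miss: the morph slider's visibility is the conjunction of two conditions, the processor being active and the loaded model exposing a morph input, and both event handlers re-emit the same pair of outputs so dropdown and slider never drift apart. The contract, reduced to plain logic:

def compute_visibility(processors : list, model_has_morph_input : bool) -> tuple:
    has_deep_swapper = 'deep_swapper' in processors
    dropdown_visible = has_deep_swapper
    slider_visible = has_deep_swapper and model_has_morph_input
    return dropdown_visible, slider_visible


print(compute_visibility([ 'deep_swapper' ], True))   # (True, True)
print(compute_visibility([ 'deep_swapper' ], False))  # (True, False): model without a morph input
print(compute_visibility([ 'face_swapper' ], True))   # (False, False)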
diff --git a/facefusion/uis/components/download.py b/facefusion/uis/components/download.py
new file mode 100644
index 00000000..7fa8fafb
--- /dev/null
+++ b/facefusion/uis/components/download.py
@@ -0,0 +1,48 @@
+from typing import List, Optional
+
+import gradio
+
+import facefusion.choices
+from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, voice_extractor, wording
+from facefusion.filesystem import list_directory
+from facefusion.processors.core import get_processors_modules
+from facefusion.typing import DownloadProvider
+
+DOWNLOAD_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+def render() -> None:
+    global DOWNLOAD_PROVIDERS_CHECKBOX_GROUP
+
+    DOWNLOAD_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+        label = wording.get('uis.download_providers_checkbox_group'),
+        choices = facefusion.choices.download_providers,
+        value = state_manager.get_item('download_providers')
+    )
+
+
+def listen() -> None:
+    DOWNLOAD_PROVIDERS_CHECKBOX_GROUP.change(update_download_providers, inputs = DOWNLOAD_PROVIDERS_CHECKBOX_GROUP, outputs = DOWNLOAD_PROVIDERS_CHECKBOX_GROUP)
+
+
+def update_download_providers(download_providers : List[DownloadProvider]) -> gradio.CheckboxGroup:
+    common_modules =\
+    [
+        content_analyser,
+        face_classifier,
+        face_detector,
+        face_landmarker,
+        face_recognizer,
+        face_masker,
+        voice_extractor
+    ]
+    available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ]
+    processor_modules = get_processors_modules(available_processors)
+
+    for module in common_modules + processor_modules:
+        if hasattr(module, 'create_static_model_set'):
+            module.create_static_model_set.cache_clear()
+
+    download_providers = download_providers or facefusion.choices.download_providers
+    state_manager.set_item('download_providers', download_providers)
+    return gradio.CheckboxGroup(value = state_manager.get_item('download_providers'))
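update_download_providers above relies on a functools detail: any function wrapped in @lru_cache exposes cache_clear(), so dropping the memoized model sets forces each module to re-resolve its download URLs against the newly selected provider, while the hasattr guard skips modules that never define the factory. The mechanics in isolation:

from functools import lru_cache


@lru_cache(maxsize = None)
def create_static_model_set(download_scope : str) -> dict:
    # imagine URLs resolved here via the currently selected download provider
    return { 'download_scope': download_scope }


first = create_static_model_set('full')
assert first is create_static_model_set('full')  # memoized: the same object is returned

create_static_model_set.cache_clear()  # provider changed: drop every cached set
assert first is not create_static_model_set('full')  # rebuilt on the next call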
diff --git a/facefusion/uis/components/execution.py b/facefusion/uis/components/execution.py
index baf1888c..53275ad3 100644
--- a/facefusion/uis/components/execution.py
+++ b/facefusion/uis/components/execution.py
@@ -3,9 +3,10 @@ from typing import List, Optional

 import gradio

 from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, voice_extractor, wording
-from facefusion.execution import get_execution_provider_choices
-from facefusion.processors.core import clear_processors_modules
-from facefusion.typing import ExecutionProviderKey
+from facefusion.execution import get_available_execution_providers
+from facefusion.filesystem import list_directory
+from facefusion.processors.core import get_processors_modules
+from facefusion.typing import ExecutionProvider

 EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
@@ -15,7 +16,7 @@ def render() -> None:

     EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
         label = wording.get('uis.execution_providers_checkbox_group'),
-        choices = get_execution_provider_choices(),
+        choices = get_available_execution_providers(),
         value = state_manager.get_item('execution_providers')
     )
@@ -24,15 +25,24 @@ def listen() -> None:
     EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP)


-def update_execution_providers(execution_providers : List[ExecutionProviderKey]) -> gradio.CheckboxGroup:
-    content_analyser.clear_inference_pool()
-    face_classifier.clear_inference_pool()
-    face_detector.clear_inference_pool()
-    face_landmarker.clear_inference_pool()
-    face_masker.clear_inference_pool()
-    face_recognizer.clear_inference_pool()
-    voice_extractor.clear_inference_pool()
-    clear_processors_modules(state_manager.get_item('processors'))
-    execution_providers = execution_providers or get_execution_provider_choices()
+def update_execution_providers(execution_providers : List[ExecutionProvider]) -> gradio.CheckboxGroup:
+    common_modules =\
+    [
+        content_analyser,
+        face_classifier,
+        face_detector,
+        face_landmarker,
+        face_masker,
+        face_recognizer,
+        voice_extractor
+    ]
+    available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ]
+    processor_modules = get_processors_modules(available_processors)
+
+    for module in common_modules + processor_modules:
+        if hasattr(module, 'clear_inference_pool'):
+            module.clear_inference_pool()
+
+    execution_providers = execution_providers or get_available_execution_providers()
     state_manager.set_item('execution_providers', execution_providers)
     return gradio.CheckboxGroup(value = state_manager.get_item('execution_providers'))
diff --git a/facefusion/uis/components/expression_restorer_options.py b/facefusion/uis/components/expression_restorer_options.py
index eaa48ecf..06bc3fa3 100755
--- a/facefusion/uis/components/expression_restorer_options.py
+++ b/facefusion/uis/components/expression_restorer_options.py
@@ -17,11 +17,12 @@ def render() -> None:
     global EXPRESSION_RESTORER_MODEL_DROPDOWN
     global EXPRESSION_RESTORER_FACTOR_SLIDER

+    has_expression_restorer = 'expression_restorer' in state_manager.get_item('processors')
     EXPRESSION_RESTORER_MODEL_DROPDOWN = gradio.Dropdown(
         label = wording.get('uis.expression_restorer_model_dropdown'),
         choices = processors_choices.expression_restorer_models,
         value = state_manager.get_item('expression_restorer_model'),
-        visible = 'expression_restorer' in state_manager.get_item('processors')
+        visible = has_expression_restorer
     )
     EXPRESSION_RESTORER_FACTOR_SLIDER = gradio.Slider(
         label = wording.get('uis.expression_restorer_factor_slider'),
@@ -29,7 +30,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.expression_restorer_factor_range),
         minimum = processors_choices.expression_restorer_factor_range[0],
         maximum = processors_choices.expression_restorer_factor_range[-1],
-        visible = 'expression_restorer' in state_manager.get_item('processors'),
+        visible = has_expression_restorer
     )
     register_ui_component('expression_restorer_model_dropdown', EXPRESSION_RESTORER_MODEL_DROPDOWN)
     register_ui_component('expression_restorer_factor_slider', EXPRESSION_RESTORER_FACTOR_SLIDER)
diff --git a/facefusion/uis/components/face_debugger_options.py b/facefusion/uis/components/face_debugger_options.py
index 088b0877..4133638d 100755
--- a/facefusion/uis/components/face_debugger_options.py
+++ b/facefusion/uis/components/face_debugger_options.py
@@ -13,11 +13,12 @@ FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None

 def render() -> None:
     global FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP

+    has_face_debugger = 'face_debugger' in state_manager.get_item('processors')
     FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP = gradio.CheckboxGroup(
         label = wording.get('uis.face_debugger_items_checkbox_group'),
         choices = processors_choices.face_debugger_items,
         value = state_manager.get_item('face_debugger_items'),
-        visible = 'face_debugger' in state_manager.get_item('processors')
+        visible = has_face_debugger
     )
     register_ui_component('face_debugger_items_checkbox_group', FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
diff --git a/facefusion/uis/components/face_detector.py b/facefusion/uis/components/face_detector.py
index eb0e20fc..1b1c30d7 100644
--- a/facefusion/uis/components/face_detector.py
+++ b/facefusion/uis/components/face_detector.py
@@ -3,7 +3,7 @@ from typing import Optional, Sequence, Tuple
 import gradio

 import facefusion.choices
-from facefusion import choices, face_detector, state_manager, wording
+from facefusion import face_detector, state_manager, wording
 from facefusion.common_helper import calc_float_step, get_last
 from facefusion.typing import Angle, FaceDetectorModel, Score
 from facefusion.uis.core import register_ui_component
@@ -31,7 +31,7 @@ def render() -> None:
     with gradio.Row():
         FACE_DETECTOR_MODEL_DROPDOWN = gradio.Dropdown(
             label = wording.get('uis.face_detector_model_dropdown'),
-            choices = facefusion.choices.face_detector_set.keys(),
+            choices = facefusion.choices.face_detector_models,
             value = state_manager.get_item('face_detector_model')
         )
         FACE_DETECTOR_SIZE_DROPDOWN = gradio.Dropdown(**face_detector_size_dropdown_options)
@@ -65,7 +65,7 @@ def update_face_detector_model(face_detector_model : FaceDetectorModel) -> Tuple
     state_manager.set_item('face_detector_model', face_detector_model)

     if face_detector.pre_check():
-        face_detector_size_choices = choices.face_detector_set.get(state_manager.get_item('face_detector_model'))
+        face_detector_size_choices = facefusion.choices.face_detector_set.get(state_manager.get_item('face_detector_model'))
         state_manager.set_item('face_detector_size', get_last(face_detector_size_choices))
         return gradio.Dropdown(value = state_manager.get_item('face_detector_model')), gradio.Dropdown(value = state_manager.get_item('face_detector_size'), choices = face_detector_size_choices)
     return gradio.Dropdown(), gradio.Dropdown()
diff --git a/facefusion/uis/components/face_editor_options.py b/facefusion/uis/components/face_editor_options.py
index cf4efa0f..ef846aaf 100755
--- a/facefusion/uis/components/face_editor_options.py
+++ b/facefusion/uis/components/face_editor_options.py
@@ -43,11 +43,12 @@ def render() -> None:
     global FACE_EDITOR_HEAD_YAW_SLIDER
     global FACE_EDITOR_HEAD_ROLL_SLIDER

+    has_face_editor = 'face_editor' in state_manager.get_item('processors')
     FACE_EDITOR_MODEL_DROPDOWN = gradio.Dropdown(
         label = wording.get('uis.face_editor_model_dropdown'),
         choices = processors_choices.face_editor_models,
         value = state_manager.get_item('face_editor_model'),
-        visible = 'face_editor' in state_manager.get_item('processors')
+        visible = has_face_editor
     )
     FACE_EDITOR_EYEBROW_DIRECTION_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_eyebrow_direction_slider'),
@@ -55,7 +56,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_eyebrow_direction_range),
         minimum = processors_choices.face_editor_eyebrow_direction_range[0],
         maximum = processors_choices.face_editor_eyebrow_direction_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
     )
     FACE_EDITOR_EYE_GAZE_HORIZONTAL_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_eye_gaze_horizontal_slider'),
@@ -63,7 +64,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_eye_gaze_horizontal_range),
         minimum = processors_choices.face_editor_eye_gaze_horizontal_range[0],
         maximum = processors_choices.face_editor_eye_gaze_horizontal_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
     )
     FACE_EDITOR_EYE_GAZE_VERTICAL_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_eye_gaze_vertical_slider'),
@@ -71,7 +72,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_eye_gaze_vertical_range),
         minimum = processors_choices.face_editor_eye_gaze_vertical_range[0],
         maximum = processors_choices.face_editor_eye_gaze_vertical_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
     )
     FACE_EDITOR_EYE_OPEN_RATIO_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_eye_open_ratio_slider'),
@@ -79,7 +80,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_eye_open_ratio_range),
         minimum = processors_choices.face_editor_eye_open_ratio_range[0],
         maximum = processors_choices.face_editor_eye_open_ratio_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
     )
     FACE_EDITOR_LIP_OPEN_RATIO_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_lip_open_ratio_slider'),
@@ -87,7 +88,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_lip_open_ratio_range),
         minimum = processors_choices.face_editor_lip_open_ratio_range[0],
         maximum = processors_choices.face_editor_lip_open_ratio_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
     )
     FACE_EDITOR_MOUTH_GRIM_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_mouth_grim_slider'),
@@ -95,7 +96,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_mouth_grim_range),
         minimum = processors_choices.face_editor_mouth_grim_range[0],
         maximum = processors_choices.face_editor_mouth_grim_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
     )
     FACE_EDITOR_MOUTH_POUT_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_mouth_pout_slider'),
@@ -103,7 +104,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_mouth_pout_range),
         minimum = processors_choices.face_editor_mouth_pout_range[0],
         maximum = processors_choices.face_editor_mouth_pout_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
     )
     FACE_EDITOR_MOUTH_PURSE_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_mouth_purse_slider'),
@@ -111,7 +112,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_mouth_purse_range),
         minimum = processors_choices.face_editor_mouth_purse_range[0],
         maximum = processors_choices.face_editor_mouth_purse_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
     )
     FACE_EDITOR_MOUTH_SMILE_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_mouth_smile_slider'),
@@ -119,7 +120,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_mouth_smile_range),
         minimum = processors_choices.face_editor_mouth_smile_range[0],
         maximum = processors_choices.face_editor_mouth_smile_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
     )
     FACE_EDITOR_MOUTH_POSITION_HORIZONTAL_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_mouth_position_horizontal_slider'),
@@ -127,7 +128,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_mouth_position_horizontal_range),
         minimum = processors_choices.face_editor_mouth_position_horizontal_range[0],
         maximum = processors_choices.face_editor_mouth_position_horizontal_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
     )
     FACE_EDITOR_MOUTH_POSITION_VERTICAL_SLIDER = gradio.Slider(
         label = wording.get('uis.face_editor_mouth_position_vertical_slider'),
@@ -135,7 +136,7 @@ def render() -> None:
         step = calc_float_step(processors_choices.face_editor_mouth_position_vertical_range),
         minimum = processors_choices.face_editor_mouth_position_vertical_range[0],
         maximum = processors_choices.face_editor_mouth_position_vertical_range[-1],
-        visible = 'face_editor' in state_manager.get_item('processors'),
+        visible = has_face_editor
has_face_editor ) FACE_EDITOR_HEAD_PITCH_SLIDER = gradio.Slider( label = wording.get('uis.face_editor_head_pitch_slider'), @@ -143,7 +144,7 @@ def render() -> None: step = calc_float_step(processors_choices.face_editor_head_pitch_range), minimum = processors_choices.face_editor_head_pitch_range[0], maximum = processors_choices.face_editor_head_pitch_range[-1], - visible = 'face_editor' in state_manager.get_item('processors'), + visible = has_face_editor ) FACE_EDITOR_HEAD_YAW_SLIDER = gradio.Slider( label = wording.get('uis.face_editor_head_yaw_slider'), @@ -151,7 +152,7 @@ def render() -> None: step = calc_float_step(processors_choices.face_editor_head_yaw_range), minimum = processors_choices.face_editor_head_yaw_range[0], maximum = processors_choices.face_editor_head_yaw_range[-1], - visible = 'face_editor' in state_manager.get_item('processors'), + visible = has_face_editor ) FACE_EDITOR_HEAD_ROLL_SLIDER = gradio.Slider( label = wording.get('uis.face_editor_head_roll_slider'), @@ -159,7 +160,7 @@ def render() -> None: step = calc_float_step(processors_choices.face_editor_head_roll_range), minimum = processors_choices.face_editor_head_roll_range[0], maximum = processors_choices.face_editor_head_roll_range[-1], - visible = 'face_editor' in state_manager.get_item('processors'), + visible = has_face_editor ) register_ui_component('face_editor_model_dropdown', FACE_EDITOR_MODEL_DROPDOWN) register_ui_component('face_editor_eyebrow_direction_slider', FACE_EDITOR_EYEBROW_DIRECTION_SLIDER) diff --git a/facefusion/uis/components/face_enhancer_options.py b/facefusion/uis/components/face_enhancer_options.py index 5ce4c11a..54a06cee 100755 --- a/facefusion/uis/components/face_enhancer_options.py +++ b/facefusion/uis/components/face_enhancer_options.py @@ -3,25 +3,29 @@ from typing import List, Optional, Tuple import gradio from facefusion import state_manager, wording -from facefusion.common_helper import calc_int_step +from facefusion.common_helper import calc_float_step, calc_int_step from facefusion.processors import choices as processors_choices from facefusion.processors.core import load_processor_module +from facefusion.processors.modules.face_enhancer import has_weight_input from facefusion.processors.typing import FaceEnhancerModel from facefusion.uis.core import get_ui_component, register_ui_component FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None +FACE_ENHANCER_WEIGHT_SLIDER : Optional[gradio.Slider] = None def render() -> None: global FACE_ENHANCER_MODEL_DROPDOWN global FACE_ENHANCER_BLEND_SLIDER + global FACE_ENHANCER_WEIGHT_SLIDER + has_face_enhancer = 'face_enhancer' in state_manager.get_item('processors') FACE_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_enhancer_model_dropdown'), choices = processors_choices.face_enhancer_models, value = state_manager.get_item('face_enhancer_model'), - visible = 'face_enhancer' in state_manager.get_item('processors') + visible = has_face_enhancer ) FACE_ENHANCER_BLEND_SLIDER = gradio.Slider( label = wording.get('uis.face_enhancer_blend_slider'), @@ -29,35 +33,50 @@ def render() -> None: step = calc_int_step(processors_choices.face_enhancer_blend_range), minimum = processors_choices.face_enhancer_blend_range[0], maximum = processors_choices.face_enhancer_blend_range[-1], - visible = 'face_enhancer' in state_manager.get_item('processors') + visible = has_face_enhancer + ) + FACE_ENHANCER_WEIGHT_SLIDER = gradio.Slider( + label = 
wording.get('uis.face_enhancer_weight_slider'), + value = state_manager.get_item('face_enhancer_weight'), + step = calc_float_step(processors_choices.face_enhancer_weight_range), + minimum = processors_choices.face_enhancer_weight_range[0], + maximum = processors_choices.face_enhancer_weight_range[-1], + visible = has_face_enhancer and has_weight_input() ) register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN) register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER) + register_ui_component('face_enhancer_weight_slider', FACE_ENHANCER_WEIGHT_SLIDER) def listen() -> None: - FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN) + FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = [ FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_WEIGHT_SLIDER ]) FACE_ENHANCER_BLEND_SLIDER.release(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER) + FACE_ENHANCER_WEIGHT_SLIDER.release(update_face_enhancer_weight, inputs = FACE_ENHANCER_WEIGHT_SLIDER) processors_checkbox_group = get_ui_component('processors_checkbox_group') if processors_checkbox_group: - processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER ]) + processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FACE_ENHANCER_WEIGHT_SLIDER ]) -def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider]: +def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider, gradio.Slider]: has_face_enhancer = 'face_enhancer' in processors - return gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer) + return gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer and has_weight_input()) -def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> gradio.Dropdown: +def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> Tuple[gradio.Dropdown, gradio.Slider]: face_enhancer_module = load_processor_module('face_enhancer') face_enhancer_module.clear_inference_pool() state_manager.set_item('face_enhancer_model', face_enhancer_model) if face_enhancer_module.pre_check(): - return gradio.Dropdown(value = state_manager.get_item('face_enhancer_model')) - return gradio.Dropdown() + return gradio.Dropdown(value = state_manager.get_item('face_enhancer_model')), gradio.Slider(visible = has_weight_input()) + return gradio.Dropdown(), gradio.Slider() def update_face_enhancer_blend(face_enhancer_blend : float) -> None: state_manager.set_item('face_enhancer_blend', int(face_enhancer_blend)) + + +def update_face_enhancer_weight(face_enhancer_weight : float) -> None: + state_manager.set_item('face_enhancer_weight', face_enhancer_weight) + diff --git a/facefusion/uis/components/face_masker.py b/facefusion/uis/components/face_masker.py index 46571c5b..fcb830ae 100755 --- a/facefusion/uis/components/face_masker.py +++ b/facefusion/uis/components/face_masker.py @@ -3,11 +3,13 @@ from typing import List, Optional, Tuple import gradio import facefusion.choices -from facefusion import state_manager, wording +from facefusion import face_masker, state_manager, wording from facefusion.common_helper import 
calc_float_step, calc_int_step -from facefusion.typing import FaceMaskRegion, FaceMaskType +from facefusion.typing import FaceMaskRegion, FaceMaskType, FaceOccluderModel, FaceParserModel from facefusion.uis.core import register_ui_component +FACE_OCCLUDER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None +FACE_PARSER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_MASK_TYPES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None FACE_MASK_REGIONS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None FACE_MASK_BLUR_SLIDER : Optional[gradio.Slider] = None @@ -18,6 +20,8 @@ FACE_MASK_PADDING_LEFT_SLIDER : Optional[gradio.Slider] = None def render() -> None: + global FACE_OCCLUDER_MODEL_DROPDOWN + global FACE_PARSER_MODEL_DROPDOWN global FACE_MASK_TYPES_CHECKBOX_GROUP global FACE_MASK_REGIONS_CHECKBOX_GROUP global FACE_MASK_BLUR_SLIDER @@ -28,6 +32,17 @@ def render() -> None: has_box_mask = 'box' in state_manager.get_item('face_mask_types') has_region_mask = 'region' in state_manager.get_item('face_mask_types') + with gradio.Row(): + FACE_OCCLUDER_MODEL_DROPDOWN = gradio.Dropdown( + label = wording.get('uis.face_occluder_model_dropdown'), + choices = facefusion.choices.face_occluder_models, + value = state_manager.get_item('face_occluder_model') + ) + FACE_PARSER_MODEL_DROPDOWN = gradio.Dropdown( + label = wording.get('uis.face_parser_model_dropdown'), + choices = facefusion.choices.face_parser_models, + value = state_manager.get_item('face_parser_model') + ) FACE_MASK_TYPES_CHECKBOX_GROUP = gradio.CheckboxGroup( label = wording.get('uis.face_mask_types_checkbox_group'), choices = facefusion.choices.face_mask_types, @@ -82,6 +97,8 @@ def render() -> None: value = state_manager.get_item('face_mask_padding')[3], visible = has_box_mask ) + register_ui_component('face_occluder_model_dropdown', FACE_OCCLUDER_MODEL_DROPDOWN) + register_ui_component('face_parser_model_dropdown', FACE_PARSER_MODEL_DROPDOWN) register_ui_component('face_mask_types_checkbox_group', FACE_MASK_TYPES_CHECKBOX_GROUP) register_ui_component('face_mask_regions_checkbox_group', FACE_MASK_REGIONS_CHECKBOX_GROUP) register_ui_component('face_mask_blur_slider', FACE_MASK_BLUR_SLIDER) @@ -92,6 +109,8 @@ def render() -> None: def listen() -> None: + FACE_OCCLUDER_MODEL_DROPDOWN.change(update_face_occluder_model, inputs = FACE_OCCLUDER_MODEL_DROPDOWN) + FACE_PARSER_MODEL_DROPDOWN.change(update_face_parser_model, inputs = FACE_PARSER_MODEL_DROPDOWN) FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_types, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_REGIONS_CHECKBOX_GROUP, FACE_MASK_BLUR_SLIDER, FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ]) FACE_MASK_REGIONS_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGIONS_CHECKBOX_GROUP, outputs = FACE_MASK_REGIONS_CHECKBOX_GROUP) FACE_MASK_BLUR_SLIDER.release(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER) @@ -100,6 +119,24 @@ def listen() -> None: face_mask_padding_slider.release(update_face_mask_padding, inputs = face_mask_padding_sliders) +def update_face_occluder_model(face_occluder_model : FaceOccluderModel) -> gradio.Dropdown: + face_masker.clear_inference_pool() + state_manager.set_item('face_occluder_model', face_occluder_model) + + if face_masker.pre_check(): + return gradio.Dropdown(value = state_manager.get_item('face_occluder_model')) + return gradio.Dropdown() + + +def update_face_parser_model(face_parser_model : 
FaceParserModel) -> gradio.Dropdown: + face_masker.clear_inference_pool() + state_manager.set_item('face_parser_model', face_parser_model) + + if face_masker.pre_check(): + return gradio.Dropdown(value = state_manager.get_item('face_parser_model')) + return gradio.Dropdown() + + def update_face_mask_types(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.CheckboxGroup, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider, gradio.Slider]: face_mask_types = face_mask_types or facefusion.choices.face_mask_types state_manager.set_item('face_mask_types', face_mask_types) diff --git a/facefusion/uis/components/face_swapper_options.py b/facefusion/uis/components/face_swapper_options.py index 7eb4b713..049cf16c 100755 --- a/facefusion/uis/components/face_swapper_options.py +++ b/facefusion/uis/components/face_swapper_options.py @@ -17,17 +17,18 @@ def render() -> None: global FACE_SWAPPER_MODEL_DROPDOWN global FACE_SWAPPER_PIXEL_BOOST_DROPDOWN + has_face_swapper = 'face_swapper' in state_manager.get_item('processors') FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_swapper_model_dropdown'), - choices = processors_choices.face_swapper_set.keys(), + choices = processors_choices.face_swapper_models, value = state_manager.get_item('face_swapper_model'), - visible = 'face_swapper' in state_manager.get_item('processors') + visible = has_face_swapper ) FACE_SWAPPER_PIXEL_BOOST_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_swapper_pixel_boost_dropdown'), choices = processors_choices.face_swapper_set.get(state_manager.get_item('face_swapper_model')), value = state_manager.get_item('face_swapper_pixel_boost'), - visible = 'face_swapper' in state_manager.get_item('processors') + visible = has_face_swapper ) register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN) register_ui_component('face_swapper_pixel_boost_dropdown', FACE_SWAPPER_PIXEL_BOOST_DROPDOWN) diff --git a/facefusion/uis/components/frame_colorizer_options.py b/facefusion/uis/components/frame_colorizer_options.py index 350d3e2e..5d7ed802 100755 --- a/facefusion/uis/components/frame_colorizer_options.py +++ b/facefusion/uis/components/frame_colorizer_options.py @@ -19,17 +19,18 @@ def render() -> None: global FRAME_COLORIZER_SIZE_DROPDOWN global FRAME_COLORIZER_BLEND_SLIDER + has_frame_colorizer = 'frame_colorizer' in state_manager.get_item('processors') FRAME_COLORIZER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.frame_colorizer_model_dropdown'), choices = processors_choices.frame_colorizer_models, value = state_manager.get_item('frame_colorizer_model'), - visible = 'frame_colorizer' in state_manager.get_item('processors') + visible = has_frame_colorizer ) FRAME_COLORIZER_SIZE_DROPDOWN = gradio.Dropdown( label = wording.get('uis.frame_colorizer_size_dropdown'), choices = processors_choices.frame_colorizer_sizes, value = state_manager.get_item('frame_colorizer_size'), - visible = 'frame_colorizer' in state_manager.get_item('processors') + visible = has_frame_colorizer ) FRAME_COLORIZER_BLEND_SLIDER = gradio.Slider( label = wording.get('uis.frame_colorizer_blend_slider'), @@ -37,7 +38,7 @@ def render() -> None: step = calc_int_step(processors_choices.frame_colorizer_blend_range), minimum = processors_choices.frame_colorizer_blend_range[0], maximum = processors_choices.frame_colorizer_blend_range[-1], - visible = 'frame_colorizer' in state_manager.get_item('processors') + visible = has_frame_colorizer ) 
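+	# the hoisted has_frame_colorizer flag evaluates the processor membership once per render() and feeds every visible argument above, mirroring the other *_options components in this change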
register_ui_component('frame_colorizer_model_dropdown', FRAME_COLORIZER_MODEL_DROPDOWN) register_ui_component('frame_colorizer_size_dropdown', FRAME_COLORIZER_SIZE_DROPDOWN) diff --git a/facefusion/uis/components/frame_enhancer_options.py b/facefusion/uis/components/frame_enhancer_options.py index f8629ef7..99547ed2 100755 --- a/facefusion/uis/components/frame_enhancer_options.py +++ b/facefusion/uis/components/frame_enhancer_options.py @@ -17,11 +17,12 @@ def render() -> None: global FRAME_ENHANCER_MODEL_DROPDOWN global FRAME_ENHANCER_BLEND_SLIDER + has_frame_enhancer = 'frame_enhancer' in state_manager.get_item('processors') FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.frame_enhancer_model_dropdown'), choices = processors_choices.frame_enhancer_models, value = state_manager.get_item('frame_enhancer_model'), - visible = 'frame_enhancer' in state_manager.get_item('processors') + visible = has_frame_enhancer ) FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider( label = wording.get('uis.frame_enhancer_blend_slider'), @@ -29,7 +30,7 @@ def render() -> None: step = calc_int_step(processors_choices.frame_enhancer_blend_range), minimum = processors_choices.frame_enhancer_blend_range[0], maximum = processors_choices.frame_enhancer_blend_range[-1], - visible = 'frame_enhancer' in state_manager.get_item('processors') + visible = has_frame_enhancer ) register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN) register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER) diff --git a/facefusion/uis/components/lip_syncer_options.py b/facefusion/uis/components/lip_syncer_options.py index 7c195d93..16b0c0ff 100755 --- a/facefusion/uis/components/lip_syncer_options.py +++ b/facefusion/uis/components/lip_syncer_options.py @@ -14,11 +14,12 @@ LIP_SYNCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None def render() -> None: global LIP_SYNCER_MODEL_DROPDOWN + has_lip_syncer = 'lip_syncer' in state_manager.get_item('processors') LIP_SYNCER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.lip_syncer_model_dropdown'), choices = processors_choices.lip_syncer_models, value = state_manager.get_item('lip_syncer_model'), - visible = 'lip_syncer' in state_manager.get_item('processors') + visible = has_lip_syncer ) register_ui_component('lip_syncer_model_dropdown', LIP_SYNCER_MODEL_DROPDOWN) diff --git a/facefusion/uis/components/preview.py b/facefusion/uis/components/preview.py index 53835bbe..a38338f7 100755 --- a/facefusion/uis/components/preview.py +++ b/facefusion/uis/components/preview.py @@ -11,6 +11,7 @@ from facefusion.common_helper import get_first from facefusion.content_analyser import analyse_frame from facefusion.core import conditional_append_reference_faces from facefusion.face_analyser import get_average_face, get_many_faces +from facefusion.face_selector import sort_faces_by_order from facefusion.face_store import clear_reference_faces, clear_static_faces, get_reference_faces from facefusion.filesystem import filter_audio_paths, is_image, is_video from facefusion.processors.core import get_processors_modules @@ -74,7 +75,7 @@ def render() -> None: def listen() -> None: PREVIEW_FRAME_SLIDER.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE, show_progress = 'hidden') - PREVIEW_FRAME_SLIDER.change(slide_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE, show_progress = 'hidden') + PREVIEW_FRAME_SLIDER.change(slide_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = 
PREVIEW_IMAGE, show_progress = 'hidden', trigger_mode = 'once') reference_face_position_gallery = get_ui_component('reference_face_position_gallery') if reference_face_position_gallery: @@ -110,6 +111,7 @@ def listen() -> None: for ui_component in get_ui_components( [ 'age_modifier_direction_slider', + 'deep_swapper_morph_slider', 'expression_restorer_factor_slider', 'face_editor_eyebrow_direction_slider', 'face_editor_eye_gaze_horizontal_slider', @@ -126,6 +128,7 @@ def listen() -> None: 'face_editor_head_yaw_slider', 'face_editor_head_roll_slider', 'face_enhancer_blend_slider', + 'face_enhancer_weight_slider', 'frame_colorizer_blend_slider', 'frame_enhancer_blend_slider', 'reference_face_distance_slider', @@ -142,6 +145,7 @@ def listen() -> None: for ui_component in get_ui_components( [ 'age_modifier_model_dropdown', + 'deep_swapper_model_dropdown', 'expression_restorer_model_dropdown', 'processors_checkbox_group', 'face_editor_model_dropdown', @@ -158,7 +162,9 @@ def listen() -> None: 'face_detector_model_dropdown', 'face_detector_size_dropdown', 'face_detector_angles_checkbox_group', - 'face_landmarker_model_dropdown' + 'face_landmarker_model_dropdown', + 'face_occluder_model_dropdown', + 'face_parser_model_dropdown' ]): ui_component.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) @@ -190,7 +196,13 @@ def update_preview_image(frame_number : int = 0) -> gradio.Image: conditional_append_reference_faces() reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None source_frames = read_static_images(state_manager.get_item('source_paths')) - source_faces = get_many_faces(source_frames) + source_faces = [] + + for source_frame in source_frames: + temp_faces = get_many_faces([ source_frame ]) + temp_faces = sort_faces_by_order(temp_faces, 'large-small') + if temp_faces: + source_faces.append(get_first(temp_faces)) source_face = get_average_face(source_faces) source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths'))) source_audio_frame = create_empty_audio_frame() diff --git a/facefusion/uis/components/processors.py b/facefusion/uis/components/processors.py index 567f3e11..990006ff 100644 --- a/facefusion/uis/components/processors.py +++ b/facefusion/uis/components/processors.py @@ -4,7 +4,7 @@ import gradio from facefusion import state_manager, wording from facefusion.filesystem import list_directory -from facefusion.processors.core import clear_processors_modules, get_processors_modules +from facefusion.processors.core import get_processors_modules from facefusion.uis.core import register_ui_component PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None @@ -26,15 +26,18 @@ def listen() -> None: def update_processors(processors : List[str]) -> gradio.CheckboxGroup: - clear_processors_modules(state_manager.get_item('processors')) - state_manager.set_item('processors', processors) - for processor_module in get_processors_modules(state_manager.get_item('processors')): + if hasattr(processor_module, 'clear_inference_pool'): + processor_module.clear_inference_pool() + + for processor_module in get_processors_modules(processors): if not processor_module.pre_check(): return gradio.CheckboxGroup() + + state_manager.set_item('processors', processors) return gradio.CheckboxGroup(value = state_manager.get_item('processors'), choices = sort_processors(state_manager.get_item('processors'))) def sort_processors(processors : List[str]) -> List[str]: - 
available_processors = list_directory('facefusion/processors/modules') + available_processors = [ file.get('name') for file in list_directory('facefusion/processors/modules') ] return sorted(available_processors, key = lambda processor : processors.index(processor) if processor in processors else len(processors)) diff --git a/facefusion/uis/components/terminal.py b/facefusion/uis/components/terminal.py index bbe2a35b..bf57691d 100644 --- a/facefusion/uis/components/terminal.py +++ b/facefusion/uis/components/terminal.py @@ -7,8 +7,8 @@ from typing import Optional import gradio from tqdm import tqdm +import facefusion.choices from facefusion import logger, state_manager, wording -from facefusion.choices import log_level_set from facefusion.typing import LogLevel LOG_LEVEL_DROPDOWN : Optional[gradio.Dropdown] = None @@ -24,7 +24,7 @@ def render() -> None: LOG_LEVEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.log_level_dropdown'), - choices = log_level_set.keys(), + choices = facefusion.choices.log_levels, value = state_manager.get_item('log_level') ) TERMINAL_TEXTBOX = gradio.Textbox( diff --git a/facefusion/uis/components/webcam.py b/facefusion/uis/components/webcam.py index cc3fcebf..46ba38db 100644 --- a/facefusion/uis/components/webcam.py +++ b/facefusion/uis/components/webcam.py @@ -2,7 +2,7 @@ import os import subprocess from collections import deque from concurrent.futures import ThreadPoolExecutor -from typing import Deque, Generator, Optional +from typing import Deque, Generator, List, Optional import cv2 import gradio @@ -10,7 +10,7 @@ from tqdm import tqdm from facefusion import logger, state_manager, wording from facefusion.audio import create_empty_audio_frame -from facefusion.common_helper import is_windows +from facefusion.common_helper import get_first, is_windows from facefusion.content_analyser import analyse_stream from facefusion.face_analyser import get_average_face, get_many_faces from facefusion.ffmpeg import open_ffmpeg @@ -27,14 +27,17 @@ WEBCAM_START_BUTTON : Optional[gradio.Button] = None WEBCAM_STOP_BUTTON : Optional[gradio.Button] = None -def get_webcam_capture() -> Optional[cv2.VideoCapture]: +def get_webcam_capture(webcam_device_id : int) -> Optional[cv2.VideoCapture]: global WEBCAM_CAPTURE if WEBCAM_CAPTURE is None: + cv2.setLogLevel(0) if is_windows(): - webcam_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW) + webcam_capture = cv2.VideoCapture(webcam_device_id, cv2.CAP_DSHOW) else: - webcam_capture = cv2.VideoCapture(0) + webcam_capture = cv2.VideoCapture(webcam_device_id) + cv2.setLogLevel(3) + if webcam_capture and webcam_capture.isOpened(): WEBCAM_CAPTURE = webcam_capture return WEBCAM_CAPTURE @@ -43,7 +46,7 @@ def get_webcam_capture() -> Optional[cv2.VideoCapture]: def clear_webcam_capture() -> None: global WEBCAM_CAPTURE - if WEBCAM_CAPTURE: + if WEBCAM_CAPTURE and WEBCAM_CAPTURE.isOpened(): WEBCAM_CAPTURE.release() WEBCAM_CAPTURE = None @@ -68,32 +71,42 @@ def render() -> None: def listen() -> None: + webcam_device_id_dropdown = get_ui_component('webcam_device_id_dropdown') webcam_mode_radio = get_ui_component('webcam_mode_radio') webcam_resolution_dropdown = get_ui_component('webcam_resolution_dropdown') webcam_fps_slider = get_ui_component('webcam_fps_slider') + source_image = get_ui_component('source_image') - if webcam_mode_radio and webcam_resolution_dropdown and webcam_fps_slider: - start_event = WEBCAM_START_BUTTON.click(start, inputs = [ webcam_mode_radio, webcam_resolution_dropdown, webcam_fps_slider ], outputs = WEBCAM_IMAGE) - 
WEBCAM_STOP_BUTTON.click(stop, cancels = start_event) + if webcam_device_id_dropdown and webcam_mode_radio and webcam_resolution_dropdown and webcam_fps_slider: + start_event = WEBCAM_START_BUTTON.click(start, inputs = [ webcam_device_id_dropdown, webcam_mode_radio, webcam_resolution_dropdown, webcam_fps_slider ], outputs = WEBCAM_IMAGE) + WEBCAM_STOP_BUTTON.click(stop, cancels = start_event, outputs = WEBCAM_IMAGE) + + if source_image: + source_image.change(stop, cancels = start_event, outputs = WEBCAM_IMAGE) -def start(webcam_mode : WebcamMode, webcam_resolution : str, webcam_fps : Fps) -> Generator[VisionFrame, None, None]: +def start(webcam_device_id : int, webcam_mode : WebcamMode, webcam_resolution : str, webcam_fps : Fps) -> Generator[VisionFrame, None, None]: state_manager.set_item('face_selector_mode', 'one') source_image_paths = filter_image_paths(state_manager.get_item('source_paths')) source_frames = read_static_images(source_image_paths) source_faces = get_many_faces(source_frames) source_face = get_average_face(source_faces) stream = None + webcam_capture = None if webcam_mode in [ 'udp', 'v4l2' ]: stream = open_stream(webcam_mode, webcam_resolution, webcam_fps) #type:ignore[arg-type] webcam_width, webcam_height = unpack_resolution(webcam_resolution) - webcam_capture = get_webcam_capture() + + if isinstance(webcam_device_id, int): + webcam_capture = get_webcam_capture(webcam_device_id) + if webcam_capture and webcam_capture.isOpened(): webcam_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) #type:ignore[attr-defined] webcam_capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width) webcam_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height) webcam_capture.set(cv2.CAP_PROP_FPS, webcam_fps) + for capture_frame in multi_process_capture(source_face, webcam_capture, webcam_fps): if webcam_mode == 'inline': yield normalize_frame_color(capture_frame) @@ -107,19 +120,15 @@ def start(webcam_mode : WebcamMode, webcam_resolution : str, webcam_fps : Fps) - def multi_process_capture(source_face : Face, webcam_capture : cv2.VideoCapture, webcam_fps : Fps) -> Generator[VisionFrame, None, None]: deque_capture_frames: Deque[VisionFrame] = deque() - with tqdm(desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress: - progress.set_postfix( - { - 'execution_providers': state_manager.get_item('execution_providers'), - 'execution_thread_count': state_manager.get_item('execution_thread_count') - }) + + with tqdm(desc = wording.get('streaming'), unit = 'frame', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress: with ThreadPoolExecutor(max_workers = state_manager.get_item('execution_thread_count')) as executor: futures = [] while webcam_capture and webcam_capture.isOpened(): _, capture_frame = webcam_capture.read() if analyse_stream(capture_frame, webcam_fps): - return + yield None future = executor.submit(process_stream_frame, source_face, capture_frame) futures.append(future) @@ -140,6 +149,7 @@ def stop() -> gradio.Image: def process_stream_frame(source_face : Face, target_vision_frame : VisionFrame) -> VisionFrame: source_audio_frame = create_empty_audio_frame() + for processor_module in get_processors_modules(state_manager.get_item('processors')): logger.disable() if processor_module.pre_process('stream'): @@ -155,13 +165,27 @@ def process_stream_frame(source_face : Face, target_vision_frame : VisionFrame) def open_stream(stream_mode : StreamMode, stream_resolution 
: str, stream_fps : Fps) -> subprocess.Popen[bytes]: commands = [ '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', stream_resolution, '-r', str(stream_fps), '-i', '-'] + if stream_mode == 'udp': commands.extend([ '-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316' ]) if stream_mode == 'v4l2': try: - device_name = os.listdir('/sys/devices/virtual/video4linux')[0] + device_name = get_first(os.listdir('/sys/devices/virtual/video4linux')) if device_name: commands.extend([ '-f', 'v4l2', '/dev/' + device_name ]) except FileNotFoundError: logger.error(wording.get('stream_not_loaded').format(stream_mode = stream_mode), __name__) return open_ffmpeg(commands) + + +def get_available_webcam_ids(webcam_id_start : int, webcam_id_end : int) -> List[int]: + available_webcam_ids = [] + + for index in range(webcam_id_start, webcam_id_end): + webcam_capture = get_webcam_capture(index) + + if webcam_capture and webcam_capture.isOpened(): + available_webcam_ids.append(index) + clear_webcam_capture() + + return available_webcam_ids diff --git a/facefusion/uis/components/webcam_options.py b/facefusion/uis/components/webcam_options.py index cbe7390c..ec8a4d36 100644 --- a/facefusion/uis/components/webcam_options.py +++ b/facefusion/uis/components/webcam_options.py @@ -3,19 +3,29 @@ from typing import Optional import gradio from facefusion import wording +from facefusion.common_helper import get_first from facefusion.uis import choices as uis_choices +from facefusion.uis.components.webcam import get_available_webcam_ids from facefusion.uis.core import register_ui_component +WEBCAM_DEVICE_ID_DROPDOWN : Optional[gradio.Dropdown] = None WEBCAM_MODE_RADIO : Optional[gradio.Radio] = None WEBCAM_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None WEBCAM_FPS_SLIDER : Optional[gradio.Slider] = None def render() -> None: + global WEBCAM_DEVICE_ID_DROPDOWN global WEBCAM_MODE_RADIO global WEBCAM_RESOLUTION_DROPDOWN global WEBCAM_FPS_SLIDER + available_webcam_ids = get_available_webcam_ids(0, 10) or [ 'none' ] #type:ignore[list-item] + WEBCAM_DEVICE_ID_DROPDOWN = gradio.Dropdown( + value = get_first(available_webcam_ids), + label = wording.get('uis.webcam_device_id_dropdown'), + choices = available_webcam_ids + ) WEBCAM_MODE_RADIO = gradio.Radio( label = wording.get('uis.webcam_mode_radio'), choices = uis_choices.webcam_modes, @@ -33,6 +43,7 @@ def render() -> None: minimum = 1, maximum = 60 ) + register_ui_component('webcam_device_id_dropdown', WEBCAM_DEVICE_ID_DROPDOWN) register_ui_component('webcam_mode_radio', WEBCAM_MODE_RADIO) register_ui_component('webcam_resolution_dropdown', WEBCAM_RESOLUTION_DROPDOWN) register_ui_component('webcam_fps_slider', WEBCAM_FPS_SLIDER) diff --git a/facefusion/uis/core.py b/facefusion/uis/core.py index 60f7b236..90a827b8 100644 --- a/facefusion/uis/core.py +++ b/facefusion/uis/core.py @@ -10,16 +10,8 @@ from gradio.themes import Size from facefusion import logger, metadata, state_manager, wording from facefusion.exit_helper import hard_exit from facefusion.filesystem import resolve_relative_path -from facefusion.uis import overrides from facefusion.uis.typing import Component, ComponentName -os.environ['GRADIO_ANALYTICS_ENABLED'] = '0' - -warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio') - -gradio.processing_utils.encode_array_to_base64 = overrides.encode_array_to_base64 -gradio.processing_utils.encode_pil_to_base64 = overrides.encode_pil_to_base64 - UI_COMPONENTS: Dict[ComponentName, Component] = {} UI_LAYOUT_MODULES : List[ModuleType] = [] 
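+# UI_COMPONENTS acts as a process wide registry, register_ui_component() fills it so components can reach each other's gradio elements through get_ui_component()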
UI_LAYOUT_METHODS =\ @@ -77,6 +69,13 @@ def register_ui_component(component_name : ComponentName, component: Component) UI_COMPONENTS[component_name] = component +def init() -> None: + os.environ['GRADIO_ANALYTICS_ENABLED'] = '0' + os.environ['GRADIO_TEMP_DIR'] = os.path.join(state_manager.get_item('temp_path'), 'gradio') + + warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio') + + def launch() -> None: ui_layouts_total = len(state_manager.get_item('ui_layouts')) with gradio.Blocks(theme = get_theme(), css = get_css(), title = metadata.get('name') + ' ' + metadata.get('version'), fill_width = True) as ui: @@ -99,7 +98,20 @@ def launch() -> None: def get_theme() -> gradio.Theme: return gradio.themes.Base( primary_hue = gradio.themes.colors.red, - secondary_hue = gradio.themes.colors.neutral, + secondary_hue = gradio.themes.Color( + name = 'neutral', + c50 = '#fafafa', + c100 = '#f5f5f5', + c200 = '#e5e5e5', + c300 = '#d4d4d4', + c400 = '#a3a3a3', + c500 = '#737373', + c600 = '#525252', + c700 = '#404040', + c800 = '#262626', + c900 = '#212121', + c950 = '#171717', + ), radius_size = Size( xxs = '0.375rem', xs = '0.375rem', @@ -111,11 +123,18 @@ def get_theme() -> gradio.Theme: ), font = gradio.themes.GoogleFont('Open Sans') ).set( + color_accent = 'transparent', + color_accent_soft = 'transparent', + color_accent_soft_dark = 'transparent', background_fill_primary = '*neutral_100', + background_fill_primary_dark = '*neutral_950', + background_fill_secondary = '*neutral_50', + background_fill_secondary_dark = '*neutral_800', block_background_fill = 'white', + block_background_fill_dark = '*neutral_900', block_border_width = '0', block_label_background_fill = '*neutral_100', - block_label_background_fill_dark = '*neutral_700', + block_label_background_fill_dark = '*neutral_800', block_label_border_width = 'none', block_label_margin = '0.5rem', block_label_radius = '*radius_md', @@ -124,39 +143,48 @@ def get_theme() -> gradio.Theme: block_label_text_color_dark = 'white', block_label_text_weight = '600', block_title_background_fill = '*neutral_100', - block_title_background_fill_dark = '*neutral_700', + block_title_background_fill_dark = '*neutral_800', block_title_padding = '*block_label_padding', block_title_radius = '*block_label_radius', block_title_text_color = '*neutral_700', block_title_text_size = '*text_sm', block_title_text_weight = '600', block_padding = '0.5rem', + border_color_accent = 'transparent', + border_color_accent_dark = 'transparent', + border_color_accent_subdued = 'transparent', + border_color_accent_subdued_dark = 'transparent', border_color_primary = 'transparent', border_color_primary_dark = 'transparent', button_large_padding = '2rem 0.5rem', button_large_text_weight = 'normal', button_primary_background_fill = '*primary_500', + button_primary_background_fill_dark = '*primary_600', button_primary_text_color = 'white', button_secondary_background_fill = 'white', - button_secondary_border_color = 'transparent', - button_secondary_border_color_dark = 'transparent', - button_secondary_border_color_hover = 'transparent', - button_secondary_border_color_hover_dark = 'transparent', + button_secondary_background_fill_dark = '*neutral_800', + button_secondary_background_fill_hover = 'white', + button_secondary_background_fill_hover_dark = '*neutral_800', button_secondary_text_color = '*neutral_800', button_small_padding = '0.75rem', + button_small_text_size = '0.875rem', checkbox_background_color = '*neutral_200', + checkbox_background_color_dark = 
'*neutral_900', checkbox_background_color_selected = '*primary_600', checkbox_background_color_selected_dark = '*primary_700', - checkbox_border_color_focus = '*primary_500', - checkbox_border_color_focus_dark = '*primary_600', - checkbox_border_color_selected = '*primary_600', - checkbox_border_color_selected_dark = '*primary_700', checkbox_label_background_fill = '*neutral_50', + checkbox_label_background_fill_dark = '*neutral_800', checkbox_label_background_fill_hover = '*neutral_50', + checkbox_label_background_fill_hover_dark = '*neutral_800', checkbox_label_background_fill_selected = '*primary_500', checkbox_label_background_fill_selected_dark = '*primary_600', checkbox_label_text_color_selected = 'white', + error_background_fill = 'white', + error_background_fill_dark = '*neutral_900', + error_text_color = '*primary_500', + error_text_color_dark = '*primary_600', input_background_fill = '*neutral_50', + input_background_fill_dark = '*neutral_800', shadow_drop = 'none', slider_color = '*primary_500', slider_color_dark = '*primary_600' diff --git a/facefusion/uis/layouts/benchmark.py b/facefusion/uis/layouts/benchmark.py index 72f75fb6..b119a1f1 100644 --- a/facefusion/uis/layouts/benchmark.py +++ b/facefusion/uis/layouts/benchmark.py @@ -1,26 +1,24 @@ import gradio from facefusion import state_manager -from facefusion.download import conditional_download -from facefusion.uis.components import about, age_modifier_options, benchmark, benchmark_options, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, memory, processors +from facefusion.download import conditional_download, resolve_download_url +from facefusion.uis.components import about, age_modifier_options, benchmark, benchmark_options, deep_swapper_options, download, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, memory, processors def pre_check() -> bool: - if not state_manager.get_item('skip_download'): - conditional_download('.assets/examples', - [ - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/source.jpg', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/source.mp3', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-360p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-540p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-720p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-1080p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-1440p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-2160p.mp4' - ]) - return True - return False + conditional_download('.assets/examples', + [ + resolve_download_url('examples-3.0.0', 'source.jpg'), + resolve_download_url('examples-3.0.0', 'source.mp3'), + resolve_download_url('examples-3.0.0', 'target-240p.mp4'), + resolve_download_url('examples-3.0.0', 
'target-360p.mp4'), + resolve_download_url('examples-3.0.0', 'target-540p.mp4'), + resolve_download_url('examples-3.0.0', 'target-720p.mp4'), + resolve_download_url('examples-3.0.0', 'target-1080p.mp4'), + resolve_download_url('examples-3.0.0', 'target-1440p.mp4'), + resolve_download_url('examples-3.0.0', 'target-2160p.mp4') + ]) + return True def render() -> gradio.Blocks: @@ -33,6 +31,8 @@ def render() -> gradio.Blocks: processors.render() with gradio.Blocks(): age_modifier_options.render() + with gradio.Blocks(): + deep_swapper_options.render() with gradio.Blocks(): expression_restorer_options.render() with gradio.Blocks(): @@ -54,6 +54,9 @@ def render() -> gradio.Blocks: execution_thread_count.render() execution_queue_count.render() with gradio.Blocks(): + download.render() + with gradio.Blocks(): + state_manager.set_item('video_memory_strategy', 'tolerant') memory.render() with gradio.Blocks(): benchmark_options.render() @@ -66,7 +69,9 @@ def render() -> gradio.Blocks: def listen() -> None: processors.listen() age_modifier_options.listen() + deep_swapper_options.listen() expression_restorer_options.listen() + download.listen() face_debugger_options.listen() face_editor_options.listen() face_enhancer_options.listen() diff --git a/facefusion/uis/layouts/default.py b/facefusion/uis/layouts/default.py index b57a9b8d..96553f8a 100755 --- a/facefusion/uis/layouts/default.py +++ b/facefusion/uis/layouts/default.py @@ -1,7 +1,7 @@ import gradio from facefusion import state_manager -from facefusion.uis.components import about, age_modifier_options, common_options, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_detector, face_editor_options, face_enhancer_options, face_landmarker, face_masker, face_selector, face_swapper_options, frame_colorizer_options, frame_enhancer_options, instant_runner, job_manager, job_runner, lip_syncer_options, memory, output, output_options, preview, processors, source, target, temp_frame, terminal, trim_frame, ui_workflow +from facefusion.uis.components import about, age_modifier_options, common_options, deep_swapper_options, download, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_detector, face_editor_options, face_enhancer_options, face_landmarker, face_masker, face_selector, face_swapper_options, frame_colorizer_options, frame_enhancer_options, instant_runner, job_manager, job_runner, lip_syncer_options, memory, output, output_options, preview, processors, source, target, temp_frame, terminal, trim_frame, ui_workflow def pre_check() -> bool: @@ -18,6 +18,8 @@ def render() -> gradio.Blocks: processors.render() with gradio.Blocks(): age_modifier_options.render() + with gradio.Blocks(): + deep_swapper_options.render() with gradio.Blocks(): expression_restorer_options.render() with gradio.Blocks(): @@ -38,6 +40,8 @@ def render() -> gradio.Blocks: execution.render() execution_thread_count.render() execution_queue_count.render() + with gradio.Blocks(): + download.render() with gradio.Blocks(): memory.render() with gradio.Blocks(): @@ -79,6 +83,7 @@ def render() -> gradio.Blocks: def listen() -> None: processors.listen() age_modifier_options.listen() + deep_swapper_options.listen() expression_restorer_options.listen() face_debugger_options.listen() face_editor_options.listen() @@ -90,6 +95,7 @@ def listen() -> None: execution.listen() execution_thread_count.listen() execution_queue_count.listen() + download.listen() memory.listen() 
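+	# download.listen() above wires the download component that render() now places before the memory block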
temp_frame.listen() output_options.listen() diff --git a/facefusion/uis/layouts/webcam.py b/facefusion/uis/layouts/webcam.py index 2351d081..7b1fcd71 100644 --- a/facefusion/uis/layouts/webcam.py +++ b/facefusion/uis/layouts/webcam.py @@ -1,7 +1,7 @@ import gradio from facefusion import state_manager -from facefusion.uis.components import about, age_modifier_options, execution, execution_thread_count, face_debugger_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, processors, source, webcam, webcam_options +from facefusion.uis.components import about, age_modifier_options, deep_swapper_options, download, execution, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, processors, source, webcam, webcam_options def pre_check() -> bool: @@ -18,8 +18,14 @@ def render() -> gradio.Blocks: processors.render() with gradio.Blocks(): age_modifier_options.render() + with gradio.Blocks(): + deep_swapper_options.render() + with gradio.Blocks(): + expression_restorer_options.render() with gradio.Blocks(): face_debugger_options.render() + with gradio.Blocks(): + face_editor_options.render() with gradio.Blocks(): face_enhancer_options.render() with gradio.Blocks(): @@ -33,6 +39,8 @@ def render() -> gradio.Blocks: with gradio.Blocks(): execution.render() execution_thread_count.render() + with gradio.Blocks(): + download.render() with gradio.Blocks(): webcam_options.render() with gradio.Blocks(): @@ -46,7 +54,11 @@ def render() -> gradio.Blocks: def listen() -> None: processors.listen() age_modifier_options.listen() + deep_swapper_options.listen() + expression_restorer_options.listen() + download.listen() face_debugger_options.listen() + face_editor_options.listen() face_enhancer_options.listen() face_swapper_options.listen() frame_colorizer_options.listen() diff --git a/facefusion/uis/overrides.py b/facefusion/uis/overrides.py deleted file mode 100644 index 1a1ee11d..00000000 --- a/facefusion/uis/overrides.py +++ /dev/null @@ -1,15 +0,0 @@ -import base64 -from typing import Any - -import cv2 -import numpy -from numpy._typing import NDArray - - -def encode_array_to_base64(array : NDArray[Any]) -> str: - _, buffer = cv2.imencode('.jpg', array[:, :, ::-1]) - return 'data:image/jpeg;base64,' + base64.b64encode(buffer.tobytes()).decode('utf-8') - - -def encode_pil_to_base64(image : Any) -> str: - return encode_array_to_base64(numpy.asarray(image)[:, :, ::-1]) diff --git a/facefusion/uis/typing.py b/facefusion/uis/typing.py index 1c13759d..6de5730e 100644 --- a/facefusion/uis/typing.py +++ b/facefusion/uis/typing.py @@ -9,6 +9,8 @@ ComponentName = Literal\ 'age_modifier_model_dropdown', 'benchmark_cycles_slider', 'benchmark_runs_checkbox_group', + 'deep_swapper_model_dropdown', + 'deep_swapper_morph_slider', 'expression_restorer_factor_slider', 'expression_restorer_model_dropdown', 'face_debugger_items_checkbox_group', @@ -33,6 +35,7 @@ ComponentName = Literal\ 'face_editor_mouth_smile_slider', 'face_enhancer_blend_slider', 'face_enhancer_model_dropdown', + 'face_enhancer_weight_slider', 'face_landmarker_model_dropdown', 'face_landmarker_score_slider', 'face_mask_blur_slider', @@ -49,6 +52,8 @@ ComponentName = Literal\ 'face_selector_race_dropdown', 'face_swapper_model_dropdown', 'face_swapper_pixel_boost_dropdown', + 'face_occluder_model_dropdown', + 'face_parser_model_dropdown', 
'frame_colorizer_blend_slider', 'frame_colorizer_model_dropdown', 'frame_colorizer_size_dropdown', @@ -68,6 +73,7 @@ ComponentName = Literal\ 'target_image', 'target_video', 'ui_workflow_dropdown', + 'webcam_device_id_dropdown', 'webcam_fps_slider', 'webcam_mode_radio', 'webcam_resolution_dropdown' diff --git a/facefusion/uis/ui_helper.py b/facefusion/uis/ui_helper.py index 91fa75da..4e729e9a 100644 --- a/facefusion/uis/ui_helper.py +++ b/facefusion/uis/ui_helper.py @@ -21,6 +21,6 @@ def convert_str_none(value : str) -> Optional[str]: def suggest_output_path(output_directory_path : str, target_path : str) -> Optional[str]: if is_image(target_path) or is_video(target_path): _, target_extension = os.path.splitext(target_path) - output_name = hashlib.sha1(str(state_manager.get_state()).encode('utf-8')).hexdigest()[:8] + output_name = hashlib.sha1(str(state_manager.get_state()).encode()).hexdigest()[:8] return os.path.join(output_directory_path, output_name + target_extension) return None diff --git a/facefusion/vision.py b/facefusion/vision.py index d5c925db..04c1b5ec 100644 --- a/facefusion/vision.py +++ b/facefusion/vision.py @@ -5,10 +5,10 @@ import cv2 import numpy from cv2.typing import Size -from facefusion.choices import image_template_sizes, video_template_sizes +import facefusion.choices from facefusion.common_helper import is_windows from facefusion.filesystem import is_image, is_video, sanitize_path_for_windows -from facefusion.typing import Fps, Orientation, Resolution, VisionFrame +from facefusion.typing import Duration, Fps, Orientation, Resolution, VisionFrame @lru_cache(maxsize = 128) @@ -64,8 +64,8 @@ def create_image_resolutions(resolution : Resolution) -> List[str]: if resolution: width, height = resolution temp_resolutions.append(normalize_resolution(resolution)) - for template_size in image_template_sizes: - temp_resolutions.append(normalize_resolution((width * template_size, height * template_size))) + for image_template_size in facefusion.choices.image_template_sizes: + temp_resolutions.append(normalize_resolution((width * image_template_size, height * image_template_size))) temp_resolutions = sorted(set(temp_resolutions)) for temp_resolution in temp_resolutions: resolutions.append(pack_resolution(temp_resolution)) @@ -119,6 +119,39 @@ def restrict_video_fps(video_path : str, fps : Fps) -> Fps: return fps +def detect_video_duration(video_path : str) -> Duration: + video_frame_total = count_video_frame_total(video_path) + video_fps = detect_video_fps(video_path) + + if video_frame_total and video_fps: + return video_frame_total / video_fps + return 0 + + +def count_trim_frame_total(video_path : str, trim_frame_start : Optional[int], trim_frame_end : Optional[int]) -> int: + trim_frame_start, trim_frame_end = restrict_trim_frame(video_path, trim_frame_start, trim_frame_end) + + return trim_frame_end - trim_frame_start + + +def restrict_trim_frame(video_path : str, trim_frame_start : Optional[int], trim_frame_end : Optional[int]) -> Tuple[int, int]: + video_frame_total = count_video_frame_total(video_path) + + if isinstance(trim_frame_start, int): + trim_frame_start = max(0, min(trim_frame_start, video_frame_total)) + if isinstance(trim_frame_end, int): + trim_frame_end = max(0, min(trim_frame_end, video_frame_total)) + + if isinstance(trim_frame_start, int) and isinstance(trim_frame_end, int): + return trim_frame_start, trim_frame_end + if isinstance(trim_frame_start, int): + return trim_frame_start, video_frame_total + if isinstance(trim_frame_end, int): + return 0, 
trim_frame_end
+
+	return 0, video_frame_total
+
+
 def detect_video_resolution(video_path : str) -> Optional[Resolution]:
 	if is_video(video_path):
 		if is_windows():
@@ -147,11 +180,11 @@ def create_video_resolutions(resolution : Resolution) -> List[str]:
 	if resolution:
 		width, height = resolution
 		temp_resolutions.append(normalize_resolution(resolution))
-		for template_size in video_template_sizes:
+		for video_template_size in facefusion.choices.video_template_sizes:
 			if width > height:
-				temp_resolutions.append(normalize_resolution((template_size * width / height, template_size)))
+				temp_resolutions.append(normalize_resolution((video_template_size * width / height, video_template_size)))
 			else:
-				temp_resolutions.append(normalize_resolution((template_size, template_size * height / width)))
+				temp_resolutions.append(normalize_resolution((video_template_size, video_template_size * height / width)))
 		temp_resolutions = sorted(set(temp_resolutions))
 	for temp_resolution in temp_resolutions:
 		resolutions.append(pack_resolution(temp_resolution))
@@ -202,6 +235,46 @@ def normalize_frame_color(vision_frame : VisionFrame) -> VisionFrame:
 	return cv2.cvtColor(vision_frame, cv2.COLOR_BGR2RGB)
 
+
+def conditional_match_frame_color(source_vision_frame : VisionFrame, target_vision_frame : VisionFrame) -> VisionFrame:
+	# the histogram correlation in [ 0, 1 ] weights how much of the color matched frame is blended in
+	histogram_factor = calc_histogram_difference(source_vision_frame, target_vision_frame)
+	target_vision_frame = blend_vision_frames(target_vision_frame, match_frame_color(source_vision_frame, target_vision_frame), histogram_factor)
+	return target_vision_frame
+
+
+def match_frame_color(source_vision_frame : VisionFrame, target_vision_frame : VisionFrame) -> VisionFrame:
+	# three coarse passes refine the source reference, a final full resolution pass adjusts the target
+	color_difference_sizes = numpy.linspace(16, target_vision_frame.shape[0], 3, endpoint = False)
+
+	for color_difference_size in color_difference_sizes:
+		source_vision_frame = equalize_frame_color(source_vision_frame, target_vision_frame, normalize_resolution((color_difference_size, color_difference_size)))
+	target_vision_frame = equalize_frame_color(source_vision_frame, target_vision_frame, target_vision_frame.shape[:2][::-1])
+	return target_vision_frame
+
+
+def equalize_frame_color(source_vision_frame : VisionFrame, target_vision_frame : VisionFrame, size : Size) -> VisionFrame:
+	# compute the source minus target color difference at the given size and add it back onto the target
+	source_frame_resize = cv2.resize(source_vision_frame, size, interpolation = cv2.INTER_AREA).astype(numpy.float32)
+	target_frame_resize = cv2.resize(target_vision_frame, size, interpolation = cv2.INTER_AREA).astype(numpy.float32)
+	color_difference_vision_frame = numpy.subtract(source_frame_resize, target_frame_resize)
+	color_difference_vision_frame = cv2.resize(color_difference_vision_frame, target_vision_frame.shape[:2][::-1], interpolation = cv2.INTER_CUBIC)
+	target_vision_frame = numpy.add(target_vision_frame, color_difference_vision_frame).clip(0, 255).astype(numpy.uint8)
+	return target_vision_frame
+
+
+def calc_histogram_difference(source_vision_frame : VisionFrame, target_vision_frame : VisionFrame) -> float:
+	# correlate the hue and saturation histograms and map the result from [ -1, 1 ] to [ 0, 1 ]
+	histogram_source = cv2.calcHist([cv2.cvtColor(source_vision_frame, cv2.COLOR_BGR2HSV)], [ 0, 1 ], None, [ 50, 60 ], [ 0, 180, 0, 256 ])
+	histogram_target = cv2.calcHist([cv2.cvtColor(target_vision_frame, cv2.COLOR_BGR2HSV)], [ 0, 1 ], None, [ 50, 60 ], [ 0, 180, 0, 256 ])
+	histogram_difference = float(numpy.interp(cv2.compareHist(histogram_source, histogram_target, cv2.HISTCMP_CORREL), [ -1, 1 ], [ 0, 1 ]))
+	return histogram_difference
+
+
+def blend_vision_frames(source_vision_frame : VisionFrame, target_vision_frame :
VisionFrame, blend_factor : float) -> VisionFrame: + blend_vision_frame = cv2.addWeighted(source_vision_frame, 1 - blend_factor, target_vision_frame, blend_factor, 0) + return blend_vision_frame + + def create_tile_frames(vision_frame : VisionFrame, size : Size) -> Tuple[List[VisionFrame], int, int]: vision_frame = numpy.pad(vision_frame, ((size[1], size[1]), (size[1], size[1]), (0, 0))) tile_width = size[0] - 2 * size[2] diff --git a/facefusion/voice_extractor.py b/facefusion/voice_extractor.py index 60d8c4a6..c7c4e922 100644 --- a/facefusion/voice_extractor.py +++ b/facefusion/voice_extractor.py @@ -1,36 +1,40 @@ +from functools import lru_cache from typing import Tuple import numpy import scipy from facefusion import inference_manager -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.filesystem import resolve_relative_path from facefusion.thread_helper import thread_semaphore -from facefusion.typing import Audio, AudioChunk, InferencePool, ModelOptions, ModelSet +from facefusion.typing import Audio, AudioChunk, DownloadScope, InferencePool, ModelOptions, ModelSet -MODEL_SET : ModelSet =\ -{ - 'kim_vocal_2': + +@lru_cache(maxsize = None) +def create_static_model_set(download_scope : DownloadScope) -> ModelSet: + return\ { - 'hashes': + 'kim_vocal_2': { - 'voice_extractor': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/kim_vocal_2.hash', - 'path': resolve_relative_path('../.assets/models/kim_vocal_2.hash') - } - }, - 'sources': - { - 'voice_extractor': + 'voice_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'kim_vocal_2.hash'), + 'path': resolve_relative_path('../.assets/models/kim_vocal_2.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/kim_vocal_2.onnx', - 'path': resolve_relative_path('../.assets/models/kim_vocal_2.onnx') + 'voice_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'kim_vocal_2.onnx'), + 'path': resolve_relative_path('../.assets/models/kim_vocal_2.onnx') + } } } } -} def get_inference_pool() -> InferencePool: @@ -43,15 +47,14 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: - return MODEL_SET.get('kim_vocal_2') + return create_static_model_set('full').get('kim_vocal_2') def pre_check() -> bool: - download_directory_path = resolve_relative_path('../.assets/models') model_hashes = get_model_options().get('hashes') model_sources = get_model_options().get('sources') - return conditional_download_hashes(download_directory_path, model_hashes) and conditional_download_sources(download_directory_path, model_sources) + return conditional_download_hashes(model_hashes) and conditional_download_sources(model_sources) def batch_extract_voice(audio : Audio, chunk_size : int, step_size : int) -> Audio: diff --git a/facefusion/wording.py b/facefusion/wording.py index 34760966..db7faabd 100755 --- a/facefusion/wording.py +++ b/facefusion/wording.py @@ -11,7 +11,10 @@ WORDING : Dict[str, Any] =\ 'extracting_frames_succeed': 'Extracting frames succeed', 'extracting_frames_failed': 'Extracting frames failed', 'analysing': 'Analysing', + 'extracting': 'Extracting', + 'streaming': 'Streaming', 'processing': 'Processing', + 'merging': 'Merging', 'downloading': 'Downloading', 'temp_frames_not_found': 'Temporary frames not found', 
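+	# 'streaming' above labels the webcam progress bar in multi_process_capture(), 'extracting' and 'merging' presumably label the matching ffmpeg phases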
'copying_image': 'Copying image with a resolution of {resolution}', @@ -46,6 +49,7 @@ WORDING : Dict[str, Any] =\ 'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded', 'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly', 'stream_not_loaded': 'Stream {stream_mode} could not be loaded', + 'stream_not_supported': 'Stream not supported', 'job_created': 'Job {job_id} created', 'job_not_created': 'Job {job_id} not created', 'job_submitted': 'Job {job_id} submitted', @@ -94,10 +98,15 @@ WORDING : Dict[str, Any] =\ 'skip_conda': 'skip the conda environment check', # paths 'config_path': 'choose the config file to override defaults', + 'temp_path': 'specify the directory for the temporary resources', 'jobs_path': 'specify the directory to store jobs', - 'source_paths': 'choose single or multiple source images or audios', - 'target_path': 'choose single target image or video', - 'output_path': 'specify the output image or video within a directory', + 'source_paths': 'choose the image or audio paths', + 'target_path': 'choose the image or video path', + 'output_path': 'specify the image or video within a directory', + # patterns + 'source_pattern': 'choose the image or audio pattern', + 'target_pattern': 'choose the image or video pattern', + 'output_pattern': 'specify the image or video pattern', # face detector 'face_detector_model': 'choose the model responsible for detecting the faces', 'face_detector_size': 'specify the frame size provided to the face detector', @@ -117,8 +126,10 @@ WORDING : Dict[str, Any] =\ 'reference_face_distance': 'specify the similarity between the reference face and target face', 'reference_frame_number': 'specify the frame used to create the reference face', # face masker + 'face_occluder_model': 'choose the model responsible for the occlusion mask', + 'face_parser_model': 'choose the model responsible for the region mask', 'face_mask_types': 'mix and match different face mask types (choices: {choices})', - 'face_mask_blur': 'specify the degree of blur applied the box mask', + 'face_mask_blur': 'specify the degree of blur applied to the box mask', 'face_mask_padding': 'apply top, right, bottom and left padding to the box mask', 'face_mask_regions': 'choose the facial features used for the region mask (choices: {choices})', # frame extraction @@ -140,6 +151,8 @@ WORDING : Dict[str, Any] =\ 'processors': 'load a single or multiple processors (choices: {choices}, ...)', 'age_modifier_model': 'choose the model responsible for aging the face', 'age_modifier_direction': 'specify the direction in which the age should be modified', + 'deep_swapper_model': 'choose the model responsible for swapping the face', + 'deep_swapper_morph': 'morph between source face and target faces', 'expression_restorer_model': 'choose the model responsible for restoring the expression', 'expression_restorer_factor': 'restore factor of expression from the target face', 'face_debugger_items': 'load a single or multiple processors (choices: {choices})', @@ -160,6 +173,7 @@ WORDING : Dict[str, Any] =\ 'face_editor_head_roll': 'specify the head roll', 'face_enhancer_model': 'choose the model responsible for enhancing the face', 'face_enhancer_blend': 'blend the enhanced into the previous face', + 'face_enhancer_weight': 'specify the degree of weight applied to the face', 'face_swapper_model': 'choose the model responsible for swapping the face', 'face_swapper_pixel_boost': 'choose the pixel boost resolution for the face swapper', 'frame_colorizer_model': 'choose 
the model responsible for colorizing the frame', @@ -174,18 +188,21 @@ WORDING : Dict[str, Any] =\ 'ui_workflow': 'choose the ui workflow', # execution 'execution_device_id': 'specify the device used for processing', - 'execution_providers': 'accelerate the model inference using different providers (choices: {choices}, ...)', + 'execution_providers': 'inference using different providers (choices: {choices}, ...)', 'execution_thread_count': 'specify the amount of parallel threads while processing', 'execution_queue_count': 'specify the amount of frames each thread is processing', + # download + 'download_providers': 'download using different providers (choices: {choices}, ...)', + 'download_scope': 'specify the download scope', # memory 'video_memory_strategy': 'balance fast processing and low VRAM usage', 'system_memory_limit': 'limit the available RAM that can be used while processing', # misc - 'skip_download': 'omit downloads and remote lookups', 'log_level': 'adjust the message severity displayed in the terminal', # run 'run': 'run the program', 'headless_run': 'run the program in headless mode', + 'batch_run': 'run the program in batch mode', 'force_download': 'force automate downloads and exit', # jobs 'job_id': 'specify the job id', @@ -223,6 +240,9 @@ WORDING : Dict[str, Any] =\ 'benchmark_runs_checkbox_group': 'BENCHMARK RUNS', 'clear_button': 'CLEAR', 'common_options_checkbox_group': 'OPTIONS', + 'download_providers_checkbox_group': 'DOWNLOAD PROVIDERS', + 'deep_swapper_model_dropdown': 'DEEP SWAPPER MODEL', + 'deep_swapper_morph_slider': 'DEEP SWAPPER MORPH', 'execution_providers_checkbox_group': 'EXECUTION PROVIDERS', 'execution_queue_count_slider': 'EXECUTION QUEUE COUNT', 'execution_thread_count_slider': 'EXECUTION THREAD COUNT', @@ -250,6 +270,7 @@ WORDING : Dict[str, Any] =\ 'face_editor_mouth_smile_slider': 'FACE EDITOR MOUTH SMILE', 'face_enhancer_blend_slider': 'FACE ENHANCER BLEND', 'face_enhancer_model_dropdown': 'FACE ENHANCER MODEL', + 'face_enhancer_weight_slider': 'FACE ENHANCER WEIGHT', 'face_landmarker_model_dropdown': 'FACE LANDMARKER MODEL', 'face_landmarker_score_slider': 'FACE LANDMARKER SCORE', 'face_mask_blur_slider': 'FACE MASK BLUR', @@ -266,6 +287,8 @@ WORDING : Dict[str, Any] =\ 'face_selector_race_dropdown': 'FACE SELECTOR RACE', 'face_swapper_model_dropdown': 'FACE SWAPPER MODEL', 'face_swapper_pixel_boost_dropdown': 'FACE SWAPPER PIXEL BOOST', + 'face_occluder_model_dropdown': 'FACE OCCLUDER MODEL', + 'face_parser_model_dropdown': 'FACE PARSER MODEL', 'frame_colorizer_blend_slider': 'FRAME COLORIZER BLEND', 'frame_colorizer_model_dropdown': 'FRAME COLORIZER MODEL', 'frame_colorizer_size_dropdown': 'FRAME COLORIZER SIZE', @@ -307,6 +330,7 @@ WORDING : Dict[str, Any] =\ 'video_memory_strategy_dropdown': 'VIDEO MEMORY STRATEGY', 'webcam_fps_slider': 'WEBCAM FPS', 'webcam_image': 'WEBCAM', + 'webcam_device_id_dropdown': 'WEBCAM DEVICE ID', 'webcam_mode_radio': 'WEBCAM MODE', 'webcam_resolution_dropdown': 'WEBCAM RESOLUTION' } diff --git a/requirements.txt b/requirements.txt index 659bb6ac..4460a3a5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,10 @@ filetype==1.2.0 -gradio==4.44.0 -gradio-rangeslider==0.0.6 -numpy==2.1.0 -onnx==1.16.1 -onnxruntime==1.19.2 +gradio==5.9.1 +gradio-rangeslider==0.0.8 +numpy==2.2.0 +onnx==1.17.0 +onnxruntime==1.20.1 opencv-python==4.10.0.84 -psutil==6.0.0 -tqdm==4.66.5 +psutil==6.1.1 +tqdm==4.67.1 scipy==1.14.1 diff --git a/tests/helper.py b/tests/helper.py index 45d810bb..7f89c2c5 100644 --- 
a/tests/helper.py +++ b/tests/helper.py @@ -1,7 +1,7 @@ import os +import tempfile from facefusion.filesystem import create_directory, is_directory, is_file, remove_directory -from facefusion.temp_helper import get_base_directory_path from facefusion.typing import JobStatus @@ -14,7 +14,7 @@ def get_test_job_file(file_path : str, job_status : JobStatus) -> str: def get_test_jobs_directory() -> str: - return os.path.join(get_base_directory_path(), 'test-jobs') + return os.path.join(tempfile.gettempdir(), 'facefusion-test-jobs') def get_test_example_file(file_path : str) -> str: @@ -22,7 +22,7 @@ def get_test_example_file(file_path : str) -> str: def get_test_examples_directory() -> str: - return os.path.join(get_base_directory_path(), 'test-examples') + return os.path.join(tempfile.gettempdir(), 'facefusion-test-examples') def is_test_output_file(file_path : str) -> bool: @@ -34,7 +34,7 @@ def get_test_output_file(file_path : str) -> str: def get_test_outputs_directory() -> str: - return os.path.join(get_base_directory_path(), 'test-outputs') + return os.path.join(tempfile.gettempdir(), 'facefusion-test-outputs') def prepare_test_output_directory() -> bool: diff --git a/tests/test_audio.py b/tests/test_audio.py index 66039f1e..36faf9b0 100644 --- a/tests/test_audio.py +++ b/tests/test_audio.py @@ -17,8 +17,8 @@ def before_all() -> None: def test_get_audio_frame() -> None: - assert get_audio_frame(get_test_example_file('source.mp3'), 25) is not None - assert get_audio_frame(get_test_example_file('source.wav'), 25) is not None + assert hasattr(get_audio_frame(get_test_example_file('source.mp3'), 25), '__array_interface__') + assert hasattr(get_audio_frame(get_test_example_file('source.wav'), 25), '__array_interface__') assert get_audio_frame('invalid', 25) is None diff --git a/tests/test_cli_age_modifier.py b/tests/test_cli_age_modifier.py index d4f28143..2184ac88 100644 --- a/tests/test_cli_age_modifier.py +++ b/tests/test_cli_age_modifier.py @@ -25,14 +25,14 @@ def before_each() -> None: def test_modify_age_to_image() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'age_modifier', '--age-modifier-direction', '100', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-age-face-to-image.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'age_modifier', '--age-modifier-direction', '100', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-age-face-to-image.jpg') ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-age-face-to-image.jpg') is True def test_modify_age_to_video() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'age_modifier', '--age-modifier-direction', '100', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-age-face-to-video.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'age_modifier', '--age-modifier-direction', '100', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-age-face-to-video.mp4'), '--trim-frame-end', '1' ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-age-face-to-video.mp4') is True diff --git a/tests/test_cli_batch_runner.py 
b/tests/test_cli_batch_runner.py new file mode 100644 index 00000000..963e5a95 --- /dev/null +++ b/tests/test_cli_batch_runner.py @@ -0,0 +1,45 @@ +import subprocess +import sys + +import pytest + +from facefusion.download import conditional_download +from facefusion.jobs.job_manager import clear_jobs, init_jobs +from .helper import get_test_example_file, get_test_examples_directory, get_test_jobs_directory, get_test_output_file, is_test_output_file, prepare_test_output_directory + + +@pytest.fixture(scope = 'module', autouse = True) +def before_all() -> None: + conditional_download(get_test_examples_directory(), + [ + 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4' + ]) + subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vframes', '1', get_test_example_file('target-240p-batch-1.jpg') ]) + subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vframes', '1', get_test_example_file('target-240p-batch-2.jpg') ]) + + +@pytest.fixture(scope = 'function', autouse = True) +def before_each() -> None: + clear_jobs(get_test_jobs_directory()) + init_jobs(get_test_jobs_directory()) + prepare_test_output_directory() + + +def test_batch_run_targets() -> None: + commands = [ sys.executable, 'facefusion.py', 'batch-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p-batch-*.jpg'), '-o', get_test_output_file('test-batch-run-targets-{index}.jpg') ] + + assert subprocess.run(commands).returncode == 0 + assert is_test_output_file('test-batch-run-targets-0.jpg') is True + assert is_test_output_file('test-batch-run-targets-1.jpg') is True + assert is_test_output_file('test-batch-run-targets-2.jpg') is False + + +def test_batch_run_sources_to_targets() -> None: + commands = [ sys.executable, 'facefusion.py', 'batch-run', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('target-240p-batch-*.jpg'), '-t', get_test_example_file('target-240p-batch-*.jpg'), '-o', get_test_output_file('test-batch-run-sources-to-targets-{index}.jpg') ] + + assert subprocess.run(commands).returncode == 0 + assert is_test_output_file('test-batch-run-sources-to-targets-0.jpg') is True + assert is_test_output_file('test-batch-run-sources-to-targets-1.jpg') is True + assert is_test_output_file('test-batch-run-sources-to-targets-2.jpg') is True + assert is_test_output_file('test-batch-run-sources-to-targets-3.jpg') is True + assert is_test_output_file('test-batch-run-sources-to-targets-4.jpg') is False diff --git a/tests/test_cli_expression_restorer.py b/tests/test_cli_expression_restorer.py index 03dd3064..236cf78f 100644 --- a/tests/test_cli_expression_restorer.py +++ b/tests/test_cli_expression_restorer.py @@ -25,14 +25,14 @@ def before_each() -> None: def test_restore_expression_to_image() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'expression_restorer', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-restore-expression-to-image.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'expression_restorer', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-restore-expression-to-image.jpg') ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-restore-expression-to-image.jpg') is True def
test_restore_expression_to_video() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'expression_restorer', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-restore-expression-to-video.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'expression_restorer', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-restore-expression-to-video.mp4'), '--trim-frame-end', '1' ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-restore-expression-to-video.mp4') is True diff --git a/tests/test_cli_face_debugger.py b/tests/test_cli_face_debugger.py index e41fafb9..b393881d 100644 --- a/tests/test_cli_face_debugger.py +++ b/tests/test_cli_face_debugger.py @@ -26,14 +26,14 @@ def before_each() -> None: def test_debug_face_to_image() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-debug-face-to-image.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-debug-face-to-image.jpg') ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-debug-face-to-image.jpg') is True def test_debug_face_to_video() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-debug-face-to-video.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-debug-face-to-video.mp4'), '--trim-frame-end', '1' ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-debug-face-to-video.mp4') is True diff --git a/tests/test_cli_face_editor.py b/tests/test_cli_face_editor.py index 633ff65f..27b289ec 100644 --- a/tests/test_cli_face_editor.py +++ b/tests/test_cli_face_editor.py @@ -26,14 +26,14 @@ def before_each() -> None: def test_edit_face_to_image() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'face_editor', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-edit-face-to-image.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_editor', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-edit-face-to-image.jpg') ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-edit-face-to-image.jpg') is True def test_edit_face_to_video() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'face_editor', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-edit-face-to-video.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', 
get_test_jobs_directory(), '--processors', 'face_editor', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-edit-face-to-video.mp4'), '--trim-frame-end', '1' ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-edit-face-to-video.mp4') is True diff --git a/tests/test_cli_face_enhancer.py b/tests/test_cli_face_enhancer.py index cc01d8dc..c1e5128f 100644 --- a/tests/test_cli_face_enhancer.py +++ b/tests/test_cli_face_enhancer.py @@ -26,14 +26,14 @@ def before_each() -> None: def test_enhance_face_to_image() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'face_enhancer', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-enhance-face-to-image.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_enhancer', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-enhance-face-to-image.jpg') ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-enhance-face-to-image.jpg') is True def test_enhance_face_to_video() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'face_enhancer', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-enhance-face-to-video.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_enhancer', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-enhance-face-to-video.mp4'), '--trim-frame-end', '1' ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-enhance-face-to-video.mp4') is True diff --git a/tests/test_cli_face_swapper.py b/tests/test_cli_face_swapper.py index bf0c0983..be68cde5 100644 --- a/tests/test_cli_face_swapper.py +++ b/tests/test_cli_face_swapper.py @@ -26,14 +26,14 @@ def before_each() -> None: def test_swap_face_to_image() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'face_swapper', '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-swap-face-to-image.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_swapper', '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-swap-face-to-image.jpg') ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-swap-face-to-image.jpg') is True def test_swap_face_to_video() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'face_swapper', '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-swap-face-to-video.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_swapper', '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-swap-face-to-video.mp4'), '--trim-frame-end', '1' ] assert subprocess.run(commands).returncode == 0 assert 
is_test_output_file('test-swap-face-to-video.mp4') is True diff --git a/tests/test_cli_frame_colorizer.py b/tests/test_cli_frame_colorizer.py index 9796651e..38774c0a 100644 --- a/tests/test_cli_frame_colorizer.py +++ b/tests/test_cli_frame_colorizer.py @@ -27,14 +27,14 @@ def before_each() -> None: def test_colorize_frame_to_image() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'frame_colorizer', '-t', get_test_example_file('target-240p-0sat.jpg'), '-o', get_test_output_file('test_colorize-frame-to-image.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'frame_colorizer', '-t', get_test_example_file('target-240p-0sat.jpg'), '-o', get_test_output_file('test_colorize-frame-to-image.jpg') ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test_colorize-frame-to-image.jpg') is True def test_colorize_frame_to_video() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'frame_colorizer', '-t', get_test_example_file('target-240p-0sat.mp4'), '-o', get_test_output_file('test-colorize-frame-to-video.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'frame_colorizer', '-t', get_test_example_file('target-240p-0sat.mp4'), '-o', get_test_output_file('test-colorize-frame-to-video.mp4'), '--trim-frame-end', '1' ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-colorize-frame-to-video.mp4') is True diff --git a/tests/test_cli_frame_enhancer.py b/tests/test_cli_frame_enhancer.py index 0530bdae..7892fa2d 100644 --- a/tests/test_cli_frame_enhancer.py +++ b/tests/test_cli_frame_enhancer.py @@ -26,14 +26,14 @@ def before_each() -> None: def test_enhance_frame_to_image() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'frame_enhancer', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-enhance-frame-to-image.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'frame_enhancer', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-enhance-frame-to-image.jpg') ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-enhance-frame-to-image.jpg') is True def test_enhance_frame_to_video() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'frame_enhancer', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-enhance-frame-to-video.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'frame_enhancer', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-enhance-frame-to-video.mp4'), '--trim-frame-end', '1' ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test-enhance-frame-to-video.mp4') is True diff --git a/tests/test_cli_job_manager.py b/tests/test_cli_job_manager.py index 316488b0..fac789c8 100644 --- a/tests/test_cli_job_manager.py +++ b/tests/test_cli_job_manager.py @@ -30,30 +30,30 @@ def test_job_list() -> None: def test_job_create() -> None: 
- commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-create', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-create', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 0 assert is_test_job_file('test-job-create.json', 'drafted') is True - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-create', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-create', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 def test_job_submit() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-submit', 'test-job-submit', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-submit', 'test-job-submit', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-submit', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-submit', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-submit', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-submit', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-submit', 'test-job-submit', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-submit', 'test-job-submit', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 0 assert is_test_job_file('test-job-submit.json', 'queued') is True @@ -61,25 +61,25 @@ def test_job_submit() -> None: def test_submit_all() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-submit-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-submit-all', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-submit-all-1', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-submit-all-1', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-submit-all-2', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-submit-all-2', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-submit-all-1', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-submit-all-1', '--jobs-path', get_test_jobs_directory(), '-s', 
get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-submit-all-2', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-submit-all-2', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-submit-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-submit-all', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 0 assert is_test_job_file('test-job-submit-all-1.json', 'queued') is True @@ -88,14 +88,14 @@ def test_submit_all() -> None: def test_job_delete() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-delete', 'test-job-delete', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-delete', 'test-job-delete', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-delete', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-delete', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-delete', 'test-job-delete', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-delete', 'test-job-delete', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 0 assert is_test_job_file('test-job-delete.json', 'drafted') is False @@ -103,17 +103,17 @@ def test_job_delete() -> None: def test_job_delete_all() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-delete-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-delete-all', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-delete-all-1', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-delete-all-1', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-delete-all-2', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-delete-all-2', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-delete-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-delete-all', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 0 assert is_test_job_file('test-job-delete-all-1.json', 'drafted') is False @@ -122,87 +122,87 @@ def test_job_delete_all() -> None: def test_job_add_step() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-add-step', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', 
get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-add-step', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] assert subprocess.run(commands).returncode == 1 assert count_step_total('test-job-add-step') == 0 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-add-step', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-add-step', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-add-step', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-add-step', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] assert subprocess.run(commands).returncode == 0 assert count_step_total('test-job-add-step') == 1 def test_job_remix() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-remix-step', 'test-job-remix-step', '0', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-remix-step', 'test-job-remix-step', '0', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] assert subprocess.run(commands).returncode == 1 assert count_step_total('test-job-remix-step') == 0 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-remix-step', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-remix-step', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-remix-step', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-remix-step', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-remix-step', 'test-job-remix-step', '0', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-remix-step', 'test-job-remix-step', '0', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] assert count_step_total('test-job-remix-step') == 1 assert subprocess.run(commands).returncode == 0 assert count_step_total('test-job-remix-step') == 2 - commands = [ sys.executable, 'facefusion.py', 'job-remix-step', 'test-job-remix-step', '-1', '-j', get_test_jobs_directory(), '-s', 
get_test_example_file('source.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-remix-step', 'test-job-remix-step', '-1', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] assert subprocess.run(commands).returncode == 0 assert count_step_total('test-job-remix-step') == 3 def test_job_insert_step() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-insert-step', 'test-job-insert-step', '0', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-insert-step', 'test-job-insert-step', '0', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] assert subprocess.run(commands).returncode == 1 assert count_step_total('test-job-insert-step') == 0 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-insert-step', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-insert-step', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-insert-step', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-insert-step', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-insert-step', 'test-job-insert-step', '0', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-insert-step', 'test-job-insert-step', '0', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] assert count_step_total('test-job-insert-step') == 1 assert subprocess.run(commands).returncode == 0 assert count_step_total('test-job-insert-step') == 2 - commands = [ sys.executable, 'facefusion.py', 'job-insert-step', 'test-job-insert-step', '-1', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-insert-step', 'test-job-insert-step', '-1', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] assert subprocess.run(commands).returncode == 0 assert count_step_total('test-job-insert-step') == 3 def test_job_remove_step() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-remove-step', 'test-job-remove-step', '0', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 
'facefusion.py', 'job-remove-step', 'test-job-remove-step', '0', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-remove-step', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-remove-step', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-remove-step', '-j', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-remove-step', '--jobs-path', get_test_jobs_directory(), '-s', get_test_example_file('source.jpg'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-remix-step.jpg') ] subprocess.run(commands) subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-remove-step', 'test-job-remove-step', '0', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-remove-step', 'test-job-remove-step', '0', '--jobs-path', get_test_jobs_directory() ] assert count_step_total('test-job-remove-step') == 2 assert subprocess.run(commands).returncode == 0 assert count_step_total('test-job-remove-step') == 1 - commands = [ sys.executable, 'facefusion.py', 'job-remove-step', 'test-job-remove-step', '-1', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-remove-step', 'test-job-remove-step', '-1', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 0 assert subprocess.run(commands).returncode == 1 diff --git a/tests/test_cli_job_runner.py b/tests/test_cli_job_runner.py index 906ef243..89dbad54 100644 --- a/tests/test_cli_job_runner.py +++ b/tests/test_cli_job_runner.py @@ -26,24 +26,24 @@ def before_each() -> None: def test_job_run() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-run', 'test-job-run', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-run', 'test-job-run', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-run', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-run', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-run', '-j', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-run.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-run.jpg') ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-run', 'test-job-run', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-run', 'test-job-run', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-submit', 'test-job-run', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 
'job-submit', 'test-job-run', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-run', 'test-job-run', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-run', 'test-job-run', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 0 assert subprocess.run(commands).returncode == 1 @@ -51,33 +51,33 @@ def test_job_run() -> None: def test_job_run_all() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-run-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-run-all', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-run-all-1', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-run-all-1', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-run-all-2', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-run-all-2', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-run-all-1', '-j', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-run-all-1.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-run-all-1', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-run-all-1.jpg') ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-run-all-2', '-j', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-job-run-all-2.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-run-all-2', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-job-run-all-2.mp4'), '--trim-frame-end', '1' ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-run-all-2', '-j', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-job-run-all-2.mp4'), '--trim-frame-start', '0', '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-run-all-2', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-job-run-all-2.mp4'), '--trim-frame-start', '0', '--trim-frame-end', '1' ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-run-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-run-all', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-submit-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-submit-all', '--jobs-path', get_test_jobs_directory() ] 
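Taken together, the job-runner tests encode the CLI lifecycle contract: job-run fails for an unknown job, a job must be created, given at least one step and submitted before it can run, and a completed job cannot be run a second time. A condensed sketch of that flow, distilled from the assertions; the job id, jobs directory and file paths here are placeholders, not an additional test.

import subprocess
import sys

def facefusion(*args : str) -> int:
	# invoke the facefusion entry point and report its exit code
	return subprocess.run([ sys.executable, 'facefusion.py', *args ]).returncode

assert facefusion('job-run', 'demo', '--jobs-path', 'jobs') == 1 # unknown job
assert facefusion('job-create', 'demo', '--jobs-path', 'jobs') == 0 # drafted
assert facefusion('job-add-step', 'demo', '--jobs-path', 'jobs', '-t', 'target.jpg', '-o', 'output.jpg') == 0 # placeholder paths
assert facefusion('job-run', 'demo', '--jobs-path', 'jobs') == 1 # still drafted, not submitted
assert facefusion('job-submit', 'demo', '--jobs-path', 'jobs') == 0 # queued
assert facefusion('job-run', 'demo', '--jobs-path', 'jobs') == 0 # runs and completes
assert facefusion('job-run', 'demo', '--jobs-path', 'jobs') == 1 # completed jobs cannot rerun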
subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-run-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-run-all', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 0 assert subprocess.run(commands).returncode == 1 @@ -86,24 +86,24 @@ def test_job_run_all() -> None: def test_job_retry() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-retry', 'test-job-retry', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-retry', 'test-job-retry', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-retry', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-retry', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-retry', '-j', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-retry.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-retry', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-retry.jpg') ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-retry', 'test-job-retry', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-retry', 'test-job-retry', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 set_steps_status('test-job-retry', 'failed') move_job_file('test-job-retry', 'failed') - commands = [ sys.executable, 'facefusion.py', 'job-retry', 'test-job-retry', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-retry', 'test-job-retry', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 0 assert subprocess.run(commands).returncode == 1 @@ -111,26 +111,26 @@ def test_job_retry() -> None: def test_job_retry_all() -> None: - commands = [ sys.executable, 'facefusion.py', 'job-retry-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-retry-all', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-retry-all-1', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-retry-all-1', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-retry-all-2', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-create', 'test-job-retry-all-2', '--jobs-path', get_test_jobs_directory() ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-retry-all-1', '-j', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test-job-retry-all-1.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-retry-all-1', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.jpg'), '-o', 
get_test_output_file('test-job-retry-all-1.jpg') ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-retry-all-2', '-j', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-job-retry-all-2.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-retry-all-2', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-job-retry-all-2.mp4'), '--trim-frame-end', '1' ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-retry-all-2', '-j', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-job-retry-all-2.mp4'), '--trim-frame-start', '0', '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'job-add-step', 'test-job-retry-all-2', '--jobs-path', get_test_jobs_directory(), '--processors', 'face_debugger', '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test-job-retry-all-2.mp4'), '--trim-frame-start', '0', '--trim-frame-end', '1' ] subprocess.run(commands) - commands = [ sys.executable, 'facefusion.py', 'job-retry-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-retry-all', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 1 @@ -139,7 +139,7 @@ def test_job_retry_all() -> None: move_job_file('test-job-retry-all-1', 'failed') move_job_file('test-job-retry-all-2', 'failed') - commands = [ sys.executable, 'facefusion.py', 'job-retry-all', '-j', get_test_jobs_directory() ] + commands = [ sys.executable, 'facefusion.py', 'job-retry-all', '--jobs-path', get_test_jobs_directory() ] assert subprocess.run(commands).returncode == 0 assert subprocess.run(commands).returncode == 1 diff --git a/tests/test_cli_lip_syncer.py b/tests/test_cli_lip_syncer.py index bd8d078d..c0e5cc33 100644 --- a/tests/test_cli_lip_syncer.py +++ b/tests/test_cli_lip_syncer.py @@ -27,14 +27,14 @@ def before_each() -> None: def test_sync_lip_to_image() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'lip_syncer', '-s', get_test_example_file('source.mp3'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test_sync_lip_to_image.jpg') ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'lip_syncer', '-s', get_test_example_file('source.mp3'), '-t', get_test_example_file('target-240p.jpg'), '-o', get_test_output_file('test_sync_lip_to_image.jpg') ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test_sync_lip_to_image.jpg') is True def test_sync_lip_to_video() -> None: - commands = [ sys.executable, 'facefusion.py', 'headless-run', '-j', get_test_jobs_directory(), '--processors', 'lip_syncer', '-s', get_test_example_file('source.mp3'), '-t', get_test_example_file('target-240p.mp4'), '-o', get_test_output_file('test_sync_lip_to_video.mp4'), '--trim-frame-end', '1' ] + commands = [ sys.executable, 'facefusion.py', 'headless-run', '--jobs-path', get_test_jobs_directory(), '--processors', 'lip_syncer', '-s', get_test_example_file('source.mp3'), '-t', get_test_example_file('target-240p.mp4'), '-o', 
get_test_output_file('test_sync_lip_to_video.mp4'), '--trim-frame-end', '1' ] assert subprocess.run(commands).returncode == 0 assert is_test_output_file('test_sync_lip_to_video.mp4') is True diff --git a/tests/test_date_helper.py b/tests/test_date_helper.py index 7ec714f8..e8d0cd0a 100644 --- a/tests/test_date_helper.py +++ b/tests/test_date_helper.py @@ -10,6 +10,6 @@ def get_time_ago(days : int, hours : int, minutes : int) -> datetime: def test_describe_time_ago() -> None: assert describe_time_ago(get_time_ago(0, 0, 0)) == 'just now' - assert describe_time_ago(get_time_ago(0, 0, 5)) == '5 minutes ago' + assert describe_time_ago(get_time_ago(0, 0, 10)) == '10 minutes ago' assert describe_time_ago(get_time_ago(0, 5, 10)) == '5 hours and 10 minutes ago' - assert describe_time_ago(get_time_ago(5, 10, 15)) == '5 days, 10 hours and 15 minutes ago' + assert describe_time_ago(get_time_ago(1, 5, 10)) == '1 days, 5 hours and 10 minutes ago' diff --git a/tests/test_download.py b/tests/test_download.py index 8ca1d368..48698aad 100644 --- a/tests/test_download.py +++ b/tests/test_download.py @@ -1,24 +1,18 @@ -import pytest - -from facefusion.download import conditional_download, get_download_size, is_download_done -from .helper import get_test_example_file, get_test_examples_directory +from facefusion.download import get_static_download_size, ping_static_url, resolve_download_url_by_provider -@pytest.fixture(scope = 'module', autouse = True) -def before_all() -> None: - conditional_download(get_test_examples_directory(), - [ - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4' - ]) +def test_get_static_download_size() -> None: + assert get_static_download_size('https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/fairface.onnx') == 85170772 + assert get_static_download_size('https://huggingface.co/facefusion/models-3.0.0/resolve/main/fairface.onnx') == 85170772 + assert get_static_download_size('invalid') == 0 -def test_get_download_size() -> None: - assert get_download_size('https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4') == 191675 - assert get_download_size('https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-360p.mp4') == 370732 - assert get_download_size('invalid') == 0 +def test_ping_static_url() -> None: + assert ping_static_url('https://github.com') is True + assert ping_static_url('https://huggingface.co') is True + assert ping_static_url('invalid') is False -def test_is_download_done() -> None: - assert is_download_done('https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4', get_test_example_file('target-240p.mp4')) is True - assert is_download_done('https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4', 'invalid') is False - assert is_download_done('invalid', 'invalid') is False +def test_resolve_download_url_by_provider() -> None: + assert resolve_download_url_by_provider('github', 'models-3.0.0', 'fairface.onnx') == 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/fairface.onnx' + assert resolve_download_url_by_provider('huggingface', 'models-3.0.0', 'fairface.onnx') == 'https://huggingface.co/facefusion/models-3.0.0/resolve/main/fairface.onnx' diff --git a/tests/test_execution.py b/tests/test_execution.py index 790b7408..1ce83344 100644 --- a/tests/test_execution.py +++ b/tests/test_execution.py @@ -1,24 +1,18 @@
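The rewritten tests/test_download.py above pins down one URL template per download provider. A plausible reconstruction of resolve_download_url_by_provider from those assertions alone follows; the template table is an illustrative assumption, not necessarily the shipped implementation.

# reconstructed from the assertions in tests/test_download.py; the template
# table is an assumption, not necessarily the shipped implementation
DOWNLOAD_PROVIDER_TEMPLATES =\
{
	'github': 'https://github.com/facefusion/facefusion-assets/releases/download/{base_name}/{file_name}',
	'huggingface': 'https://huggingface.co/facefusion/{base_name}/resolve/main/{file_name}'
}


def resolve_download_url_by_provider(download_provider : str, base_name : str, file_name : str) -> str:
	# fill the provider template with the release name and asset file name
	return DOWNLOAD_PROVIDER_TEMPLATES[download_provider].format(base_name = base_name, file_name = file_name)


assert resolve_download_url_by_provider('github', 'models-3.0.0', 'fairface.onnx') == 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/fairface.onnx'
assert resolve_download_url_by_provider('huggingface', 'models-3.0.0', 'fairface.onnx') == 'https://huggingface.co/facefusion/models-3.0.0/resolve/main/fairface.onnx'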
-from facefusion.execution import create_execution_providers, get_execution_provider_choices, has_execution_provider - - -def test_get_execution_provider_choices() -> None: - assert 'cpu' in get_execution_provider_choices() +from facefusion.execution import create_inference_execution_providers, get_available_execution_providers, has_execution_provider def test_has_execution_provider() -> None: @@ -10,15 +6,18 @@ def test_has_execution_provider() -> None: assert has_execution_provider('openvino') is False -def test_multiple_execution_providers() -> None: +def test_get_available_execution_providers() -> None: + assert 'cpu' in get_available_execution_providers() + + +def test_create_inference_execution_providers() -> None: execution_providers =\ [ ('CUDAExecutionProvider', { - 'device_id': '1', - 'cudnn_conv_algo_search': 'DEFAULT' + 'device_id': '1' }), 'CPUExecutionProvider' ] - assert create_execution_providers('1', [ 'cpu', 'cuda' ]) == execution_providers + assert create_inference_execution_providers('1', [ 'cpu', 'cuda' ]) == execution_providers diff --git a/tests/test_face_analyser.py b/tests/test_face_analyser.py index 7c351861..81b479eb 100644 --- a/tests/test_face_analyser.py +++ b/tests/test_face_analyser.py @@ -21,6 +21,7 @@ def before_all() -> None: subprocess.run([ 'ffmpeg', '-i', get_test_example_file('source.jpg'), '-vf', 'crop=iw*0.6:ih*0.6', get_test_example_file('source-60crop.jpg') ]) state_manager.init_item('execution_device_id', 0) state_manager.init_item('execution_providers', [ 'cpu' ]) + state_manager.init_item('download_providers', [ 'github' ]) state_manager.init_item('face_detector_angles', [ 0 ]) state_manager.init_item('face_detector_model', 'many') state_manager.init_item('face_detector_score', 0.5) diff --git a/tests/test_ffmpeg.py b/tests/test_ffmpeg.py index ef2e22cf..a703ef4a 100644 --- a/tests/test_ffmpeg.py +++ b/tests/test_ffmpeg.py @@ -1,12 +1,13 @@ -import glob import subprocess +import tempfile import pytest from facefusion import process_manager, state_manager from facefusion.download import conditional_download -from facefusion.ffmpeg import concat_video, extract_frames, read_audio_buffer -from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_directory_path +from facefusion.ffmpeg import concat_video, extract_frames, read_audio_buffer, replace_audio, restore_audio +from facefusion.filesystem import copy_file +from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_file_path, get_temp_frame_paths from .helper import get_test_example_file, get_test_examples_directory, get_test_output_file, prepare_test_output_directory @@ -23,89 +24,40 @@ def before_all() -> None: subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vf', 'fps=25', get_test_example_file('target-240p-25fps.mp4') ]) subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vf', 'fps=30', get_test_example_file('target-240p-30fps.mp4') ]) subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vf', 'fps=60', get_test_example_file('target-240p-60fps.mp4') ]) - state_manager.init_item('temp_frame_format', 'jpg') + subprocess.run([ 'ffmpeg', '-i', get_test_example_file('source.mp3'), '-i', get_test_example_file('target-240p.mp4'), '-ar', '16000', get_test_example_file('target-240p-16khz.mp4') ]) + subprocess.run([ 'ffmpeg', '-i', get_test_example_file('source.mp3'), '-i', get_test_example_file('target-240p.mp4'), '-ar', '48000', 
diff --git a/tests/test_ffmpeg.py b/tests/test_ffmpeg.py
index ef2e22cf..a703ef4a 100644
--- a/tests/test_ffmpeg.py
+++ b/tests/test_ffmpeg.py
@@ -1,12 +1,13 @@
-import glob
 import subprocess
+import tempfile
 
 import pytest
 
 from facefusion import process_manager, state_manager
 from facefusion.download import conditional_download
-from facefusion.ffmpeg import concat_video, extract_frames, read_audio_buffer
-from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_directory_path
+from facefusion.ffmpeg import concat_video, extract_frames, read_audio_buffer, replace_audio, restore_audio
+from facefusion.filesystem import copy_file
+from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_file_path, get_temp_frame_paths
 from .helper import get_test_example_file, get_test_examples_directory, get_test_output_file, prepare_test_output_directory
 
 
@@ -23,89 +24,40 @@ def before_all() -> None:
 	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vf', 'fps=25', get_test_example_file('target-240p-25fps.mp4') ])
 	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vf', 'fps=30', get_test_example_file('target-240p-30fps.mp4') ])
 	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vf', 'fps=60', get_test_example_file('target-240p-60fps.mp4') ])
-	state_manager.init_item('temp_frame_format', 'jpg')
+	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('source.mp3'), '-i', get_test_example_file('target-240p.mp4'), '-ar', '16000', get_test_example_file('target-240p-16khz.mp4') ])
+	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('source.mp3'), '-i', get_test_example_file('target-240p.mp4'), '-ar', '48000', get_test_example_file('target-240p-48khz.mp4') ])
+	state_manager.init_item('temp_path', tempfile.gettempdir())
+	state_manager.init_item('temp_frame_format', 'png')
 	state_manager.init_item('output_audio_encoder', 'aac')
 
 
 @pytest.fixture(scope = 'function', autouse = True)
 def before_each() -> None:
-	state_manager.clear_item('trim_frame_start')
-	state_manager.clear_item('trim_frame_end')
 	prepare_test_output_directory()
 
 
 def test_extract_frames() -> None:
-	target_paths =\
+	extract_set =\
 	[
-		get_test_example_file('target-240p-25fps.mp4'),
-		get_test_example_file('target-240p-30fps.mp4'),
-		get_test_example_file('target-240p-60fps.mp4')
+		(get_test_example_file('target-240p-25fps.mp4'), 0, 270, 324),
+		(get_test_example_file('target-240p-25fps.mp4'), 224, 270, 55),
+		(get_test_example_file('target-240p-25fps.mp4'), 124, 224, 120),
+		(get_test_example_file('target-240p-25fps.mp4'), 0, 100, 120),
+		(get_test_example_file('target-240p-30fps.mp4'), 0, 324, 324),
+		(get_test_example_file('target-240p-30fps.mp4'), 224, 324, 100),
+		(get_test_example_file('target-240p-30fps.mp4'), 124, 224, 100),
+		(get_test_example_file('target-240p-30fps.mp4'), 0, 100, 100),
+		(get_test_example_file('target-240p-60fps.mp4'), 0, 648, 324),
+		(get_test_example_file('target-240p-60fps.mp4'), 224, 648, 212),
+		(get_test_example_file('target-240p-60fps.mp4'), 124, 224, 50),
+		(get_test_example_file('target-240p-60fps.mp4'), 0, 100, 50)
 	]
 
-	for target_path in target_paths:
-		temp_directory_path = get_temp_directory_path(target_path)
+	for target_path, trim_frame_start, trim_frame_end, frame_total in extract_set:
 		create_temp_directory(target_path)
 
-		assert extract_frames(target_path, '452x240', 30.0) is True
-		assert len(glob.glob1(temp_directory_path, '*.jpg')) == 324
-
-		clear_temp_directory(target_path)
-
-
-def test_extract_frames_with_trim_start() -> None:
-	state_manager.init_item('trim_frame_start', 224)
-	providers =\
-	[
-		(get_test_example_file('target-240p-25fps.mp4'), 55),
-		(get_test_example_file('target-240p-30fps.mp4'), 100),
-		(get_test_example_file('target-240p-60fps.mp4'), 212)
-	]
-
-	for target_path, frame_total in providers:
-		temp_directory_path = get_temp_directory_path(target_path)
-		create_temp_directory(target_path)
-
-		assert extract_frames(target_path, '452x240', 30.0) is True
-		assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total
-
-		clear_temp_directory(target_path)
-
-
-def test_extract_frames_with_trim_start_and_trim_end() -> None:
-	state_manager.init_item('trim_frame_start', 124)
-	state_manager.init_item('trim_frame_end', 224)
-	providers =\
-	[
-		(get_test_example_file('target-240p-25fps.mp4'), 120),
-		(get_test_example_file('target-240p-30fps.mp4'), 100),
-		(get_test_example_file('target-240p-60fps.mp4'), 50)
-	]
-
-	for target_path, frame_total in providers:
-		temp_directory_path = get_temp_directory_path(target_path)
-		create_temp_directory(target_path)
-
-		assert extract_frames(target_path, '452x240', 30.0) is True
-		assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total
-
-		clear_temp_directory(target_path)
-
-
-def test_extract_frames_with_trim_end() -> None:
-	state_manager.init_item('trim_frame_end', 100)
-	providers =\
-	[
-		(get_test_example_file('target-240p-25fps.mp4'), 120),
-		(get_test_example_file('target-240p-30fps.mp4'), 100),
-		(get_test_example_file('target-240p-60fps.mp4'), 50)
-	]
-
-	for target_path, frame_total in providers:
-		temp_directory_path = get_temp_directory_path(target_path)
-		create_temp_directory(target_path)
-
-		assert extract_frames(target_path, '426x240', 30.0) is True
-		assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total
+		assert extract_frames(target_path, '452x240', 30.0, trim_frame_start, trim_frame_end) is True
+		assert len(get_temp_frame_paths(target_path)) == frame_total
 
 		clear_temp_directory(target_path)
 
@@ -125,3 +77,33 @@ def test_read_audio_buffer() -> None:
 	assert isinstance(read_audio_buffer(get_test_example_file('source.mp3'), 1, 1), bytes)
 	assert isinstance(read_audio_buffer(get_test_example_file('source.wav'), 1, 1), bytes)
 	assert read_audio_buffer(get_test_example_file('invalid.mp3'), 1, 1) is None
+
+
+def test_restore_audio() -> None:
+	target_paths =\
+	[
+		get_test_example_file('target-240p-16khz.mp4'),
+		get_test_example_file('target-240p-48khz.mp4')
+	]
+	output_path = get_test_output_file('test-restore-audio.mp4')
+
+	for target_path in target_paths:
+		create_temp_directory(target_path)
+		copy_file(target_path, get_temp_file_path(target_path))
+
+		assert restore_audio(target_path, output_path, 30, 0, 270) is True
+
+		clear_temp_directory(target_path)
+
+
+def test_replace_audio() -> None:
+	target_path = get_test_example_file('target-240p.mp4')
+	output_path = get_test_output_file('test-replace-audio.mp4')
+
+	create_temp_directory(target_path)
+	copy_file(target_path, get_temp_file_path(target_path))
+
+	assert replace_audio(target_path, get_test_example_file('source.mp3'), output_path) is True
+	assert replace_audio(target_path, get_test_example_file('source.wav'), output_path) is True
+
+	clear_temp_directory(target_path)
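
The key change behind the ffmpeg tests: extract_frames takes the trim window as explicit trim_frame_start / trim_frame_end arguments instead of reading it from global state, which is why the four separate trim tests collapse into one parameterised extract_set. A rough usage sketch under the same state the fixture initialises — the target path is a placeholder:

import tempfile

from facefusion import state_manager
from facefusion.ffmpeg import extract_frames
from facefusion.temp_helper import create_temp_directory, get_temp_frame_paths

state_manager.init_item('temp_path', tempfile.gettempdir())
state_manager.init_item('temp_frame_format', 'png')

target_path = 'target-240p-25fps.mp4' # placeholder path
create_temp_directory(target_path)
# Extract frames 124..224 at 452x240, resampled to 30 fps, into the temp directory.
extract_frames(target_path, '452x240', 30.0, 124, 224)
frame_paths = get_temp_frame_paths(target_path)
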
diff --git a/tests/test_filesystem.py b/tests/test_filesystem.py
index fedac41c..137bbcf6 100644
--- a/tests/test_filesystem.py
+++ b/tests/test_filesystem.py
@@ -105,8 +105,11 @@ def test_create_directory() -> None:
 
 
 def test_list_directory() -> None:
-	assert list_directory(get_test_examples_directory())
-	assert list_directory(get_test_example_file('source.jpg')) is None
+	files = list_directory(get_test_examples_directory())
+
+	for file in files:
+		assert file.get('path') == get_test_example_file(file.get('name') + file.get('extension'))
+
 	assert list_directory('invalid') is None
diff --git a/tests/test_inference_pool.py b/tests/test_inference_pool.py
index 563f1df0..17492267 100644
--- a/tests/test_inference_pool.py
+++ b/tests/test_inference_pool.py
@@ -9,9 +9,10 @@ from facefusion.inference_manager import INFERENCE_POOLS, get_inference_pool
 
 @pytest.fixture(scope = 'module', autouse = True)
 def before_all() -> None:
-	content_analyser.pre_check()
 	state_manager.init_item('execution_device_id', 0)
 	state_manager.init_item('execution_providers', [ 'cpu' ])
+	state_manager.init_item('download_providers', [ 'github' ])
+	content_analyser.pre_check()
 
 
 def test_get_inference_pool() -> None:
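
One detail from the filesystem test above: list_directory now returns structured entries instead of bare file names, and each entry's path equals the directory joined with name plus extension. A sketch of consuming that shape — the directory is a placeholder, and list_directory returns None for invalid paths:

from facefusion.filesystem import list_directory

files = list_directory('.assets/examples') # placeholder directory

if files:
	for file in files:
		print(file.get('name'), file.get('extension'), file.get('path'))
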
diff --git a/tests/test_program_helper.py b/tests/test_program_helper.py
index f547fbc6..92b64fb2 100644
--- a/tests/test_program_helper.py
+++ b/tests/test_program_helper.py
@@ -2,7 +2,7 @@ from argparse import ArgumentParser
 
 import pytest
 
-from facefusion.program_helper import find_argument_group, remove_args, validate_actions
+from facefusion.program_helper import find_argument_group, validate_actions
 
 
 def test_find_argument_group() -> None:
@@ -38,23 +38,3 @@ def test_validate_actions() -> None:
 			action.default = args[action.dest]
 
 	assert validate_actions(program) is False
-
-
-def test_remove_args() -> None:
-	program = ArgumentParser()
-	program.add_argument('--test-1')
-	program.add_argument('--test-2')
-	program.add_argument('--test-3')
-
-	actions = [ action.dest for action in program._actions ]
-
-	assert 'test_1' in actions
-	assert 'test_2' in actions
-	assert 'test_3' in actions
-
-	program = remove_args(program, [ 'test_1', 'test_2' ])
-	actions = [ action.dest for action in program._actions ]
-
-	assert 'test_1' not in actions
-	assert 'test_2' not in actions
-	assert 'test_3' in actions
diff --git a/tests/test_temp_helper.py b/tests/test_temp_helper.py
index 48aad129..6903d2ca 100644
--- a/tests/test_temp_helper.py
+++ b/tests/test_temp_helper.py
@@ -15,6 +15,7 @@ def before_all() -> None:
 	[
 		'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4'
 	])
+	state_manager.init_item('temp_path', tempfile.gettempdir())
 	state_manager.init_item('temp_frame_format', 'png')
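
The temp helper fixture now seeds a configurable temp_path before any per-target temp locations are derived. The pattern the tests follow, sketched with a placeholder target and assuming the same state keys the fixtures initialise:

import tempfile

from facefusion import state_manager
from facefusion.temp_helper import create_temp_directory, get_temp_file_path

state_manager.init_item('temp_path', tempfile.gettempdir())
state_manager.init_item('temp_frame_format', 'png')

target_path = 'target-240p.mp4' # placeholder path
create_temp_directory(target_path)
# The per-target temp file now resolves under the configured temp_path.
temp_file_path = get_temp_file_path(target_path)
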
diff --git a/tests/test_vision.py b/tests/test_vision.py
index 7cb69860..d79fb07c 100644
--- a/tests/test_vision.py
+++ b/tests/test_vision.py
@@ -3,7 +3,7 @@ import subprocess
 import pytest
 
 from facefusion.download import conditional_download
-from facefusion.vision import count_video_frame_total, create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_fps, detect_video_resolution, get_video_frame, normalize_resolution, pack_resolution, restrict_image_resolution, restrict_video_fps, restrict_video_resolution, unpack_resolution
+from facefusion.vision import calc_histogram_difference, count_trim_frame_total, count_video_frame_total, create_image_resolutions, create_video_resolutions, detect_image_resolution, detect_video_duration, detect_video_fps, detect_video_resolution, get_video_frame, match_frame_color, normalize_resolution, pack_resolution, read_image, restrict_image_resolution, restrict_trim_frame, restrict_video_fps, restrict_video_resolution, unpack_resolution
 from .helper import get_test_example_file, get_test_examples_directory
 
 
@@ -17,6 +17,7 @@ def before_all() -> None:
 	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vframes', '1', get_test_example_file('target-240p.jpg') ])
 	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-1080p.mp4'), '-vframes', '1', get_test_example_file('target-1080p.jpg') ])
+	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vframes', '1', '-vf', 'hue=s=0', get_test_example_file('target-240p-0sat.jpg') ])
 	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vframes', '1', '-vf', 'transpose=0', get_test_example_file('target-240p-90deg.jpg') ])
 	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-1080p.mp4'), '-vframes', '1', '-vf', 'transpose=0', get_test_example_file('target-1080p-90deg.jpg') ])
 	subprocess.run([ 'ffmpeg', '-i', get_test_example_file('target-240p.mp4'), '-vf', 'fps=25', get_test_example_file('target-240p-25fps.mp4') ])
@@ -49,7 +50,7 @@ def test_create_image_resolutions() -> None:
 
 
 def test_get_video_frame() -> None:
-	assert get_video_frame(get_test_example_file('target-240p-25fps.mp4')) is not None
+	assert hasattr(get_video_frame(get_test_example_file('target-240p-25fps.mp4')), '__array_interface__')
 	assert get_video_frame('invalid') is None
 
 
@@ -73,6 +74,31 @@ def test_restrict_video_fps() -> None:
 	assert restrict_video_fps(get_test_example_file('target-1080p.mp4'), 60.0) == 25.0
 
 
+def test_detect_video_duration() -> None:
+	assert detect_video_duration(get_test_example_file('target-240p.mp4')) == 10.8
+	assert detect_video_duration('invalid') == 0
+
+
+def test_count_trim_frame_total() -> None:
+	assert count_trim_frame_total(get_test_example_file('target-240p.mp4'), 0, 200) == 200
+	assert count_trim_frame_total(get_test_example_file('target-240p.mp4'), 70, 270) == 200
+	assert count_trim_frame_total(get_test_example_file('target-240p.mp4'), -10, None) == 270
+	assert count_trim_frame_total(get_test_example_file('target-240p.mp4'), None, -10) == 0
+	assert count_trim_frame_total(get_test_example_file('target-240p.mp4'), 280, None) == 0
+	assert count_trim_frame_total(get_test_example_file('target-240p.mp4'), None, 280) == 270
+	assert count_trim_frame_total(get_test_example_file('target-240p.mp4'), None, None) == 270
+
+
+def test_restrict_trim_frame() -> None:
+	assert restrict_trim_frame(get_test_example_file('target-240p.mp4'), 0, 200) == (0, 200)
+	assert restrict_trim_frame(get_test_example_file('target-240p.mp4'), 70, 270) == (70, 270)
+	assert restrict_trim_frame(get_test_example_file('target-240p.mp4'), -10, None) == (0, 270)
+	assert restrict_trim_frame(get_test_example_file('target-240p.mp4'), None, -10) == (0, 0)
+	assert restrict_trim_frame(get_test_example_file('target-240p.mp4'), 280, None) == (270, 270)
+	assert restrict_trim_frame(get_test_example_file('target-240p.mp4'), None, 280) == (0, 270)
+	assert restrict_trim_frame(get_test_example_file('target-240p.mp4'), None, None) == (0, 270)
+
+
 def test_detect_video_resolution() -> None:
 	assert detect_video_resolution(get_test_example_file('target-240p.mp4')) == (426, 226)
 	assert detect_video_resolution(get_test_example_file('target-240p-90deg.mp4')) == (226, 426)
@@ -109,3 +135,19 @@ def test_pack_resolution() -> None:
 def test_unpack_resolution() -> None:
 	assert unpack_resolution('0x0') == (0, 0)
 	assert unpack_resolution('2x2') == (2, 2)
+
+
+def test_calc_histogram_difference() -> None:
+	source_vision_frame = read_image(get_test_example_file('target-240p.jpg'))
+	target_vision_frame = read_image(get_test_example_file('target-240p-0sat.jpg'))
+
+	assert calc_histogram_difference(source_vision_frame, source_vision_frame) == 1.0
+	assert calc_histogram_difference(source_vision_frame, target_vision_frame) < 0.5
+
+
+def test_match_frame_color() -> None:
+	source_vision_frame = read_image(get_test_example_file('target-240p.jpg'))
+	target_vision_frame = read_image(get_test_example_file('target-240p-0sat.jpg'))
+	output_vision_frame = match_frame_color(source_vision_frame, target_vision_frame)
+
+	assert calc_histogram_difference(source_vision_frame, output_vision_frame) > 0.5
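
Taken together, the last two vision tests pin down a contract: a fully desaturated frame scores below 0.5 against the original, and match_frame_color transfers enough of the source's colour statistics to lift the histogram similarity back above 0.5, with 1.0 meaning identical histograms. A hedged usage sketch with placeholder image paths:

from facefusion.vision import calc_histogram_difference, match_frame_color, read_image

source_vision_frame = read_image('source.jpg') # placeholder path
target_vision_frame = read_image('target.jpg') # placeholder path

if calc_histogram_difference(source_vision_frame, target_vision_frame) < 0.5:
	# Re-colour the target toward the source's distribution.
	target_vision_frame = match_frame_color(source_vision_frame, target_vision_frame)
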