diff --git a/facefusion.ini b/facefusion.ini index 309241a7..ae94a71e 100644 --- a/facefusion.ini +++ b/facefusion.ini @@ -94,10 +94,13 @@ execution_providers = execution_thread_count = execution_queue_count = +[download] +download_providers = +skip_download = + [memory] video_memory_strategy = system_memory_limit = [misc] -skip_download = log_level = diff --git a/facefusion/args.py b/facefusion/args.py index 00a13859..caaaf566 100644 --- a/facefusion/args.py +++ b/facefusion/args.py @@ -106,11 +106,13 @@ def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None: apply_state_item('execution_providers', args.get('execution_providers')) apply_state_item('execution_thread_count', args.get('execution_thread_count')) apply_state_item('execution_queue_count', args.get('execution_queue_count')) + # download + apply_state_item('download_providers', args.get('download_providers')) + apply_state_item('skip_download', args.get('skip_download')) # memory apply_state_item('video_memory_strategy', args.get('video_memory_strategy')) apply_state_item('system_memory_limit', args.get('system_memory_limit')) # misc - apply_state_item('skip_download', args.get('skip_download')) apply_state_item('log_level', args.get('log_level')) # jobs apply_state_item('job_id', args.get('job_id')) diff --git a/facefusion/choices.py b/facefusion/choices.py index 0971eb46..f22b6ab0 100755 --- a/facefusion/choices.py +++ b/facefusion/choices.py @@ -2,7 +2,7 @@ import logging from typing import List, Sequence from facefusion.common_helper import create_float_range, create_int_range -from facefusion.typing import Angle, ExecutionProviderSet, FaceDetectorSet, FaceLandmarkerModel, FaceMaskRegion, FaceMaskType, FaceSelectorMode, FaceSelectorOrder, Gender, JobStatus, LogLevelSet, OutputAudioEncoder, OutputVideoEncoder, OutputVideoPreset, Race, Score, TempFrameFormat, UiWorkflow, VideoMemoryStrategy +from facefusion.typing import Angle, DownloadProviderSet, ExecutionProviderSet, FaceDetectorSet, FaceLandmarkerModel, FaceMaskRegion, FaceMaskType, FaceSelectorMode, FaceSelectorOrder, Gender, JobStatus, LogLevelSet, OutputAudioEncoder, OutputVideoEncoder, OutputVideoPreset, Race, Score, TempFrameFormat, UiWorkflow, VideoMemoryStrategy video_memory_strategies : List[VideoMemoryStrategy] = [ 'strict', 'moderate', 'tolerant' ] @@ -28,14 +28,6 @@ output_video_presets : List[OutputVideoPreset] = [ 'ultrafast', 'superfast', 've image_template_sizes : List[float] = [ 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 3.5, 4 ] video_template_sizes : List[int] = [ 240, 360, 480, 540, 720, 1080, 1440, 2160, 4320 ] -log_level_set : LogLevelSet =\ -{ - 'error': logging.ERROR, - 'warn': logging.WARNING, - 'info': logging.INFO, - 'debug': logging.DEBUG -} - execution_provider_set : ExecutionProviderSet =\ { 'cpu': 'CPUExecutionProvider', @@ -46,6 +38,19 @@ execution_provider_set : ExecutionProviderSet =\ 'rocm': 'ROCMExecutionProvider', 'tensorrt': 'TensorrtExecutionProvider' } +download_provider_set : DownloadProviderSet =\ +{ + 'github': 'https://github.com/facefusion/facefusion-assets/releases/download/{base_name}/{file_name}', + 'huggingface': 'https://huggingface.co/facefusion/{base_name}/resolve/main/{file_name}' +} + +log_level_set : LogLevelSet =\ +{ + 'error': logging.ERROR, + 'warn': logging.WARNING, + 'info': logging.INFO, + 'debug': logging.DEBUG +} ui_workflows : List[UiWorkflow] = [ 'instant_runner', 'job_runner', 'job_manager' ] job_statuses : List[JobStatus] = [ 'drafted', 'queued', 'completed', 'failed' ] diff --git 
a/facefusion/core.py b/facefusion/core.py index 72417681..fa5e939a 100755 --- a/facefusion/core.py +++ b/facefusion/core.py @@ -167,8 +167,8 @@ def force_download() -> ErrorCode: processor_modules = get_processors_modules(available_processors) for module in common_modules + processor_modules: - if hasattr(module, 'MODEL_SET'): - for model in module.MODEL_SET.values(): + if hasattr(module, 'create_model_set'): + for model in module.create_model_set().values(): model_hashes = model.get('hashes') model_sources = model.get('sources') diff --git a/facefusion/download.py b/facefusion/download.py index 43c92a0e..6b9c9ccc 100644 --- a/facefusion/download.py +++ b/facefusion/download.py @@ -4,12 +4,13 @@ import ssl import subprocess import urllib.request from functools import lru_cache -from typing import List, Tuple +from typing import List, Optional, Tuple from urllib.parse import urlparse from tqdm import tqdm from facefusion import logger, process_manager, state_manager, wording +from facefusion.choices import download_provider_set from facefusion.common_helper import is_macos from facefusion.filesystem import get_file_size, is_file, remove_file from facefusion.hash_helper import validate_hash @@ -30,8 +31,7 @@ def conditional_download(download_directory_path : str, urls : List[str]) -> Non with tqdm(total = download_size, initial = initial_size, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress: subprocess.Popen([ shutil.which('curl'), '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ]) current_size = initial_size - - progress.set_postfix(file = download_file_name) + progress.set_postfix(download_providers = state_manager.get_item('download_providers'), file_name = download_file_name) while current_size < download_size: if is_file(download_file_path): current_size = get_file_size(download_file_path) @@ -48,12 +48,6 @@ def get_download_size(url : str) -> int: return 0 -def is_download_done(url : str, file_path : str) -> bool: - if is_file(file_path): - return get_download_size(url) == get_file_size(file_path) - return False - - def conditional_download_hashes(download_directory_path : str, hashes : DownloadSet) -> bool: hash_paths = [ hashes.get(hash_key).get('path') for hash_key in hashes.keys() ] @@ -129,3 +123,12 @@ def validate_source_paths(source_paths : List[str]) -> Tuple[List[str], List[str else: invalid_source_paths.append(source_path) return valid_source_paths, invalid_source_paths + + +def resolve_download_url(base_name : str, file_name : str) -> Optional[str]: + download_providers = state_manager.get_item('download_providers') + + for download_provider in download_provider_set: + if download_provider in download_providers: + return download_provider_set[download_provider].format(base_name = base_name, file_name = file_name) + return None diff --git a/facefusion/execution.py b/facefusion/execution.py index 95565224..efc3d9a5 100644 --- a/facefusion/execution.py +++ b/facefusion/execution.py @@ -11,15 +11,11 @@ from facefusion.typing import ExecutionDevice, ExecutionProviderKey, ExecutionPr set_default_logger_severity(3) -def get_execution_provider_choices() -> List[ExecutionProviderKey]: - return list(get_available_execution_provider_set().keys()) - - def has_execution_provider(execution_provider_key : ExecutionProviderKey) -> bool: - return execution_provider_key in 
get_execution_provider_choices() + return execution_provider_key in get_execution_provider_set().keys() -def get_available_execution_provider_set() -> ExecutionProviderSet: +def get_execution_provider_set() -> ExecutionProviderSet: available_execution_providers = get_available_providers() available_execution_provider_set : ExecutionProviderSet = {} diff --git a/facefusion/processors/core.py b/facefusion/processors/core.py index 7cff5ebf..2b93bac2 100644 --- a/facefusion/processors/core.py +++ b/facefusion/processors/core.py @@ -62,12 +62,7 @@ def clear_processors_modules(processors : List[str]) -> None: def multi_process_frames(source_paths : List[str], temp_frame_paths : List[str], process_frames : ProcessFrames) -> None: queue_payloads = create_queue_payloads(temp_frame_paths) with tqdm(total = len(queue_payloads), desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = state_manager.get_item('log_level') in [ 'warn', 'error' ]) as progress: - progress.set_postfix( - { - 'execution_providers': state_manager.get_item('execution_providers'), - 'execution_thread_count': state_manager.get_item('execution_thread_count'), - 'execution_queue_count': state_manager.get_item('execution_queue_count') - }) + progress.set_postfix(execution_providers = state_manager.get_item('execution_providers')) with ThreadPoolExecutor(max_workers = state_manager.get_item('execution_thread_count')) as executor: futures = [] queue : Queue[QueuePayload] = create_queue(queue_payloads) diff --git a/facefusion/processors/modules/age_modifier.py b/facefusion/processors/modules/age_modifier.py index 8a4d080a..faa27d55 100755 --- a/facefusion/processors/modules/age_modifier.py +++ b/facefusion/processors/modules/age_modifier.py @@ -10,7 +10,7 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import create_int_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import merge_matrix, paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5 from facefusion.face_masker import create_occlusion_mask, create_static_box_mask @@ -24,39 +24,41 @@ from facefusion.thread_helper import thread_semaphore from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import match_frame_color, read_image, read_static_image, write_image -MODEL_SET : ModelSet =\ -{ - 'styleganex_age': - { - 'hashes': - { - 'age_modifier': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/styleganex_age.hash', - 'path': resolve_relative_path('../.assets/models/styleganex_age.hash') - } - }, - 'sources': - { - 'age_modifier': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/styleganex_age.onnx', - 'path': resolve_relative_path('../.assets/models/styleganex_age.onnx') +def create_model_set() -> ModelSet: + return\ + { + 'styleganex_age': + { + 'hashes': + { + 'age_modifier': + { + 'url': resolve_download_url('models-3.1.0', 
'styleganex_age.hash'), + 'path': resolve_relative_path('../.assets/models/styleganex_age.hash') + } + }, + 'sources': + { + 'age_modifier': + { + 'url': resolve_download_url('models-3.1.0', 'styleganex_age.onnx'), + 'path': resolve_relative_path('../.assets/models/styleganex_age.onnx') + + } + }, + 'templates': + { + 'target': 'ffhq_512', + 'target_with_background': 'styleganex_384' + }, + 'sizes': + { + 'target': (256, 256), + 'target_with_background': (384, 384) } - }, - 'templates': - { - 'target': 'ffhq_512', - 'target_with_background': 'styleganex_384' - }, - 'sizes': - { - 'target': (256, 256), - 'target_with_background': (384, 384) } } -} def get_inference_pool() -> InferencePool: @@ -72,7 +74,7 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: age_modifier_model = state_manager.get_item('age_modifier_model') - return MODEL_SET.get(age_modifier_model) + return create_model_set().get(age_modifier_model) def register_args(program : ArgumentParser) -> None: diff --git a/facefusion/processors/modules/expression_restorer.py b/facefusion/processors/modules/expression_restorer.py index 0eaa4baa..1dd4bdf2 100755 --- a/facefusion/processors/modules/expression_restorer.py +++ b/facefusion/processors/modules/expression_restorer.py @@ -9,7 +9,7 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import create_int_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 from facefusion.face_masker import create_occlusion_mask, create_static_box_mask @@ -25,50 +25,52 @@ from facefusion.thread_helper import conditional_thread_semaphore, thread_semaph from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import get_video_frame, read_image, read_static_image, write_image -MODEL_SET : ModelSet =\ -{ - 'live_portrait': + +def create_model_set() -> ModelSet: + return\ { - 'hashes': + 'live_portrait': { - 'feature_extractor': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_feature_extractor.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash') + 'feature_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash') + }, + 'motion_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash') + }, + 'generator': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash') + } }, - 'motion_extractor': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_motion_extractor.hash', - 
'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash') + 'feature_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx') + }, + 'motion_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx') + }, + 'generator': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') + } }, - 'generator': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_generator.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash') - } - }, - 'sources': - { - 'feature_extractor': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_feature_extractor.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx') - }, - 'motion_extractor': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_motion_extractor.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx') - }, - 'generator': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_generator.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') - } - }, - 'template': 'arcface_128_v2', - 'size': (512, 512) + 'template': 'arcface_128_v2', + 'size': (512, 512) + } } -} def get_inference_pool() -> InferencePool: @@ -83,7 +85,7 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: expression_restorer_model = state_manager.get_item('expression_restorer_model') - return MODEL_SET.get(expression_restorer_model) + return create_model_set().get(expression_restorer_model) def register_args(program : ArgumentParser) -> None: diff --git a/facefusion/processors/modules/face_editor.py b/facefusion/processors/modules/face_editor.py index 22682bec..8234cd1c 100755 --- a/facefusion/processors/modules/face_editor.py +++ b/facefusion/processors/modules/face_editor.py @@ -9,7 +9,7 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import create_float_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import paste_back, scale_face_landmark_5, warp_face_by_face_landmark_5 from facefusion.face_masker import create_static_box_mask @@ -24,80 +24,82 @@ from facefusion.thread_helper import conditional_thread_semaphore, thread_semaph from facefusion.typing import ApplyStateItem, Args, Face, FaceLandmark68, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, write_image -MODEL_SET : ModelSet =\ -{ - 
'live_portrait': + +def create_model_set() -> ModelSet: + return\ { - 'hashes': + 'live_portrait': { - 'feature_extractor': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_feature_extractor.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash') + 'feature_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash') + }, + 'motion_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash') + }, + 'eye_retargeter': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_eye_retargeter.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.hash') + }, + 'lip_retargeter': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_lip_retargeter.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.hash') + }, + 'stitcher': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_stitcher.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.hash') + }, + 'generator': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.hash'), + 'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash') + } }, - 'motion_extractor': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_motion_extractor.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash') + 'feature_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_feature_extractor.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx') + }, + 'motion_extractor': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_motion_extractor.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx') + }, + 'eye_retargeter': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_eye_retargeter.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.onnx') + }, + 'lip_retargeter': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_lip_retargeter.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.onnx') + }, + 'stitcher': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_stitcher.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.onnx') + }, + 'generator': + { + 'url': resolve_download_url('models-3.0.0', 'live_portrait_generator.onnx'), + 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') + } }, - 'eye_retargeter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_eye_retargeter.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.hash') - }, - 'lip_retargeter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_lip_retargeter.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.hash') - }, - 'stitcher': - { - 'url': 
'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_stitcher.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.hash') - }, - 'generator': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_generator.hash', - 'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash') - } - }, - 'sources': - { - 'feature_extractor': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_feature_extractor.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx') - }, - 'motion_extractor': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_motion_extractor.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx') - }, - 'eye_retargeter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_eye_retargeter.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.onnx') - }, - 'lip_retargeter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_lip_retargeter.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.onnx') - }, - 'stitcher': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_stitcher.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.onnx') - }, - 'generator': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/live_portrait_generator.onnx', - 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') - } - }, - 'template': 'ffhq_512', - 'size': (512, 512) + 'template': 'ffhq_512', + 'size': (512, 512) + } } -} def get_inference_pool() -> InferencePool: @@ -113,7 +115,7 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: face_editor_model = state_manager.get_item('face_editor_model') - return MODEL_SET.get(face_editor_model) + return create_model_set().get(face_editor_model) def register_args(program : ArgumentParser) -> None: diff --git a/facefusion/processors/modules/face_enhancer.py b/facefusion/processors/modules/face_enhancer.py index da8d6498..768968fb 100755 --- a/facefusion/processors/modules/face_enhancer.py +++ b/facefusion/processors/modules/face_enhancer.py @@ -9,7 +9,7 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import create_int_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 from facefusion.face_masker import create_occlusion_mask, create_static_box_mask @@ -23,198 +23,200 @@ from facefusion.thread_helper import thread_semaphore from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, 
UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, write_image -MODEL_SET : ModelSet =\ -{ - 'codeformer': + +def create_model_set() -> ModelSet: + return\ { - 'hashes': + 'codeformer': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/codeformer.hash', - 'path': resolve_relative_path('../.assets/models/codeformer.hash') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'codeformer.hash'), + 'path': resolve_relative_path('../.assets/models/codeformer.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'codeformer.onnx'), + 'path': resolve_relative_path('../.assets/models/codeformer.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) }, - 'sources': + 'gfpgan_1.2': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/codeformer.onnx', - 'path': resolve_relative_path('../.assets/models/codeformer.onnx') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.2.hash'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.2.onnx'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) }, - 'template': 'ffhq_512', - 'size': (512, 512) - }, - 'gfpgan_1.2': - { - 'hashes': + 'gfpgan_1.3': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.2.hash', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.hash') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.3.hash'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.3.onnx'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) }, - 'sources': + 'gfpgan_1.4': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.2.onnx', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.4.hash'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gfpgan_1.4.onnx'), + 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) }, - 'template': 'ffhq_512', - 'size': (512, 512) - }, - 'gfpgan_1.3': - { - 'hashes': + 'gpen_bfr_256': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.3.hash', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.hash') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_256.hash'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_256.onnx'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx') + } + }, + 'template': 'arcface_128_v2', 
+ 'size': (256, 256) }, - 'sources': + 'gpen_bfr_512': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.3.onnx', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_512.hash'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_512.onnx'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) }, - 'template': 'ffhq_512', - 'size': (512, 512) - }, - 'gfpgan_1.4': - { - 'hashes': + 'gpen_bfr_1024': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.4.hash', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.hash') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_1024.hash'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_1024.onnx'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (1024, 1024) }, - 'sources': + 'gpen_bfr_2048': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gfpgan_1.4.onnx', - 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx') - } + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_2048.hash'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.hash') + } + }, + 'sources': + { + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'gpen_bfr_2048.onnx'), + 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (2048, 2048) }, - 'template': 'ffhq_512', - 'size': (512, 512) - }, - 'gpen_bfr_256': - { - 'hashes': + 'restoreformer_plus_plus': { - 'face_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_256.hash', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.hash') - } - }, - 'sources': - { - 'face_enhancer': + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'restoreformer_plus_plus.hash'), + 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_256.onnx', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx') - } - }, - 'template': 'arcface_128_v2', - 'size': (256, 256) - }, - 'gpen_bfr_512': - { - 'hashes': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_512.hash', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.hash') - } - }, - 'sources': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_512.onnx', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx') - } - }, - 'template': 'ffhq_512', - 'size': (512, 512) - }, - 'gpen_bfr_1024': - { - 'hashes': - { - 'face_enhancer': - { - 'url': 
'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_1024.hash', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.hash') - } - }, - 'sources': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_1024.onnx', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.onnx') - } - }, - 'template': 'ffhq_512', - 'size': (1024, 1024) - }, - 'gpen_bfr_2048': - { - 'hashes': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_2048.hash', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.hash') - } - }, - 'sources': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/gpen_bfr_2048.onnx', - 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.onnx') - } - }, - 'template': 'ffhq_512', - 'size': (2048, 2048) - }, - 'restoreformer_plus_plus': - { - 'hashes': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/restoreformer_plus_plus.hash', - 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.hash') - } - }, - 'sources': - { - 'face_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/restoreformer_plus_plus.onnx', - 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.onnx') - } - }, - 'template': 'ffhq_512', - 'size': (512, 512) + 'face_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'restoreformer_plus_plus.onnx'), + 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.onnx') + } + }, + 'template': 'ffhq_512', + 'size': (512, 512) + } } -} def get_inference_pool() -> InferencePool: @@ -230,7 +232,7 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: face_enhancer_model = state_manager.get_item('face_enhancer_model') - return MODEL_SET.get(face_enhancer_model) + return create_model_set().get(face_enhancer_model) def register_args(program : ArgumentParser) -> None: diff --git a/facefusion/processors/modules/face_swapper.py b/facefusion/processors/modules/face_swapper.py index 878877c9..102af419 100755 --- a/facefusion/processors/modules/face_swapper.py +++ b/facefusion/processors/modules/face_swapper.py @@ -8,7 +8,7 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import get_first -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.execution import has_execution_provider from facefusion.face_analyser import get_average_face, get_many_faces, get_one_face from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5 @@ -25,309 +25,311 @@ from facefusion.thread_helper import conditional_thread_semaphore from facefusion.typing import ApplyStateItem, Args, Embedding, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, read_static_images, unpack_resolution, 
write_image -MODEL_SET : ModelSet =\ -{ - 'blendswap_256': + +def create_model_set() -> ModelSet: + return\ { - 'hashes': + 'blendswap_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/blendswap_256.hash', - 'path': resolve_relative_path('../.assets/models/blendswap_256.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/blendswap_256.onnx', - 'path': resolve_relative_path('../.assets/models/blendswap_256.onnx') - } - }, - 'type': 'blendswap', - 'template': 'ffhq_512', - 'size': (256, 256), - 'mean': [ 0.0, 0.0, 0.0 ], - 'standard_deviation': [ 1.0, 1.0, 1.0 ] - }, - 'ghost_1_256': - { - 'hashes': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_1_256.hash', - 'path': resolve_relative_path('../.assets/models/ghost_1_256.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'blendswap_256.hash'), + 'path': resolve_relative_path('../.assets/models/blendswap_256.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_1_256.onnx', - 'path': resolve_relative_path('../.assets/models/ghost_1_256.onnx') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'blendswap_256.onnx'), + 'path': resolve_relative_path('../.assets/models/blendswap_256.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') - } + 'type': 'blendswap', + 'template': 'ffhq_512', + 'size': (256, 256), + 'mean': [ 0.0, 0.0, 0.0 ], + 'standard_deviation': [ 1.0, 1.0, 1.0 ] }, - 'type': 'ghost', - 'template': 'arcface_112_v1', - 'size': (256, 256), - 'mean': [ 0.5, 0.5, 0.5 ], - 'standard_deviation': [ 0.5, 0.5, 0.5 ] - }, - 'ghost_2_256': - { - 'hashes': + 'ghost_1_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_2_256.hash', - 'path': resolve_relative_path('../.assets/models/ghost_2_256.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_1_256.hash'), + 'path': resolve_relative_path('../.assets/models/ghost_1_256.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_2_256.onnx', - 'path': resolve_relative_path('../.assets/models/ghost_2_256.onnx') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_1_256.onnx'), + 'path': 
resolve_relative_path('../.assets/models/ghost_1_256.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') - } + 'type': 'ghost', + 'template': 'arcface_112_v1', + 'size': (256, 256), + 'mean': [ 0.5, 0.5, 0.5 ], + 'standard_deviation': [ 0.5, 0.5, 0.5 ] }, - 'type': 'ghost', - 'template': 'arcface_112_v1', - 'size': (256, 256), - 'mean': [ 0.5, 0.5, 0.5 ], - 'standard_deviation': [ 0.5, 0.5, 0.5 ] - }, - 'ghost_3_256': - { - 'hashes': + 'ghost_2_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_3_256.hash', - 'path': resolve_relative_path('../.assets/models/ghost_3_256.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_2_256.hash'), + 'path': resolve_relative_path('../.assets/models/ghost_2_256.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ghost_3_256.onnx', - 'path': resolve_relative_path('../.assets/models/ghost_3_256.onnx') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_2_256.onnx'), + 'path': resolve_relative_path('../.assets/models/ghost_2_256.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_ghost.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') - } + 'type': 'ghost', + 'template': 'arcface_112_v1', + 'size': (256, 256), + 'mean': [ 0.5, 0.5, 0.5 ], + 'standard_deviation': [ 0.5, 0.5, 0.5 ] }, - 'type': 'ghost', - 'template': 'arcface_112_v1', - 'size': (256, 256), - 'mean': [ 0.5, 0.5, 0.5 ], - 'standard_deviation': [ 0.5, 0.5, 0.5 ] - }, - 'hififace_unofficial_256': - { - 'hashes': + 'ghost_3_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/hififace_unofficial_256.hash', - 'path': resolve_relative_path('../.assets/models/hififace_unofficial_256.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_3_256.hash'), + 'path': resolve_relative_path('../.assets/models/ghost_3_256.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 
'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/arcface_converter_hififace.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_hififace.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/hififace_unofficial_256.onnx', - 'path': resolve_relative_path('../.assets/models/hififace_unofficial_256.onnx') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'ghost_3_256.onnx'), + 'path': resolve_relative_path('../.assets/models/ghost_3_256.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_ghost.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/arcface_converter_hififace.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_hififace.onnx') - } + 'type': 'ghost', + 'template': 'arcface_112_v1', + 'size': (256, 256), + 'mean': [ 0.5, 0.5, 0.5 ], + 'standard_deviation': [ 0.5, 0.5, 0.5 ] }, - 'type': 'hififace', - 'template': 'mtcnn_512', - 'size': (256, 256), - 'mean': [ 0.5, 0.5, 0.5 ], - 'standard_deviation': [ 0.5, 0.5, 0.5 ] - }, - 'inswapper_128': - { - 'hashes': + 'hififace_unofficial_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/inswapper_128.hash', - 'path': resolve_relative_path('../.assets/models/inswapper_128.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/inswapper_128.onnx', - 'path': resolve_relative_path('../.assets/models/inswapper_128.onnx') - } - }, - 'type': 'inswapper', - 'template': 'arcface_128_v2', - 'size': (128, 128), - 'mean': [ 0.0, 0.0, 0.0 ], - 'standard_deviation': [ 1.0, 1.0, 1.0 ] - }, - 'inswapper_128_fp16': - { - 'hashes': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/inswapper_128_fp16.hash', - 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/inswapper_128_fp16.onnx', - 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx') - } - }, - 'type': 'inswapper', - 'template': 'arcface_128_v2', - 'size': (128, 128), - 'mean': [ 0.0, 0.0, 0.0 ], - 'standard_deviation': [ 1.0, 1.0, 1.0 ] - }, - 'simswap_256': - { - 'hashes': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/simswap_256.hash', - 'path': resolve_relative_path('../.assets/models/simswap_256.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.1.0', 'hififace_unofficial_256.hash'), + 'path': resolve_relative_path('../.assets/models/hififace_unofficial_256.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.1.0', 'arcface_converter_hififace.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_hififace.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_simswap.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.hash') - }
- }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/simswap_256.onnx', - 'path': resolve_relative_path('../.assets/models/simswap_256.onnx') + 'face_swapper': + { + 'url': resolve_download_url('models-3.1.0', 'hififace_unofficial_256.onnx'), + 'path': resolve_relative_path('../.assets/models/hififace_unofficial_256.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.1.0', 'arcface_converter_hififace.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_hififace.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_simswap.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.onnx') - } + 'type': 'hififace', + 'template': 'mtcnn_512', + 'size': (256, 256), + 'mean': [ 0.5, 0.5, 0.5 ], + 'standard_deviation': [ 0.5, 0.5, 0.5 ] }, - 'type': 'simswap', - 'template': 'arcface_112_v1', - 'size': (256, 256), - 'mean': [ 0.485, 0.456, 0.406 ], - 'standard_deviation': [ 0.229, 0.224, 0.225 ] - }, - 'simswap_unofficial_512': - { - 'hashes': + 'inswapper_128': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/simswap_unofficial_512.hash', - 'path': resolve_relative_path('../.assets/models/simswap_unofficial_512.hash') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'inswapper_128.hash'), + 'path': resolve_relative_path('../.assets/models/inswapper_128.hash') + } }, - 'embedding_converter': + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_simswap.hash', - 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.hash') - } - }, - 'sources': - { - 'face_swapper': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/simswap_unofficial_512.onnx', - 'path': resolve_relative_path('../.assets/models/simswap_unofficial_512.onnx') + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'inswapper_128.onnx'), + 'path': resolve_relative_path('../.assets/models/inswapper_128.onnx') + } }, - 'embedding_converter': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/arcface_converter_simswap.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.onnx') - } + 'type': 'inswapper', + 'template': 'arcface_128_v2', + 'size': (128, 128), + 'mean': [ 0.0, 0.0, 0.0 ], + 'standard_deviation': [ 1.0, 1.0, 1.0 ] }, - 'type': 'simswap', - 'template': 'arcface_112_v1', - 'size': (512, 512), - 'mean': [ 0.0, 0.0, 0.0 ], - 'standard_deviation': [ 1.0, 1.0, 1.0 ] - }, - 'uniface_256': - { - 'hashes': + 'inswapper_128_fp16': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/uniface_256.hash', - 'path': resolve_relative_path('../.assets/models/uniface_256.hash') - } + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'inswapper_128_fp16.hash'), + 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.hash') + } + }, + 'sources': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'inswapper_128_fp16.onnx'), + 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx') + } + }, + 'type': 'inswapper', + 'template': 'arcface_128_v2', + 'size': 
(128, 128), + 'mean': [ 0.0, 0.0, 0.0 ], + 'standard_deviation': [ 1.0, 1.0, 1.0 ] }, - 'sources': + 'simswap_256': { - 'face_swapper': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/uniface_256.onnx', - 'path': resolve_relative_path('../.assets/models/uniface_256.onnx') - } + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'simswap_256.hash'), + 'path': resolve_relative_path('../.assets/models/simswap_256.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_simswap.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.hash') + } + }, + 'sources': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'simswap_256.onnx'), + 'path': resolve_relative_path('../.assets/models/simswap_256.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_simswap.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.onnx') + } + }, + 'type': 'simswap', + 'template': 'arcface_112_v1', + 'size': (256, 256), + 'mean': [ 0.485, 0.456, 0.406 ], + 'standard_deviation': [ 0.229, 0.224, 0.225 ] }, - 'type': 'uniface', - 'template': 'ffhq_512', - 'size': (256, 256), - 'mean': [ 0.5, 0.5, 0.5 ], - 'standard_deviation': [ 0.5, 0.5, 0.5 ] + 'simswap_unofficial_512': + { + 'hashes': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'simswap_unofficial_512.hash'), + 'path': resolve_relative_path('../.assets/models/simswap_unofficial_512.hash') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_simswap.hash'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.hash') + } + }, + 'sources': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'simswap_unofficial_512.onnx'), + 'path': resolve_relative_path('../.assets/models/simswap_unofficial_512.onnx') + }, + 'embedding_converter': + { + 'url': resolve_download_url('models-3.0.0', 'arcface_converter_simswap.onnx'), + 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.onnx') + } + }, + 'type': 'simswap', + 'template': 'arcface_112_v1', + 'size': (512, 512), + 'mean': [ 0.0, 0.0, 0.0 ], + 'standard_deviation': [ 1.0, 1.0, 1.0 ] + }, + 'uniface_256': + { + 'hashes': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'uniface_256.hash'), + 'path': resolve_relative_path('../.assets/models/uniface_256.hash') + } + }, + 'sources': + { + 'face_swapper': + { + 'url': resolve_download_url('models-3.0.0', 'uniface_256.onnx'), + 'path': resolve_relative_path('../.assets/models/uniface_256.onnx') + } + }, + 'type': 'uniface', + 'template': 'ffhq_512', + 'size': (256, 256), + 'mean': [ 0.5, 0.5, 0.5 ], + 'standard_deviation': [ 0.5, 0.5, 0.5 ] + } } -} def get_inference_pool() -> InferencePool: @@ -344,7 +346,7 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: face_swapper_model = state_manager.get_item('face_swapper_model') face_swapper_model = 'inswapper_128' if has_execution_provider('coreml') and face_swapper_model == 'inswapper_128_fp16' else face_swapper_model - return MODEL_SET.get(face_swapper_model) + return create_model_set().get(face_swapper_model) def register_args(program : ArgumentParser) -> None: diff --git a/facefusion/processors/modules/frame_colorizer.py b/facefusion/processors/modules/frame_colorizer.py index 256d8594..d34e37bd 
100644 --- a/facefusion/processors/modules/frame_colorizer.py +++ b/facefusion/processors/modules/frame_colorizer.py @@ -9,7 +9,7 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import create_int_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.processors import choices as processors_choices from facefusion.processors.typing import FrameColorizerInputs @@ -18,109 +18,111 @@ from facefusion.thread_helper import thread_semaphore from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, unpack_resolution, write_image -MODEL_SET : ModelSet =\ -{ - 'ddcolor': + +def create_model_set() -> ModelSet: + return\ { - 'hashes': + 'ddcolor': { - 'frame_colorizer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ddcolor.hash', - 'path': resolve_relative_path('../.assets/models/ddcolor.hash') - } + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'ddcolor.hash'), + 'path': resolve_relative_path('../.assets/models/ddcolor.hash') + } + }, + 'sources': + { + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'ddcolor.onnx'), + 'path': resolve_relative_path('../.assets/models/ddcolor.onnx') + } + }, + 'type': 'ddcolor' }, - 'sources': + 'ddcolor_artistic': { - 'frame_colorizer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ddcolor.onnx', - 'path': resolve_relative_path('../.assets/models/ddcolor.onnx') - } + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'ddcolor_artistic.hash'), + 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.hash') + } + }, + 'sources': + { + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'ddcolor_artistic.onnx'), + 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.onnx') + } + }, + 'type': 'ddcolor' }, - 'type': 'ddcolor' - }, - 'ddcolor_artistic': - { - 'hashes': + 'deoldify': { - 'frame_colorizer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ddcolor_artistic.hash', - 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.hash') - } + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify.hash'), + 'path': resolve_relative_path('../.assets/models/deoldify.hash') + } + }, + 'sources': + { + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify.onnx'), + 'path': resolve_relative_path('../.assets/models/deoldify.onnx') + } + }, + 'type': 'deoldify' }, - 'sources': + 'deoldify_artistic': { - 'frame_colorizer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ddcolor_artistic.onnx', - 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.onnx') - } + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify_artistic.hash'), + 'path': 
resolve_relative_path('../.assets/models/deoldify_artistic.hash') + } + }, + 'sources': + { + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify_artistic.onnx'), + 'path': resolve_relative_path('../.assets/models/deoldify_artistic.onnx') + } + }, + 'type': 'deoldify' }, - 'type': 'ddcolor' - }, - 'deoldify': - { - 'hashes': + 'deoldify_stable': { - 'frame_colorizer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify.hash', - 'path': resolve_relative_path('../.assets/models/deoldify.hash') - } - }, - 'sources': - { - 'frame_colorizer': + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify_stable.hash'), + 'path': resolve_relative_path('../.assets/models/deoldify_stable.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify.onnx', - 'path': resolve_relative_path('../.assets/models/deoldify.onnx') - } - }, - 'type': 'deoldify' - }, - 'deoldify_artistic': - { - 'hashes': - { - 'frame_colorizer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify_artistic.hash', - 'path': resolve_relative_path('../.assets/models/deoldify_artistic.hash') - } - }, - 'sources': - { - 'frame_colorizer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify_artistic.onnx', - 'path': resolve_relative_path('../.assets/models/deoldify_artistic.onnx') - } - }, - 'type': 'deoldify' - }, - 'deoldify_stable': - { - 'hashes': - { - 'frame_colorizer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify_stable.hash', - 'path': resolve_relative_path('../.assets/models/deoldify_stable.hash') - } - }, - 'sources': - { - 'frame_colorizer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/deoldify_stable.onnx', - 'path': resolve_relative_path('../.assets/models/deoldify_stable.onnx') - } - }, - 'type': 'deoldify' + 'frame_colorizer': + { + 'url': resolve_download_url('models-3.0.0', 'deoldify_stable.onnx'), + 'path': resolve_relative_path('../.assets/models/deoldify_stable.onnx') + } + }, + 'type': 'deoldify' + } } -} def get_inference_pool() -> InferencePool: @@ -136,7 +138,7 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: frame_colorizer_model = state_manager.get_item('frame_colorizer_model') - return MODEL_SET.get(frame_colorizer_model) + return create_model_set().get(frame_colorizer_model) def register_args(program : ArgumentParser) -> None: diff --git a/facefusion/processors/modules/frame_enhancer.py b/facefusion/processors/modules/frame_enhancer.py index df6472e8..7281bb1e 100644 --- a/facefusion/processors/modules/frame_enhancer.py +++ b/facefusion/processors/modules/frame_enhancer.py @@ -9,7 +9,7 @@ import facefusion.jobs.job_store import facefusion.processors.core as processors from facefusion import config, content_analyser, inference_manager, logger, process_manager, state_manager, wording from facefusion.common_helper import create_int_metavar -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension from facefusion.processors import choices as 
processors_choices from facefusion.processors.typing import FrameEnhancerInputs @@ -18,366 +18,368 @@ from facefusion.thread_helper import conditional_thread_semaphore from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import create_tile_frames, merge_tile_frames, read_image, read_static_image, write_image -MODEL_SET : ModelSet =\ -{ - 'clear_reality_x4': + +def create_model_set() -> ModelSet: + return\ { - 'hashes': + 'clear_reality_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/clear_reality_x4.hash', - 'path': resolve_relative_path('../.assets/models/clear_reality_x4.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'clear_reality_x4.hash'), + 'path': resolve_relative_path('../.assets/models/clear_reality_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'clear_reality_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/clear_reality_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'sources': + 'lsdir_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/clear_reality_x4.onnx', - 'path': resolve_relative_path('../.assets/models/clear_reality_x4.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'lsdir_x4.hash'), + 'path': resolve_relative_path('../.assets/models/lsdir_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'lsdir_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/lsdir_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'lsdir_x4': - { - 'hashes': + 'nomos8k_sc_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/lsdir_x4.hash', - 'path': resolve_relative_path('../.assets/models/lsdir_x4.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'nomos8k_sc_x4.hash'), + 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'nomos8k_sc_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'sources': + 'real_esrgan_x2': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/lsdir_x4.onnx', - 'path': resolve_relative_path('../.assets/models/lsdir_x4.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x2.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x2.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 2 }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'nomos8k_sc_x4': - { - 'hashes': + 'real_esrgan_x2_fp16': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/nomos8k_sc_x4.hash', - 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.hash') - } + 'frame_enhancer': + 
{ + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x2_fp16.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x2_fp16.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 2 }, - 'sources': + 'real_esrgan_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/nomos8k_sc_x4.onnx', - 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x4.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 4 }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'real_esrgan_x2': - { - 'hashes': + 'real_esrgan_x4_fp16': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x2.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x4_fp16.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x4_fp16.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 4 }, - 'sources': + 'real_esrgan_x8': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x2.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x8.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x8.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x8.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x8.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 8 }, - 'size': (256, 16, 8), - 'scale': 2 - }, - 'real_esrgan_x2_fp16': - { - 'hashes': + 'real_esrgan_x8_fp16': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x2_fp16.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x8_fp16.hash'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x8_fp16.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_esrgan_x8_fp16.onnx'), + 'path': resolve_relative_path('../.assets/models/real_esrgan_x8_fp16.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 8 }, - 'sources': + 'real_hatgan_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x2_fp16.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.onnx') - } + 'frame_enhancer': + { 
+ 'url': resolve_download_url('models-3.0.0', 'real_hatgan_x4.hash'), + 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'real_hatgan_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.onnx') + } + }, + 'size': (256, 16, 8), + 'scale': 4 }, - 'size': (256, 16, 8), - 'scale': 2 - }, - 'real_esrgan_x4': - { - 'hashes': + 'real_web_photo_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x4.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'real_web_photo_x4.hash'), + 'path': resolve_relative_path('../.assets/models/real_web_photo_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'real_web_photo_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/real_web_photo_x4.onnx') + } + }, + 'size': (64, 4, 2), + 'scale': 4 }, - 'sources': + 'realistic_rescaler_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x4.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'realistic_rescaler_x4.hash'), + 'path': resolve_relative_path('../.assets/models/realistic_rescaler_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'realistic_rescaler_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/realistic_rescaler_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'size': (256, 16, 8), - 'scale': 4 - }, - 'real_esrgan_x4_fp16': - { - 'hashes': + 'remacri_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x4_fp16.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.hash') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'remacri_x4.hash'), + 'path': resolve_relative_path('../.assets/models/remacri_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'remacri_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/remacri_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'sources': + 'siax_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x4_fp16.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'siax_x4.hash'), + 'path': resolve_relative_path('../.assets/models/siax_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'siax_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/siax_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'size': (256, 16, 8), - 'scale': 4 - }, - 'real_esrgan_x8': - { - 'hashes': + 'span_kendata_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x8.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x8.hash') - } + 'frame_enhancer': + { + 'url': 
resolve_download_url('models-3.0.0', 'span_kendata_x4.hash'), + 'path': resolve_relative_path('../.assets/models/span_kendata_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'span_kendata_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/span_kendata_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'sources': + 'swin2_sr_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x8.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x8.onnx') - } + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'swin2_sr_x4.hash'), + 'path': resolve_relative_path('../.assets/models/swin2_sr_x4.hash') + } + }, + 'sources': + { + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.1.0', 'swin2_sr_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/swin2_sr_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 }, - 'size': (256, 16, 8), - 'scale': 8 - }, - 'real_esrgan_x8_fp16': - { - 'hashes': + 'ultra_sharp_x4': { - 'frame_enhancer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x8_fp16.hash', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x8_fp16.hash') - } - }, - 'sources': - { - 'frame_enhancer': + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'ultra_sharp_x4.hash'), + 'path': resolve_relative_path('../.assets/models/ultra_sharp_x4.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_esrgan_x8_fp16.onnx', - 'path': resolve_relative_path('../.assets/models/real_esrgan_x8_fp16.onnx') - } - }, - 'size': (256, 16, 8), - 'scale': 8 - }, - 'real_hatgan_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_hatgan_x4.hash', - 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/real_hatgan_x4.onnx', - 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.onnx') - } - }, - 'size': (256, 16, 8), - 'scale': 4 - }, - 'real_web_photo_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/real_web_photo_x4.hash', - 'path': resolve_relative_path('../.assets/models/real_web_photo_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/real_web_photo_x4.onnx', - 'path': resolve_relative_path('../.assets/models/real_web_photo_x4.onnx') - } - }, - 'size': (64, 4, 2), - 'scale': 4 - }, - 'realistic_rescaler_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/realistic_rescaler_x4.hash', - 'path': resolve_relative_path('../.assets/models/realistic_rescaler_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/realistic_rescaler_x4.onnx', - 'path': resolve_relative_path('../.assets/models/realistic_rescaler_x4.onnx') - } - }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'remacri_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/remacri_x4.hash', - 'path': resolve_relative_path('../.assets/models/remacri_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/remacri_x4.onnx', - 'path': resolve_relative_path('../.assets/models/remacri_x4.onnx') - } - }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'siax_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/siax_x4.hash', - 'path': resolve_relative_path('../.assets/models/siax_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/siax_x4.onnx', - 'path': resolve_relative_path('../.assets/models/siax_x4.onnx') - } - }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'span_kendata_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/span_kendata_x4.hash', - 'path': resolve_relative_path('../.assets/models/span_kendata_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/span_kendata_x4.onnx', - 'path': resolve_relative_path('../.assets/models/span_kendata_x4.onnx') - } - }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'swin2_sr_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/swin2_sr_x4.hash', - 'path': resolve_relative_path('../.assets/models/swin2_sr_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.1.0/swin2_sr_x4.onnx', - 'path': resolve_relative_path('../.assets/models/swin2_sr_x4.onnx') - } - }, - 'size': (128, 8, 4), - 'scale': 4 - }, - 'ultra_sharp_x4': - { - 'hashes': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ultra_sharp_x4.hash', - 'path': resolve_relative_path('../.assets/models/ultra_sharp_x4.hash') - } - }, - 'sources': - { - 'frame_enhancer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/ultra_sharp_x4.onnx', - 'path': resolve_relative_path('../.assets/models/ultra_sharp_x4.onnx') - } - }, - 'size': (128, 8, 4), - 'scale': 4 + 'frame_enhancer': + { + 'url': resolve_download_url('models-3.0.0', 'ultra_sharp_x4.onnx'), + 'path': resolve_relative_path('../.assets/models/ultra_sharp_x4.onnx') + } + }, + 'size': (128, 8, 4), + 'scale': 4 + } } -} def get_inference_pool() -> InferencePool: @@ -393,7 +395,7 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: frame_enhancer_model = state_manager.get_item('frame_enhancer_model') - return MODEL_SET.get(frame_enhancer_model) + return create_model_set().get(frame_enhancer_model) def register_args(program : ArgumentParser) -> None: diff --git a/facefusion/processors/modules/lip_syncer.py b/facefusion/processors/modules/lip_syncer.py index 6be93294..81b261f9 100755 --- a/facefusion/processors/modules/lip_syncer.py +++ b/facefusion/processors/modules/lip_syncer.py @@ -10,7 +10,7 @@ import facefusion.processors.core as processors from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, 
inference_manager, logger, process_manager, state_manager, voice_extractor, wording from facefusion.audio import create_empty_audio_frame, get_voice_frame, read_static_voice from facefusion.common_helper import get_first -from facefusion.download import conditional_download_hashes, conditional_download_sources +from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url from facefusion.face_analyser import get_many_faces, get_one_face from facefusion.face_helper import create_bounding_box, paste_back, warp_face_by_bounding_box, warp_face_by_face_landmark_5 from facefusion.face_masker import create_mouth_mask, create_occlusion_mask, create_static_box_mask @@ -24,49 +24,51 @@ from facefusion.thread_helper import conditional_thread_semaphore from facefusion.typing import ApplyStateItem, Args, AudioFrame, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame from facefusion.vision import read_image, read_static_image, restrict_video_fps, write_image -MODEL_SET : ModelSet =\ -{ - 'wav2lip_96': + +def create_model_set() -> ModelSet: + return\ { - 'hashes': + 'wav2lip_96': { - 'lip_syncer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/wav2lip_96.hash', - 'path': resolve_relative_path('../.assets/models/wav2lip_96.hash') - } + 'lip_syncer': + { + 'url': resolve_download_url('models-3.0.0', 'wav2lip_96.hash'), + 'path': resolve_relative_path('../.assets/models/wav2lip_96.hash') + } + }, + 'sources': + { + 'lip_syncer': + { + 'url': resolve_download_url('models-3.0.0', 'wav2lip_96.onnx'), + 'path': resolve_relative_path('../.assets/models/wav2lip_96.onnx') + } + }, + 'size': (96, 96) }, - 'sources': + 'wav2lip_gan_96': { - 'lip_syncer': + 'hashes': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/wav2lip_96.onnx', - 'path': resolve_relative_path('../.assets/models/wav2lip_96.onnx') - } - }, - 'size': (96, 96) - }, - 'wav2lip_gan_96': - { - 'hashes': - { - 'lip_syncer': + 'lip_syncer': + { + 'url': resolve_download_url('models-3.0.0', 'wav2lip_gan_96.hash'), + 'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.hash') + } + }, + 'sources': { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/wav2lip_gan_96.hash', - 'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.hash') - } - }, - 'sources': - { - 'lip_syncer': - { - 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/wav2lip_gan_96.onnx', - 'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.onnx') - } - }, - 'size': (96, 96) + 'lip_syncer': + { + 'url': resolve_download_url('models-3.0.0', 'wav2lip_gan_96.onnx'), + 'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.onnx') + } + }, + 'size': (96, 96) + } } -} def get_inference_pool() -> InferencePool: @@ -82,7 +84,7 @@ def clear_inference_pool() -> None: def get_model_options() -> ModelOptions: lip_syncer_model = state_manager.get_item('lip_syncer_model') - return MODEL_SET.get(lip_syncer_model) + return create_model_set().get(lip_syncer_model) def register_args(program : ArgumentParser) -> None: diff --git a/facefusion/program.py b/facefusion/program.py index 8212498a..7d8727e0 100755 --- a/facefusion/program.py +++ b/facefusion/program.py @@ -4,7 +4,7 @@ from argparse import ArgumentParser, HelpFormatter import facefusion.choices from facefusion import config, metadata, 
state_manager, wording from facefusion.common_helper import create_float_metavar, create_int_metavar, get_last -from facefusion.execution import get_execution_provider_choices +from facefusion.execution import get_execution_provider_set from facefusion.filesystem import list_directory from facefusion.jobs import job_store from facefusion.processors.core import get_processors_modules @@ -70,7 +70,7 @@ def create_output_path_program() -> ArgumentParser: def create_face_detector_program() -> ArgumentParser: program = ArgumentParser(add_help = False) group_face_detector = program.add_argument_group('face detector') - group_face_detector.add_argument('--face-detector-model', help = wording.get('help.face_detector_model'), default = config.get_str_value('face_detector.face_detector_model', 'yoloface'), choices = facefusion.choices.face_detector_set.keys()) + group_face_detector.add_argument('--face-detector-model', help = wording.get('help.face_detector_model'), default = config.get_str_value('face_detector.face_detector_model', 'yoloface'), choices = list(facefusion.choices.face_detector_set.keys())) known_args, _ = program.parse_known_args() face_detector_size_choices = facefusion.choices.face_detector_set.get(known_args.face_detector_model) group_face_detector.add_argument('--face-detector-size', help = wording.get('help.face_detector_size'), default = config.get_str_value('face_detector.face_detector_size', get_last(face_detector_size_choices)), choices = face_detector_size_choices) @@ -166,16 +166,24 @@ def create_uis_program() -> ArgumentParser: def create_execution_program() -> ArgumentParser: program = ArgumentParser(add_help = False) - execution_providers = get_execution_provider_choices() group_execution = program.add_argument_group('execution') group_execution.add_argument('--execution-device-id', help = wording.get('help.execution_device_id'), default = config.get_str_value('execution.execution_device_id', '0')) - group_execution.add_argument('--execution-providers', help = wording.get('help.execution_providers').format(choices = ', '.join(execution_providers)), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS') + group_execution.add_argument('--execution-providers', help = wording.get('help.execution_providers').format(choices = ', '.join(list(get_execution_provider_set().keys()))), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = list(get_execution_provider_set().keys()), nargs = '+', metavar = 'EXECUTION_PROVIDERS') group_execution.add_argument('--execution-thread-count', help = wording.get('help.execution_thread_count'), type = int, default = config.get_int_value('execution.execution_thread_count', '4'), choices = facefusion.choices.execution_thread_count_range, metavar = create_int_metavar(facefusion.choices.execution_thread_count_range)) group_execution.add_argument('--execution-queue-count', help = wording.get('help.execution_queue_count'), type = int, default = config.get_int_value('execution.execution_queue_count', '1'), choices = facefusion.choices.execution_queue_count_range, metavar = create_int_metavar(facefusion.choices.execution_queue_count_range)) job_store.register_job_keys([ 'execution_device_id', 'execution_providers', 'execution_thread_count', 'execution_queue_count' ]) return program +def create_download_program() -> ArgumentParser: + program = ArgumentParser(add_help = False) + group_download = program.add_argument_group('download') + 
group_download.add_argument('--download-providers', help = wording.get('help.download_providers').format(choices = ', '.join(list(facefusion.choices.download_provider_set.keys()))), default = config.get_str_list('download.download_providers', 'github'), choices = list(facefusion.choices.download_provider_set.keys()), nargs = '+', metavar = 'DOWNLOAD_PROVIDERS') + group_download.add_argument('--skip-download', help = wording.get('help.skip_download'), action = 'store_true', default = config.get_bool_value('download.skip_download')) + job_store.register_job_keys([ 'download_providers', 'skip_download' ]) + return program + + def create_memory_program() -> ArgumentParser: program = ArgumentParser(add_help = False) group_memory = program.add_argument_group('memory') @@ -185,18 +193,10 @@ def create_memory_program() -> ArgumentParser: return program -def create_skip_download_program() -> ArgumentParser: - program = ArgumentParser(add_help = False) - group_misc = program.add_argument_group('misc') - group_misc.add_argument('--skip-download', help = wording.get('help.skip_download'), action = 'store_true', default = config.get_bool_value('misc.skip_download')) - job_store.register_job_keys([ 'skip_download' ]) - return program - - def create_log_level_program() -> ArgumentParser: program = ArgumentParser(add_help = False) group_misc = program.add_argument_group('misc') - group_misc.add_argument('--log-level', help = wording.get('help.log_level'), default = config.get_str_value('misc.log_level', 'info'), choices = facefusion.choices.log_level_set.keys()) + group_misc.add_argument('--log-level', help = wording.get('help.log_level'), default = config.get_str_value('misc.log_level', 'info'), choices = list(facefusion.choices.log_level_set.keys())) job_store.register_job_keys([ 'log_level' ]) return program @@ -225,7 +225,7 @@ def collect_step_program() -> ArgumentParser: def collect_job_program() -> ArgumentParser: - return ArgumentParser(parents= [ create_execution_program(), create_memory_program(), create_skip_download_program(), create_log_level_program() ], add_help = False) + return ArgumentParser(parents= [ create_execution_program(), create_download_program(), create_memory_program(), create_log_level_program() ], add_help = False) def create_program() -> ArgumentParser: diff --git a/facefusion/typing.py b/facefusion/typing.py index 29f0f232..a6dfb2b1 100755 --- a/facefusion/typing.py +++ b/facefusion/typing.py @@ -109,13 +109,6 @@ OutputAudioEncoder = Literal['aac', 'libmp3lame', 'libopus', 'libvorbis'] OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf','h264_qsv', 'hevc_qsv', 'h264_videotoolbox', 'hevc_videotoolbox'] OutputVideoPreset = Literal['ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow'] -Download = TypedDict('Download', -{ - 'url' : str, - 'path' : str -}) -DownloadSet = Dict[str, Download] - ModelOptions = Dict[str, Any] ModelSet = Dict[str, ModelOptions] ModelInitializer = NDArray[Any] @@ -124,6 +117,15 @@ ExecutionProviderKey = Literal['cpu', 'coreml', 'cuda', 'directml', 'openvino', ExecutionProviderValue = Literal['CPUExecutionProvider', 'CoreMLExecutionProvider', 'CUDAExecutionProvider', 'DmlExecutionProvider', 'OpenVINOExecutionProvider', 'ROCMExecutionProvider', 'TensorrtExecutionProvider'] ExecutionProviderSet = Dict[ExecutionProviderKey, ExecutionProviderValue] +DownloadProviderKey = Literal['github', 'huggingface'] +DownloadProviderSet = Dict[DownloadProviderKey, str] 
+Download = TypedDict('Download', +{ + 'url' : str, + 'path' : str +}) +DownloadSet = Dict[str, Download] + ValueAndUnit = TypedDict('ValueAndUnit', { 'value' : int, @@ -237,9 +239,10 @@ StateKey = Literal\ 'execution_providers', 'execution_thread_count', 'execution_queue_count', + 'download_providers', + 'skip_download', 'video_memory_strategy', 'system_memory_limit', - 'skip_download', 'log_level', 'job_id', 'job_status', @@ -249,7 +252,7 @@ State = TypedDict('State', { 'command' : str, 'config_path' : str, - 'temp_path': str, + 'temp_path' : str, 'jobs_path' : str, 'source_paths' : List[str], 'target_path' : str, @@ -294,9 +297,10 @@ State = TypedDict('State', 'execution_providers' : List[ExecutionProviderKey], 'execution_thread_count' : int, 'execution_queue_count' : int, + 'download_providers' : List[DownloadProviderKey], + 'skip_download' : bool, 'video_memory_strategy' : VideoMemoryStrategy, 'system_memory_limit' : int, - 'skip_download' : bool, 'log_level' : LogLevel, 'job_id' : str, 'job_status' : JobStatus, diff --git a/facefusion/uis/components/benchmark_options.py b/facefusion/uis/components/benchmark_options.py index 5b5cda02..c087487b 100644 --- a/facefusion/uis/components/benchmark_options.py +++ b/facefusion/uis/components/benchmark_options.py @@ -16,8 +16,8 @@ def render() -> None: BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup( label = wording.get('uis.benchmark_runs_checkbox_group'), - value = list(BENCHMARKS.keys()), - choices = list(BENCHMARKS.keys()) + choices = list(BENCHMARKS.keys()), + value = list(BENCHMARKS.keys()) ) BENCHMARK_CYCLES_SLIDER = gradio.Slider( label = wording.get('uis.benchmark_cycles_slider'), diff --git a/facefusion/uis/components/download.py b/facefusion/uis/components/download.py new file mode 100644 index 00000000..7a4c2e2f --- /dev/null +++ b/facefusion/uis/components/download.py @@ -0,0 +1,29 @@ +from typing import List, Optional + +import gradio + +from facefusion import state_manager, wording +from facefusion.choices import download_provider_set +from facefusion.typing import DownloadProviderKey + +DOWNLOAD_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None + + +def render() -> None: + global DOWNLOAD_PROVIDERS_CHECKBOX_GROUP + + DOWNLOAD_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup( + label = wording.get('uis.download_providers_checkbox_group'), + choices = list(download_provider_set.keys()), + value = state_manager.get_item('download_providers') + ) + + +def listen() -> None: + DOWNLOAD_PROVIDERS_CHECKBOX_GROUP.change(update_download_providers, inputs = DOWNLOAD_PROVIDERS_CHECKBOX_GROUP, outputs = DOWNLOAD_PROVIDERS_CHECKBOX_GROUP) + + +def update_download_providers(download_providers : List[DownloadProviderKey]) -> gradio.CheckboxGroup: + download_providers = download_providers or list(download_provider_set.keys()) + state_manager.set_item('download_providers', download_providers) + return gradio.CheckboxGroup(value = state_manager.get_item('download_providers')) diff --git a/facefusion/uis/components/execution.py b/facefusion/uis/components/execution.py index baf1888c..02730ece 100644 --- a/facefusion/uis/components/execution.py +++ b/facefusion/uis/components/execution.py @@ -3,7 +3,7 @@ from typing import List, Optional import gradio from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, voice_extractor, wording -from facefusion.execution import get_execution_provider_choices +from facefusion.execution import 
get_execution_provider_set from facefusion.processors.core import clear_processors_modules from facefusion.typing import ExecutionProviderKey @@ -15,7 +15,7 @@ def render() -> None: EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup( label = wording.get('uis.execution_providers_checkbox_group'), - choices = get_execution_provider_choices(), + choices = list(get_execution_provider_set().keys()), value = state_manager.get_item('execution_providers') ) @@ -33,6 +33,6 @@ def update_execution_providers(execution_providers : List[ExecutionProviderKey]) face_recognizer.clear_inference_pool() voice_extractor.clear_inference_pool() clear_processors_modules(state_manager.get_item('processors')) - execution_providers = execution_providers or get_execution_provider_choices() + execution_providers = execution_providers or list(get_execution_provider_set()) state_manager.set_item('execution_providers', execution_providers) return gradio.CheckboxGroup(value = state_manager.get_item('execution_providers')) diff --git a/facefusion/uis/components/face_detector.py b/facefusion/uis/components/face_detector.py index eb0e20fc..37a2d433 100644 --- a/facefusion/uis/components/face_detector.py +++ b/facefusion/uis/components/face_detector.py @@ -31,7 +31,7 @@ def render() -> None: with gradio.Row(): FACE_DETECTOR_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_detector_model_dropdown'), - choices = facefusion.choices.face_detector_set.keys(), + choices = list(facefusion.choices.face_detector_set.keys()), value = state_manager.get_item('face_detector_model') ) FACE_DETECTOR_SIZE_DROPDOWN = gradio.Dropdown(**face_detector_size_dropdown_options) diff --git a/facefusion/uis/components/face_swapper_options.py b/facefusion/uis/components/face_swapper_options.py index 7eb4b713..e2259214 100755 --- a/facefusion/uis/components/face_swapper_options.py +++ b/facefusion/uis/components/face_swapper_options.py @@ -19,7 +19,7 @@ def render() -> None: FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_swapper_model_dropdown'), - choices = processors_choices.face_swapper_set.keys(), + choices = list(processors_choices.face_swapper_set.keys()), value = state_manager.get_item('face_swapper_model'), visible = 'face_swapper' in state_manager.get_item('processors') ) diff --git a/facefusion/uis/components/terminal.py b/facefusion/uis/components/terminal.py index bbe2a35b..9d57fd9b 100644 --- a/facefusion/uis/components/terminal.py +++ b/facefusion/uis/components/terminal.py @@ -24,7 +24,7 @@ def render() -> None: LOG_LEVEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.log_level_dropdown'), - choices = log_level_set.keys(), + choices = list(log_level_set.keys()), value = state_manager.get_item('log_level') ) TERMINAL_TEXTBOX = gradio.Textbox( diff --git a/facefusion/uis/layouts/benchmark.py b/facefusion/uis/layouts/benchmark.py index 766492c1..76485904 100644 --- a/facefusion/uis/layouts/benchmark.py +++ b/facefusion/uis/layouts/benchmark.py @@ -1,23 +1,23 @@ import gradio from facefusion import state_manager -from facefusion.download import conditional_download -from facefusion.uis.components import about, age_modifier_options, benchmark, benchmark_options, deep_swapper_options, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, memory, processors +from facefusion.download import 
conditional_download, resolve_download_url +from facefusion.uis.components import about, age_modifier_options, benchmark, benchmark_options, deep_swapper_options, download, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, memory, processors def pre_check() -> bool: if not state_manager.get_item('skip_download'): conditional_download('.assets/examples', [ - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/source.jpg', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/source.mp3', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-360p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-540p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-720p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-1080p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-1440p.mp4', - 'https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-2160p.mp4' + resolve_download_url('examples-3.0.0', 'source.jpg'), + resolve_download_url('examples-3.0.0', 'source.mp3'), + resolve_download_url('examples-3.0.0', 'target-240p.mp4'), + resolve_download_url('examples-3.0.0', 'target-360p.mp4'), + resolve_download_url('examples-3.0.0', 'target-540p.mp4'), + resolve_download_url('examples-3.0.0', 'target-720p.mp4'), + resolve_download_url('examples-3.0.0', 'target-1080p.mp4'), + resolve_download_url('examples-3.0.0', 'target-1440p.mp4'), + resolve_download_url('examples-3.0.0', 'target-2160p.mp4') ]) return True return False @@ -55,6 +55,8 @@ def render() -> gradio.Blocks: execution.render() execution_thread_count.render() execution_queue_count.render() + with gradio.Blocks(): + download.render() with gradio.Blocks(): memory.render() with gradio.Blocks(): @@ -70,6 +72,7 @@ def listen() -> None: age_modifier_options.listen() deep_swapper_options.listen() expression_restorer_options.listen() + download.listen() face_debugger_options.listen() face_editor_options.listen() face_enhancer_options.listen() diff --git a/facefusion/uis/layouts/default.py b/facefusion/uis/layouts/default.py index 054cc1f0..96553f8a 100755 --- a/facefusion/uis/layouts/default.py +++ b/facefusion/uis/layouts/default.py @@ -1,7 +1,7 @@ import gradio from facefusion import state_manager -from facefusion.uis.components import about, age_modifier_options, common_options, deep_swapper_options, execution, execution_queue_count, execution_thread_count, expression_restorer_options, face_debugger_options, face_detector, face_editor_options, face_enhancer_options, face_landmarker, face_masker, face_selector, face_swapper_options, frame_colorizer_options, frame_enhancer_options, instant_runner, job_manager, job_runner, lip_syncer_options, memory, output, output_options, preview, processors, source, target, temp_frame, terminal, trim_frame, ui_workflow +from facefusion.uis.components import about, age_modifier_options, common_options, deep_swapper_options, download, execution, execution_queue_count, execution_thread_count, expression_restorer_options, 
face_debugger_options, face_detector, face_editor_options, face_enhancer_options, face_landmarker, face_masker, face_selector, face_swapper_options, frame_colorizer_options, frame_enhancer_options, instant_runner, job_manager, job_runner, lip_syncer_options, memory, output, output_options, preview, processors, source, target, temp_frame, terminal, trim_frame, ui_workflow def pre_check() -> bool: @@ -40,6 +40,8 @@ def render() -> gradio.Blocks: execution.render() execution_thread_count.render() execution_queue_count.render() + with gradio.Blocks(): + download.render() with gradio.Blocks(): memory.render() with gradio.Blocks(): @@ -93,6 +95,7 @@ def listen() -> None: execution.listen() execution_thread_count.listen() execution_queue_count.listen() + download.listen() memory.listen() temp_frame.listen() output_options.listen() diff --git a/facefusion/uis/layouts/webcam.py b/facefusion/uis/layouts/webcam.py index e57fb4b5..7b1fcd71 100644 --- a/facefusion/uis/layouts/webcam.py +++ b/facefusion/uis/layouts/webcam.py @@ -1,7 +1,7 @@ import gradio from facefusion import state_manager -from facefusion.uis.components import about, age_modifier_options, deep_swapper_options, execution, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, processors, source, webcam, webcam_options +from facefusion.uis.components import about, age_modifier_options, deep_swapper_options, download, execution, execution_thread_count, expression_restorer_options, face_debugger_options, face_editor_options, face_enhancer_options, face_swapper_options, frame_colorizer_options, frame_enhancer_options, lip_syncer_options, processors, source, webcam, webcam_options def pre_check() -> bool: @@ -39,6 +39,8 @@ def render() -> gradio.Blocks: with gradio.Blocks(): execution.render() execution_thread_count.render() + with gradio.Blocks(): + download.render() with gradio.Blocks(): webcam_options.render() with gradio.Blocks(): @@ -54,6 +56,7 @@ def listen() -> None: age_modifier_options.listen() deep_swapper_options.listen() expression_restorer_options.listen() + download.listen() face_debugger_options.listen() face_editor_options.listen() face_enhancer_options.listen() diff --git a/facefusion/wording.py b/facefusion/wording.py index 0f94d842..8210ef74 100755 --- a/facefusion/wording.py +++ b/facefusion/wording.py @@ -178,14 +178,16 @@ WORDING : Dict[str, Any] =\ 'ui_workflow': 'choose the ui workflow', # execution 'execution_device_id': 'specify the device used for processing', - 'execution_providers': 'accelerate the model inference using different providers (choices: {choices}, ...)', + 'execution_providers': 'inference using different providers (choices: {choices}, ...)', 'execution_thread_count': 'specify the amount of parallel threads while processing', 'execution_queue_count': 'specify the amount of frames each thread is processing', + # download + 'download_providers': 'download using different providers (choices: {choices}, ...)', + 'skip_download': 'omit downloads and remote lookups', # memory 'video_memory_strategy': 'balance fast processing and low VRAM usage', 'system_memory_limit': 'limit the available RAM that can be used while processing', # misc - 'skip_download': 'omit downloads and remote lookups', 'log_level': 'adjust the message severity displayed in the terminal', # run 'run': 'run the program', @@ -227,6 +229,7 @@ WORDING : Dict[str, Any] =\ 
'benchmark_runs_checkbox_group': 'BENCHMARK RUNS', 'clear_button': 'CLEAR', 'common_options_checkbox_group': 'OPTIONS', + 'download_providers_checkbox_group': 'DOWNLOAD PROVIDERS', 'deep_swapper_model_dropdown': 'DEEP SWAPPER MODEL', 'execution_providers_checkbox_group': 'EXECUTION PROVIDERS', 'execution_queue_count_slider': 'EXECUTION QUEUE COUNT', diff --git a/tests/test_download.py b/tests/test_download.py index 8ca1d368..3129b345 100644 --- a/tests/test_download.py +++ b/tests/test_download.py @@ -1,7 +1,7 @@ import pytest -from facefusion.download import conditional_download, get_download_size, is_download_done -from .helper import get_test_example_file, get_test_examples_directory +from facefusion.download import conditional_download, get_download_size +from .helper import get_test_examples_directory @pytest.fixture(scope = 'module', autouse = True) @@ -16,9 +16,3 @@ def test_get_download_size() -> None: assert get_download_size('https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4') == 191675 assert get_download_size('https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-360p.mp4') == 370732 assert get_download_size('invalid') == 0 - - -def test_is_download_done() -> None: - assert is_download_done('https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4', get_test_example_file('target-240p.mp4')) is True - assert is_download_done('https://github.com/facefusion/facefusion-assets/releases/download/examples-3.0.0/target-240p.mp4', 'invalid') is False - assert is_download_done('invalid', 'invalid') is False diff --git a/tests/test_execution.py b/tests/test_execution.py index f5c6a956..c8b3b7db 100644 --- a/tests/test_execution.py +++ b/tests/test_execution.py @@ -1,8 +1,8 @@ -from facefusion.execution import create_execution_providers, get_execution_provider_choices, has_execution_provider +from facefusion.execution import create_execution_providers, get_execution_provider_set, has_execution_provider -def test_get_execution_provider_choices() -> None: - assert 'cpu' in get_execution_provider_choices() +def test_get_execution_provider_set() -> None: + assert 'cpu' in get_execution_provider_set().keys() def test_has_execution_provider() -> None: