Refine face selection and detection (#174)

* Refine face selection and detection

* Update README.md

* Fix some face analyser UI

* Introduce range handling for CLI arguments

* Fix some spacings
Henry Ruhs 2023-10-30 21:35:33 +01:00 committed by GitHub
parent 1c0ac89b54
commit b85d474351
29 changed files with 267 additions and 161 deletions

README.md

@@ -27,8 +27,6 @@ Usage
Run the command:
```
python run.py [options]
options:
-h, --help show this help message and exit
-s SOURCE_PATH, --source SOURCE_PATH select a source image
@@ -42,17 +40,21 @@ misc:
execution:
--execution-providers {cpu} [{cpu} ...] choose from the available execution providers (choices: cpu, ...)
--execution-thread-count EXECUTION_THREAD_COUNT specify the number of execution threads
--execution-queue-count EXECUTION_QUEUE_COUNT specify the number of execution queues
--max-memory MAX_MEMORY specify the maximum amount of ram to be used (in gb)
--execution-thread-count [1-128] specify the number of execution threads
--execution-queue-count [1-32] specify the number of execution queues
--max-memory [1-128] specify the maximum amount of ram to be used (in gb)
face recognition:
--face-recognition {reference,many} specify the method for face recognition
face analyser:
--face-analyser-direction {left-right,right-left,top-bottom,bottom-top,small-large,large-small} specify the direction used for face analysis
--face-analyser-age {child,teen,adult,senior} specify the age used for face analysis
--face-analyser-gender {male,female} specify the gender used for face analysis
--face-detection-size {320x320,480x480,512x512,640x640,768x768,1024x1024} specify the size threshold used for face detection
--face-detection-score [0.0-1.0] specify the score threshold used for face detection
face selector:
--face-selector-mode {reference,many} specify the mode for face selection
--reference-face-position REFERENCE_FACE_POSITION specify the position of the reference face
--reference-face-distance REFERENCE_FACE_DISTANCE specify the distance between the reference face and the target face
--reference-face-distance [0.0-1.5] specify the distance between the reference face and the target face
--reference-frame-number REFERENCE_FRAME_NUMBER specify the number of the reference frame
frame extraction:
@@ -71,10 +73,10 @@ output creation:
frame processors:
--frame-processors FRAME_PROCESSORS [FRAME_PROCESSORS ...] choose from the available frame processors (choices: face_enhancer, face_swapper, frame_enhancer, ...)
--face-enhancer-model {codeformer,gfpgan_1.2,gfpgan_1.3,gfpgan_1.4,gpen_bfr_512} choose from the mode for the frame processor
--face-enhancer-model {codeformer,gfpgan_1.2,gfpgan_1.3,gfpgan_1.4,gpen_bfr_512} choose the model for the frame processor
--face-enhancer-blend [0-100] specify the blend factor for the frame processor
--face-swapper-model {inswapper_128,inswapper_128_fp16} choose from the mode for the frame processor
--frame-enhancer-model {realesrgan_x2plus,realesrgan_x4plus,realesrnet_x4plus} choose from the mode for the frame processor
--face-swapper-model {inswapper_128,inswapper_128_fp16,simswap_244} choose the model for the frame processor
--frame-enhancer-model {realesrgan_x2plus,realesrgan_x4plus,realesrnet_x4plus} choose the model for the frame processor
--frame-enhancer-blend [0-100] specify the blend factor for the frame processor
uis:
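A quick example combining the new range-bounded options (file names are placeholders; the -t/--target and -o/--output flags follow the same pattern as --source above):
```
python run.py --source source.jpg --target target.mp4 --output output.mp4 --execution-thread-count 8 --face-detection-size 640x640 --face-selector-mode many
```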

facefusion/choices.py

@@ -1,10 +1,23 @@
from typing import List
from facefusion.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
import numpy
from facefusion.typing import FaceSelectorMode, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
face_recognitions : List[FaceRecognition] = [ 'reference', 'many' ]
face_analyser_directions : List[FaceAnalyserDirection] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ]
face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
face_analyser_genders : List[FaceAnalyserGender] = [ 'male', 'female' ]
face_detection_sizes : List[str] = [ '320x320', '480x480', '512x512', '640x640', '768x768', '1024x1024' ]
face_selector_modes : List[FaceSelectorMode] = [ 'reference', 'many' ]
temp_frame_formats : List[TempFrameFormat] = [ 'jpg', 'png' ]
output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
execution_thread_count_range : List[int] = numpy.arange(1, 129, 1).tolist()
execution_queue_count_range : List[int] = numpy.arange(1, 33, 1).tolist()
max_memory_range : List[int] = numpy.arange(1, 129, 1).tolist()
face_detection_score_range : List[float] = numpy.arange(0.0, 1.05, 0.05).tolist()
reference_face_distance_range : List[float] = numpy.arange(0.0, 1.55, 0.05).tolist()
temp_frame_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
output_image_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
output_video_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
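These lists do double duty: they act as the argparse choices and produce the compact [min-max] metavar via create_metavar (defined in facefusion/utilities.py further down). A minimal sketch of the pattern, using only what this commit introduces:
```python
from argparse import ArgumentParser
from typing import Any, List
import numpy

execution_thread_count_range : List[int] = numpy.arange(1, 129, 1).tolist()

def create_metavar(ranges : List[Any]) -> str:
	# render a compact [first-last] hint instead of listing all 128 choices
	return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'

program = ArgumentParser()
program.add_argument('--execution-thread-count', type = int, default = 4, choices = execution_thread_count_range, metavar = create_metavar(execution_thread_count_range))
args = program.parse_args([ '--execution-thread-count', '8' ])
print(args.execution_thread_count) # 8, while --help prints [1-128]
```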

facefusion/core.py

@@ -16,9 +16,8 @@ import facefusion.globals
from facefusion import face_analyser, predictor, metadata, wording
from facefusion.predictor import predict_image, predict_video
from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
from facefusion.utilities import is_image, is_video, detect_fps, compress_image, merge_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, list_module_names, encode_execution_providers, decode_execution_providers, normalize_output_path, update_status
from facefusion.utilities import is_image, is_video, detect_fps, compress_image, merge_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, list_module_names, encode_execution_providers, decode_execution_providers, normalize_output_path, create_metavar, update_status
warnings.filterwarnings('ignore', category = FutureWarning, module = 'insightface')
warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
@@ -37,30 +36,34 @@ def cli() -> None:
# execution
group_execution = program.add_argument_group('execution')
group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = 'cpu'), dest = 'execution_providers', default = [ 'cpu' ], choices = encode_execution_providers(onnxruntime.get_available_providers()), nargs = '+')
group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = 1)
group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1)
group_execution.add_argument('--max-memory', help=wording.get('max_memory_help'), dest='max_memory', type = int)
group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = 4, choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1, choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
group_execution.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int, choices = facefusion.choices.max_memory_range, metavar = create_metavar(facefusion.choices.max_memory_range))
# face analyser
group_face_analyser = program.add_argument_group('face recognition')
group_face_analyser = program.add_argument_group('face analyser')
group_face_analyser.add_argument('--face-analyser-direction', help = wording.get('face_analyser_direction_help'), dest = 'face_analyser_direction', default = 'left-right', choices = facefusion.choices.face_analyser_directions)
group_face_analyser.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = facefusion.choices.face_analyser_ages)
group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = facefusion.choices.face_analyser_genders)
group_face_analyser.add_argument('--face-recognition', help = wording.get('face_recognition_help'), dest = 'face_recognition', default = 'reference', choices = facefusion.choices.face_recognitions)
group_face_analyser.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0)
group_face_analyser.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 0.6)
group_face_analyser.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0)
group_face_analyser.add_argument('--face-detection-size', help = wording.get('face_detection_size_help'), dest = 'face_detection_size', default = '1024x1024', choices = facefusion.choices.face_detection_sizes)
group_face_analyser.add_argument('--face-detection-score', help = wording.get('face_detection_score_help'), dest = 'face_detection_score', type = float, default = 0.5, choices = facefusion.choices.face_detection_score_range, metavar = create_metavar(facefusion.choices.face_detection_score_range))
# face selector
group_face_selector = program.add_argument_group('face selector')
group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), dest = 'face_selector_mode', default = 'reference', choices = facefusion.choices.face_selector_modes)
group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0)
group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 0.6, choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range))
group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0)
# frame extraction
group_frame_extraction = program.add_argument_group('frame extraction')
group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int)
group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int)
group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = facefusion.choices.temp_frame_formats)
group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = range(101), metavar = '[0-100]')
group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = facefusion.choices.temp_frame_quality_range, metavar = create_metavar(facefusion.choices.temp_frame_quality_range))
group_frame_extraction.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action = 'store_true')
# output creation
group_output_creation = program.add_argument_group('output creation')
group_output_creation.add_argument('--output-image-quality', help=wording.get('output_image_quality_help'), dest = 'output_image_quality', type = int, default = 80, choices = range(101), metavar = '[0-100]')
group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), dest = 'output_image_quality', type = int, default = 80, choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range))
group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = facefusion.choices.output_video_encoders)
group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 80, choices = range(101), metavar = '[0-100]')
group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 80, choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range))
group_output_creation.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action = 'store_true')
group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action = 'store_true')
# frame processors
@@ -95,7 +98,10 @@ def apply_args(program : ArgumentParser) -> None:
facefusion.globals.face_analyser_direction = args.face_analyser_direction
facefusion.globals.face_analyser_age = args.face_analyser_age
facefusion.globals.face_analyser_gender = args.face_analyser_gender
facefusion.globals.face_recognition = args.face_recognition
facefusion.globals.face_detection_size = args.face_detection_size
facefusion.globals.face_detection_score = args.face_detection_score
# face selector
facefusion.globals.face_selector_mode = args.face_selector_mode
facefusion.globals.reference_face_position = args.reference_face_position
facefusion.globals.reference_face_distance = args.reference_face_distance
facefusion.globals.reference_frame_number = args.reference_frame_number

facefusion/face_analyser.py

@@ -81,13 +81,13 @@ def pre_check() -> bool:
def extract_faces(frame : Frame) -> List[Face]:
face_detector = get_face_analyser().get('face_detector')
faces: List[Face] = []
temp_frame = resize_frame_dimension(frame, 1024, 1024)
face_detection_width, face_detection_height = map(int, facefusion.globals.face_detection_size.split('x'))
temp_frame = resize_frame_dimension(frame, face_detection_width, face_detection_height)
temp_frame_height, temp_frame_width, _ = temp_frame.shape
frame_height, frame_width, _ = frame.shape
ratio_height = frame_height / temp_frame_height
ratio_width = frame_width / temp_frame_width
face_detector.setScoreThreshold(0.5)
face_detector.setNMSThreshold(0.5)
face_detector.setScoreThreshold(facefusion.globals.face_detection_score)
face_detector.setTopK(100)
face_detector.setInputSize((temp_frame_width, temp_frame_height))
with THREAD_SEMAPHORE:
@@ -219,8 +219,8 @@ def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]:
def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]:
filter_faces = []
for face in faces:
if face.gender == 1 and gender == 'male':
filter_faces.append(face)
if face.gender == 0 and gender == 'female':
filter_faces.append(face)
if face.gender == 1 and gender == 'male':
filter_faces.append(face)
return filter_faces
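A note on the extract_faces hunk above: the configured detection size now bounds the frame handed to the detector, and the two ratios map every detection back into original-frame coordinates. A worked example of that arithmetic, assuming resize_frame_dimension fits the frame inside the detection size while preserving aspect ratio:
```python
frame_width, frame_height = 1920, 1080
face_detection_width, face_detection_height = 640, 640

# assumed fit-inside behaviour of resize_frame_dimension
scale = min(face_detection_width / frame_width, face_detection_height / frame_height)
temp_frame_width, temp_frame_height = int(frame_width * scale), int(frame_height * scale) # 640, 360

ratio_width = frame_width / temp_frame_width # 3.0
ratio_height = frame_height / temp_frame_height # 3.0

# a hypothetical detection in resized coordinates, scaled back up
x, y, width, height = 100, 50, 40, 40
print(x * ratio_width, y * ratio_height, width * ratio_width, height * ratio_height) # 300.0 150.0 120.0 120.0
```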

facefusion/globals.py

@@ -1,6 +1,6 @@
from typing import List, Optional
from facefusion.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
from facefusion.typing import FaceSelectorMode, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
# general
source_path : Optional[str] = None
@@ -18,7 +18,10 @@ max_memory : Optional[int] = None
face_analyser_direction : Optional[FaceAnalyserDirection] = None
face_analyser_age : Optional[FaceAnalyserAge] = None
face_analyser_gender : Optional[FaceAnalyserGender] = None
face_recognition : Optional[FaceRecognition] = None
face_detection_size : Optional[str] = None
face_detection_score : Optional[float] = None
# face selector
face_selector_mode : Optional[FaceSelectorMode] = None
reference_face_position : Optional[int] = None
reference_face_distance : Optional[float] = None
reference_frame_number : Optional[int] = None

facefusion/processors/frame/choices.py

@@ -1,5 +1,10 @@
from typing import List
import numpy
face_swapper_models : List[str] = [ 'inswapper_128', 'inswapper_128_fp16', 'simswap_244' ]
face_enhancer_models : List[str] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_512' ]
frame_enhancer_models : List[str] = [ 'realesrgan_x2plus', 'realesrgan_x4plus', 'realesrnet_x4plus' ]
face_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()
frame_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()

facefusion/processors/frame/modules/face_enhancer.py

@@ -12,7 +12,7 @@ from facefusion.face_analyser import get_many_faces, clear_face_analyser
from facefusion.face_helper import warp_face, paste_back
from facefusion.predictor import clear_predictor
from facefusion.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel
from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, update_status
from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, create_metavar, update_status
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices
@@ -97,7 +97,7 @@ def set_options(key : Literal[ 'model' ], value : Any) -> None:
def register_args(program : ArgumentParser) -> None:
program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), dest = 'face_enhancer_model', default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest= 'face_enhancer_blend', type = int, default= 80, choices = range(101), metavar = '[0-100]')
program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'face_enhancer_blend', type = int, default = 80, choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
def apply_args(program : ArgumentParser) -> None:

facefusion/processors/frame/modules/face_swapper.py

@@ -196,12 +196,12 @@ def normalize_crop_frame(crop_frame : Frame) -> Frame:
def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
if 'reference' in facefusion.globals.face_recognition:
if 'reference' in facefusion.globals.face_selector_mode:
similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
if similar_faces:
for similar_face in similar_faces:
temp_frame = swap_face(source_face, similar_face, temp_frame)
if 'many' in facefusion.globals.face_recognition:
if 'many' in facefusion.globals.face_selector_mode:
many_faces = get_many_faces(temp_frame)
if many_faces:
for target_face in many_faces:
@@ -211,7 +211,7 @@ def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame)
def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
source_face = get_one_face(read_static_image(source_path))
reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None
reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
for temp_frame_path in temp_frame_paths:
temp_frame = read_image(temp_frame_path)
result_frame = process_frame(source_face, reference_face, temp_frame)
@@ -222,7 +222,7 @@ def process_frames(source_path : str, temp_frame_paths : List[str], update_progr
def process_image(source_path : str, target_path : str, output_path : str) -> None:
source_face = get_one_face(read_static_image(source_path))
target_frame = read_static_image(target_path)
reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_recognition else None
reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_selector_mode else None
result_frame = process_frame(source_face, reference_face, target_frame)
write_image(output_path, result_frame)
@@ -233,7 +233,7 @@ def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
def conditional_set_face_reference(temp_frame_paths : List[str]) -> None:
if 'reference' in facefusion.globals.face_recognition and not get_face_reference():
if 'reference' in facefusion.globals.face_selector_mode and not get_face_reference():
reference_frame = read_static_image(temp_frame_paths[facefusion.globals.reference_frame_number])
reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
set_face_reference(reference_face)
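In reference mode everything hinges on the embedding distance checked against --reference-face-distance [0.0-1.5]. The real find_similar_faces lives in facefusion/face_analyser.py and is not part of this hunk; the toy below only illustrates the idea, with the Euclidean metric on embeddings as an assumption:
```python
import numpy

def find_similar_faces_sketch(embeddings : list, reference_embedding : numpy.ndarray, face_distance : float) -> list:
	# keep candidates whose distance to the reference stays under the threshold
	return [ embedding for embedding in embeddings if numpy.linalg.norm(embedding - reference_embedding) < face_distance ]

reference_embedding = numpy.array([ 1.0, 0.0 ])
embeddings = [ numpy.array([ 0.9, 0.1 ]), numpy.array([ -1.0, 0.0 ]) ]
print(len(find_similar_faces_sketch(embeddings, reference_embedding, 0.6))) # 1
```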

facefusion/processors/frame/modules/frame_enhancer.py

@@ -11,7 +11,7 @@ from facefusion import wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.predictor import clear_predictor
from facefusion.typing import Frame, Face, Update_Process, ProcessMode, ModelValue, OptionsWithModel
from facefusion.utilities import conditional_download, resolve_relative_path, is_file, is_download_done, map_device, update_status
from facefusion.utilities import conditional_download, resolve_relative_path, is_file, is_download_done, map_device, create_metavar, update_status
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices
@@ -89,7 +89,7 @@ def set_options(key : Literal[ 'model' ], value : Any) -> None:
def register_args(program : ArgumentParser) -> None:
program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), dest = 'frame_enhancer_model', default = 'realesrgan_x2plus', choices = frame_processors_choices.frame_enhancer_models)
program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'frame_enhancer_blend', type = int, default = 80, choices = range(101), metavar = '[0-100]')
program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'frame_enhancer_blend', type = int, default = 80, choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))
def apply_args(program : ArgumentParser) -> None:

facefusion/typing.py

@@ -14,7 +14,7 @@ Process_Frames = Callable[[str, List[str], Update_Process], None]
Template = Literal[ 'arcface', 'ffhq' ]
ProcessMode = Literal[ 'output', 'preview', 'stream' ]
FaceRecognition = Literal[ 'reference', 'many' ]
FaceSelectorMode = Literal[ 'reference', 'many' ]
FaceAnalyserDirection = Literal[ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ]
FaceAnalyserAge = Literal[ 'child', 'teen', 'adult', 'senior' ]
FaceAnalyserGender = Literal[ 'male', 'female' ]

facefusion/uis/choices.py

@@ -4,4 +4,4 @@ from facefusion.uis.typing import WebcamMode
common_options : List[str] = [ 'keep-fps', 'keep-temp', 'skip-audio', 'skip-download' ]
webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
webcam_resolutions : List[str] = [ '320x240', '640x480', '1280x720', '1920x1080', '2560x1440', '3840x2160' ]
webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ]

facefusion/uis/components/common_options.py

@@ -3,7 +3,7 @@ import gradio
import facefusion.globals
from facefusion import wording
from facefusion.uis import choices
from facefusion.uis import choices as uis_choices
COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.Checkboxgroup] = None
@@ -22,7 +22,7 @@ def render() -> None:
value.append('skip-download')
COMMON_OPTIONS_CHECKBOX_GROUP = gradio.Checkboxgroup(
label = wording.get('common_options_checkbox_group_label'),
choices = choices.common_options,
choices = uis_choices.common_options,
value = value
)

facefusion/uis/components/execution_queue_count.py

@@ -2,6 +2,7 @@ from typing import Optional
import gradio
import facefusion.globals
import facefusion.choices
from facefusion import wording
EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None
@@ -13,9 +14,9 @@ def render() -> None:
EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider(
label = wording.get('execution_queue_count_slider_label'),
value = facefusion.globals.execution_queue_count,
step = 1,
minimum = 1,
maximum = 16
step = facefusion.choices.execution_queue_count_range[1] - facefusion.choices.execution_queue_count_range[0],
minimum = facefusion.choices.execution_queue_count_range[0],
maximum = facefusion.choices.execution_queue_count_range[-1]
)
@@ -25,4 +26,3 @@ def listen() -> None:
def update_execution_queue_count(execution_queue_count : int = 1) -> None:
facefusion.globals.execution_queue_count = execution_queue_count

facefusion/uis/components/execution_thread_count.py

@@ -2,6 +2,7 @@ from typing import Optional
import gradio
import facefusion.globals
import facefusion.choices
from facefusion import wording
EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None
@@ -13,9 +14,9 @@ def render() -> None:
EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
label = wording.get('execution_thread_count_slider_label'),
value = facefusion.globals.execution_thread_count,
step = 1,
minimum = 1,
maximum = 128
step = facefusion.choices.execution_thread_count_range[1] - facefusion.choices.execution_thread_count_range[0],
minimum = facefusion.choices.execution_thread_count_range[0],
maximum = facefusion.choices.execution_thread_count_range[-1]
)
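The sliders now derive step, minimum and maximum from the same range lists that back the CLI, so the two can no longer drift apart; the step is simply the gap between the first two entries. A self-contained sketch:
```python
import numpy

execution_thread_count_range = numpy.arange(1, 129, 1).tolist()
step = execution_thread_count_range[1] - execution_thread_count_range[0] # 1
minimum = execution_thread_count_range[0] # 1
maximum = execution_thread_count_range[-1] # 128
```
Note that the float ranges built with a 0.05 step may carry the usual binary floating-point noise in intermediate entries (e.g. 0.30000000000000004), which is worth keeping in mind when comparing values against them.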

facefusion/uis/components/face_analyser.py

@@ -2,49 +2,84 @@ from typing import Optional
import gradio
import facefusion.choices
import facefusion.globals
import facefusion.choices
from facefusion import wording
from facefusion.typing import FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender
from facefusion.uis.core import register_ui_component
FACE_ANALYSER_DIRECTION_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_DETECTION_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_DETECTION_SCORE_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
global FACE_ANALYSER_DIRECTION_DROPDOWN
global FACE_ANALYSER_AGE_DROPDOWN
global FACE_ANALYSER_GENDER_DROPDOWN
global FACE_DETECTION_SIZE_DROPDOWN
global FACE_DETECTION_SCORE_SLIDER
FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown(
label = wording.get('face_analyser_direction_dropdown_label'),
choices = facefusion.choices.face_analyser_directions,
value = facefusion.globals.face_analyser_direction
with gradio.Row():
FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown(
label = wording.get('face_analyser_direction_dropdown_label'),
choices = facefusion.choices.face_analyser_directions,
value = facefusion.globals.face_analyser_direction
)
FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
label = wording.get('face_analyser_age_dropdown_label'),
choices = [ 'none' ] + facefusion.choices.face_analyser_ages,
value = facefusion.globals.face_analyser_age or 'none'
)
FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
label = wording.get('face_analyser_gender_dropdown_label'),
choices = [ 'none' ] + facefusion.choices.face_analyser_genders,
value = facefusion.globals.face_analyser_gender or 'none'
)
FACE_DETECTION_SIZE_DROPDOWN = gradio.Dropdown(
label = wording.get('face_detection_size_dropdown_label'),
choices = facefusion.choices.face_detection_sizes,
value = facefusion.globals.face_detection_size
)
FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
label = wording.get('face_analyser_age_dropdown_label'),
choices = [ 'none' ] + facefusion.choices.face_analyser_ages,
value = facefusion.globals.face_analyser_age or 'none'
)
FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
label = wording.get('face_analyser_gender_dropdown_label'),
choices = [ 'none' ] + facefusion.choices.face_analyser_genders,
value = facefusion.globals.face_analyser_gender or 'none'
FACE_DETECTION_SCORE_SLIDER = gradio.Slider(
label = wording.get('face_detection_score_slider_label'),
value = facefusion.globals.face_detection_score,
step = facefusion.choices.face_detection_score_range[1] - facefusion.choices.face_detection_score_range[0],
minimum = facefusion.choices.face_detection_score_range[0],
maximum = facefusion.choices.face_detection_score_range[-1]
)
register_ui_component('face_analyser_direction_dropdown', FACE_ANALYSER_DIRECTION_DROPDOWN)
register_ui_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN)
register_ui_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN)
register_ui_component('face_detection_size_dropdown', FACE_DETECTION_SIZE_DROPDOWN)
register_ui_component('face_detection_score_slider', FACE_DETECTION_SCORE_SLIDER)
def listen() -> None:
FACE_ANALYSER_DIRECTION_DROPDOWN.select(lambda value: update_dropdown('face_analyser_direction', value), inputs = FACE_ANALYSER_DIRECTION_DROPDOWN)
FACE_ANALYSER_AGE_DROPDOWN.select(lambda value: update_dropdown('face_analyser_age', value), inputs = FACE_ANALYSER_AGE_DROPDOWN)
FACE_ANALYSER_GENDER_DROPDOWN.select(lambda value: update_dropdown('face_analyser_gender', value), inputs = FACE_ANALYSER_GENDER_DROPDOWN)
FACE_ANALYSER_DIRECTION_DROPDOWN.select(update_face_analyser_direction, inputs = FACE_ANALYSER_DIRECTION_DROPDOWN)
FACE_ANALYSER_AGE_DROPDOWN.select(update_face_analyser_age, inputs = FACE_ANALYSER_AGE_DROPDOWN)
FACE_ANALYSER_GENDER_DROPDOWN.select(update_face_analyser_gender, inputs = FACE_ANALYSER_GENDER_DROPDOWN)
FACE_DETECTION_SIZE_DROPDOWN.select(update_face_detection_size, inputs = FACE_DETECTION_SIZE_DROPDOWN)
FACE_DETECTION_SCORE_SLIDER.change(update_face_detection_score, inputs = FACE_DETECTION_SCORE_SLIDER)
def update_dropdown(name : str, value : str) -> None:
if value == 'none':
setattr(facefusion.globals, name, None)
else:
setattr(facefusion.globals, name, value)
def update_face_analyser_direction(face_analyser_direction : FaceAnalyserDirection) -> None:
facefusion.globals.face_analyser_direction = face_analyser_direction if face_analyser_direction != 'none' else None
def update_face_analyser_age(face_analyser_age : FaceAnalyserAge) -> None:
facefusion.globals.face_analyser_age = face_analyser_age if face_analyser_age != 'none' else None
def update_face_analyser_gender(face_analyser_gender : FaceAnalyserGender) -> None:
facefusion.globals.face_analyser_gender = face_analyser_gender if face_analyser_gender != 'none' else None
def update_face_detection_size(face_detection_size : str) -> None:
facefusion.globals.face_detection_size = face_detection_size
def update_face_detection_score(face_detection_score : float) -> None:
facefusion.globals.face_detection_score = face_detection_score

facefusion/uis/components/face_selector.py

@@ -2,24 +2,25 @@ from typing import List, Optional, Tuple, Any, Dict
import gradio
import facefusion.choices
import facefusion.globals
import facefusion.choices
from facefusion import wording
from facefusion.face_cache import clear_faces_cache
from facefusion.vision import get_video_frame, read_static_image, normalize_frame_color
from facefusion.face_analyser import get_many_faces
from facefusion.face_analyser import get_many_faces, clear_face_analyser
from facefusion.face_reference import clear_face_reference
from facefusion.typing import Frame, FaceRecognition
from facefusion.typing import Frame, FaceSelectorMode
from facefusion.utilities import is_image, is_video
from facefusion.uis.core import get_ui_component, register_ui_component
from facefusion.uis.typing import ComponentName
FACE_RECOGNITION_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None
REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None
def render() -> None:
global FACE_RECOGNITION_DROPDOWN
global FACE_SELECTOR_MODE_DROPDOWN
global REFERENCE_FACE_POSITION_GALLERY
global REFERENCE_FACE_DISTANCE_SLIDER
@@ -30,7 +31,7 @@ def render() -> None:
'object_fit': 'cover',
'columns': 10,
'allow_preview': False,
'visible': 'reference' in facefusion.globals.face_recognition
'visible': 'reference' in facefusion.globals.face_selector_mode
}
if is_image(facefusion.globals.target_path):
reference_frame = read_static_image(facefusion.globals.target_path)
@@ -38,27 +39,27 @@ def render() -> None:
if is_video(facefusion.globals.target_path):
reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
FACE_RECOGNITION_DROPDOWN = gradio.Dropdown(
label = wording.get('face_recognition_dropdown_label'),
choices = facefusion.choices.face_recognitions,
value = facefusion.globals.face_recognition
FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown(
label = wording.get('face_selector_mode_dropdown_label'),
choices = facefusion.choices.face_selector_modes,
value = facefusion.globals.face_selector_mode
)
REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider(
label = wording.get('reference_face_distance_slider_label'),
value = facefusion.globals.reference_face_distance,
step = 0.05,
minimum = 0,
maximum = 1.5,
visible = 'reference' in facefusion.globals.face_recognition
step = facefusion.choices.reference_face_distance_range[1] - facefusion.choices.reference_face_distance_range[0],
minimum = facefusion.choices.reference_face_distance_range[0],
maximum = facefusion.choices.reference_face_distance_range[-1],
visible = 'reference' in facefusion.globals.face_selector_mode
)
register_ui_component('face_recognition_dropdown', FACE_RECOGNITION_DROPDOWN)
register_ui_component('face_selector_mode_dropdown', FACE_SELECTOR_MODE_DROPDOWN)
register_ui_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY)
register_ui_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER)
def listen() -> None:
FACE_RECOGNITION_DROPDOWN.select(update_face_recognition, inputs = FACE_RECOGNITION_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
FACE_SELECTOR_MODE_DROPDOWN.select(update_face_selector_mode, inputs = FACE_SELECTOR_MODE_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_reference_face_position)
REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)
multi_component_names : List[ComponentName] =\
@@ -70,40 +71,69 @@ def listen() -> None:
component = get_ui_component(component_name)
if component:
for method in [ 'upload', 'change', 'clear' ]:
getattr(component, method)(update_reference_face_position, outputs = REFERENCE_FACE_POSITION_GALLERY)
select_component_names : List[ComponentName] =\
getattr(component, method)(update_reference_face_position)
getattr(component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
change_one_component_names : List[ComponentName] =\
[
'face_analyser_direction_dropdown',
'face_analyser_age_dropdown',
'face_analyser_gender_dropdown'
]
for component_name in select_component_names:
for component_name in change_one_component_names:
component = get_ui_component(component_name)
if component:
component.select(update_reference_face_position, outputs = REFERENCE_FACE_POSITION_GALLERY)
component.change(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
change_two_component_names : List[ComponentName] =\
[
'face_detection_size_dropdown',
'face_detection_score_slider'
]
for component_name in change_two_component_names:
component = get_ui_component(component_name)
if component:
component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
preview_frame_slider = get_ui_component('preview_frame_slider')
if preview_frame_slider:
preview_frame_slider.change(update_reference_frame_number, inputs = preview_frame_slider)
preview_frame_slider.release(update_reference_face_position, outputs = REFERENCE_FACE_POSITION_GALLERY)
preview_frame_slider.release(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
def update_face_recognition(face_recognition : FaceRecognition) -> Tuple[gradio.Gallery, gradio.Slider]:
if face_recognition == 'reference':
facefusion.globals.face_recognition = face_recognition
def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]:
if face_selector_mode == 'reference':
facefusion.globals.face_selector_mode = face_selector_mode
return gradio.Gallery(visible = True), gradio.Slider(visible = True)
if face_recognition == 'many':
facefusion.globals.face_recognition = face_recognition
if face_selector_mode == 'many':
facefusion.globals.face_selector_mode = face_selector_mode
return gradio.Gallery(visible = False), gradio.Slider(visible = False)
def clear_and_update_reference_face_position(event : gradio.SelectData) -> gradio.Gallery:
clear_face_reference()
return update_reference_face_position(event.index)
update_reference_face_position(event.index)
return update_reference_position_gallery()
def update_reference_face_position(reference_face_position : int = 0) -> gradio.Gallery:
gallery_frames = []
def update_reference_face_position(reference_face_position : int = 0) -> None:
facefusion.globals.reference_face_position = reference_face_position
def update_reference_face_distance(reference_face_distance : float) -> None:
facefusion.globals.reference_face_distance = reference_face_distance
def update_reference_frame_number(reference_frame_number : int) -> None:
facefusion.globals.reference_frame_number = reference_frame_number
def clear_and_update_reference_position_gallery() -> gradio.Gallery:
clear_face_analyser()
clear_face_reference()
clear_faces_cache()
return update_reference_position_gallery()
def update_reference_position_gallery() -> gradio.Gallery:
gallery_frames = []
if is_image(facefusion.globals.target_path):
reference_frame = read_static_image(facefusion.globals.target_path)
gallery_frames = extract_gallery_frames(reference_frame)
@@ -115,14 +145,6 @@ def update_reference_face_position(reference_face_position : int = 0) -> gradio.
return gradio.Gallery(value = None)
def update_reference_face_distance(reference_face_distance : float) -> None:
facefusion.globals.reference_face_distance = reference_face_distance
def update_reference_frame_number(reference_frame_number : int) -> None:
facefusion.globals.reference_frame_number = reference_frame_number
def extract_gallery_frames(reference_frame : Frame) -> List[Frame]:
crop_frames = []
faces = get_many_faces(reference_frame)
@@ -138,4 +160,3 @@ def extract_gallery_frames(reference_frame : Frame) -> List[Frame]:
crop_frame = normalize_frame_color(crop_frame)
crop_frames.append(crop_frame)
return crop_frames

facefusion/uis/components/frame_processors_options.py

@@ -36,9 +36,9 @@ def render() -> None:
FACE_ENHANCER_BLEND_SLIDER = gradio.Slider(
label = wording.get('face_enhancer_blend_slider_label'),
value = frame_processors_globals.face_enhancer_blend,
step = 1,
minimum = 0,
maximum = 100,
step = frame_processors_choices.face_enhancer_blend_range[1] - frame_processors_choices.face_enhancer_blend_range[0],
minimum = frame_processors_choices.face_enhancer_blend_range[0],
maximum = frame_processors_choices.face_enhancer_blend_range[-1],
visible = 'face_enhancer' in facefusion.globals.frame_processors
)
FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
@@ -50,9 +50,9 @@ def render() -> None:
FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider(
label = wording.get('frame_enhancer_blend_slider_label'),
value = frame_processors_globals.frame_enhancer_blend,
step = 1,
minimum = 0,
maximum = 100,
step = frame_processors_choices.frame_enhancer_blend_range[1] - frame_processors_choices.frame_enhancer_blend_range[0],
minimum = frame_processors_choices.frame_enhancer_blend_range[0],
maximum = frame_processors_choices.frame_enhancer_blend_range[-1],
visible = 'face_enhancer' in facefusion.globals.frame_processors
)
register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN)

facefusion/uis/components/limit_resources.py

@@ -2,6 +2,7 @@ from typing import Optional
import gradio
import facefusion.globals
import facefusion.choices
from facefusion import wording
MAX_MEMORY_SLIDER : Optional[gradio.Slider] = None
@@ -12,9 +13,9 @@ def render() -> None:
MAX_MEMORY_SLIDER = gradio.Slider(
label = wording.get('max_memory_slider_label'),
step = 1,
minimum = 0,
maximum = 128
step = facefusion.choices.max_memory_range[1] - facefusion.choices.max_memory_range[0],
minimum = facefusion.choices.max_memory_range[0],
maximum = facefusion.choices.max_memory_range[-1]
)

facefusion/uis/components/output_options.py

@@ -2,8 +2,8 @@ from typing import Optional, Tuple, List
import tempfile
import gradio
import facefusion.choices
import facefusion.globals
import facefusion.choices
from facefusion import wording
from facefusion.typing import OutputVideoEncoder
from facefusion.utilities import is_image, is_video
@@ -30,9 +30,9 @@ def render() -> None:
OUTPUT_IMAGE_QUALITY_SLIDER = gradio.Slider(
label = wording.get('output_image_quality_slider_label'),
value = facefusion.globals.output_image_quality,
step = 1,
minimum = 0,
maximum = 100,
step = facefusion.choices.output_image_quality_range[1] - facefusion.choices.output_image_quality_range[0],
minimum = facefusion.choices.output_image_quality_range[0],
maximum = facefusion.choices.output_image_quality_range[-1],
visible = is_image(facefusion.globals.target_path)
)
OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
@@ -44,9 +44,9 @@ def render() -> None:
OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider(
label = wording.get('output_video_quality_slider_label'),
value = facefusion.globals.output_video_quality,
step = 1,
minimum = 0,
maximum = 100,
step = facefusion.choices.output_video_quality_range[1] - facefusion.choices.output_video_quality_range[0],
minimum = facefusion.choices.output_video_quality_range[0],
maximum = facefusion.choices.output_video_quality_range[-1],
visible = is_video(facefusion.globals.target_path)
)
register_ui_component('output_path_textbox', OUTPUT_PATH_TEXTBOX)

facefusion/uis/components/preview.py

@@ -38,7 +38,7 @@ def render() -> None:
}
conditional_set_face_reference()
source_face = get_one_face(read_static_image(facefusion.globals.source_path))
reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None
reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
if is_image(facefusion.globals.target_path):
target_frame = read_static_image(facefusion.globals.target_path)
preview_frame = process_preview_frame(source_face, reference_face, target_frame)
@@ -90,9 +90,9 @@ def listen() -> None:
component = get_ui_component(component_name)
if component:
component.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
change_component_names : List[ComponentName] =\
change_one_component_names : List[ComponentName] =\
[
'face_recognition_dropdown',
'face_selector_mode_dropdown',
'reference_face_distance_slider',
'frame_processors_checkbox_group',
'face_enhancer_model_dropdown',
@@ -100,13 +100,20 @@ def listen() -> None:
'frame_enhancer_model_dropdown',
'frame_enhancer_blend_slider'
]
for component_name in change_component_names:
for component_name in change_one_component_names:
component = get_ui_component(component_name)
if component:
component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
face_swapper_model_dropdown = get_ui_component('face_swapper_model_dropdown')
if face_swapper_model_dropdown:
face_swapper_model_dropdown.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
change_two_component_names : List[ComponentName] =\
[
'face_swapper_model_dropdown',
'face_detection_size_dropdown',
'face_detection_score_slider'
]
for component_name in change_two_component_names:
component = get_ui_component(component_name)
if component:
component.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:
@@ -119,7 +126,7 @@ def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:
def update_preview_image(frame_number : int = 0) -> gradio.Image:
conditional_set_face_reference()
source_face = get_one_face(read_static_image(facefusion.globals.source_path))
reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None
reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
if is_image(facefusion.globals.target_path):
target_frame = read_static_image(facefusion.globals.target_path)
preview_frame = process_preview_frame(source_face, reference_face, target_frame)
@@ -156,7 +163,7 @@ def process_preview_frame(source_face : Face, reference_face : Face, temp_frame
def conditional_set_face_reference() -> None:
if 'reference' in facefusion.globals.face_recognition and not get_face_reference():
if 'reference' in facefusion.globals.face_selector_mode and not get_face_reference():
reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
set_face_reference(reference_face)

facefusion/uis/components/temp_frame.py

@@ -1,8 +1,8 @@
from typing import Optional, Tuple
import gradio
import facefusion.choices
import facefusion.globals
import facefusion.choices
from facefusion import wording
from facefusion.typing import TempFrameFormat
from facefusion.utilities import is_video
@@ -25,9 +25,9 @@ def render() -> None:
TEMP_FRAME_QUALITY_SLIDER = gradio.Slider(
label = wording.get('temp_frame_quality_slider_label'),
value = facefusion.globals.temp_frame_quality,
step = 1,
minimum = 0,
maximum = 100,
step = facefusion.choices.temp_frame_quality_range[1] - facefusion.choices.temp_frame_quality_range[0],
minimum = facefusion.choices.temp_frame_quality_range[0],
maximum = facefusion.choices.temp_frame_quality_range[-1],
visible = is_video(facefusion.globals.target_path)
)

facefusion/uis/components/trim_frame.py

@@ -39,8 +39,9 @@ def render() -> None:
trim_frame_end_slider_args['value'] = facefusion.globals.trim_frame_end or video_frame_total
trim_frame_end_slider_args['maximum'] = video_frame_total
trim_frame_end_slider_args['visible'] = True
TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args)
TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args)
with gradio.Row():
TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args)
TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args)
def listen() -> None:

facefusion/uis/components/webcam.py

@@ -61,7 +61,7 @@ def listen() -> None:
def start(mode : WebcamMode, resolution : str, fps : float) -> Generator[Frame, None, None]:
facefusion.globals.face_recognition = 'many'
facefusion.globals.face_selector_mode = 'many'
source_face = get_one_face(read_static_image(facefusion.globals.source_path))
stream = None
if mode in [ 'udp', 'v4l2' ]:
@@ -99,14 +99,14 @@ def stop() -> gradio.Image:
def capture_webcam(resolution : str, fps : float) -> cv2.VideoCapture:
width, height = resolution.split('x')
webcam_width, webcam_height = map(int, resolution.split('x'))
if platform.system().lower() == 'windows':
capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
else:
capture = cv2.VideoCapture(0)
capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) # type: ignore[attr-defined]
capture.set(cv2.CAP_PROP_FRAME_WIDTH, int(width))
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, int(height))
capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height)
capture.set(cv2.CAP_PROP_FPS, fps)
return capture
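capture_webcam now parses the resolution into integers once before configuring the device. A minimal usage sketch, assuming a webcam is available at device index 0:
```python
import cv2

webcam_width, webcam_height = map(int, '1280x720'.split('x'))
capture = cv2.VideoCapture(0) # device index 0 is an assumption
capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height)
has_frame, frame = capture.read() # frame is a height x width x 3 BGR array when has_frame is True
capture.release()
```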

facefusion/uis/components/webcam_options.py

@@ -2,7 +2,7 @@ from typing import Optional
import gradio
from facefusion import wording
from facefusion.uis import choices
from facefusion.uis import choices as uis_choices
from facefusion.uis.core import register_ui_component
WEBCAM_MODE_RADIO : Optional[gradio.Radio] = None
@@ -17,13 +17,13 @@ def render() -> None:
WEBCAM_MODE_RADIO = gradio.Radio(
label = wording.get('webcam_mode_radio_label'),
choices = choices.webcam_modes,
choices = uis_choices.webcam_modes,
value = 'inline'
)
WEBCAM_RESOLUTION_DROPDOWN = gradio.Dropdown(
label = wording.get('webcam_resolution_dropdown'),
choices = choices.webcam_resolutions,
value = choices.webcam_resolutions[0]
choices = uis_choices.webcam_resolutions,
value = uis_choices.webcam_resolutions[0]
)
WEBCAM_FPS_SLIDER = gradio.Slider(
label = wording.get('webcam_fps_slider'),

facefusion/uis/layouts/benchmark.py

@@ -43,7 +43,7 @@ def render() -> gradio.Blocks:
limit_resources.render()
with gradio.Blocks():
benchmark_options.render()
with gradio.Column(scale= 5):
with gradio.Column(scale = 5):
with gradio.Blocks():
benchmark.render()
return layout

facefusion/uis/layouts/default.py

@@ -40,11 +40,11 @@ def render() -> gradio.Blocks:
with gradio.Column(scale = 3):
with gradio.Blocks():
preview.render()
with gradio.Row():
with gradio.Blocks():
trim_frame.render()
with gradio.Blocks():
face_selector.render()
with gradio.Row():
with gradio.Blocks():
face_analyser.render()
with gradio.Blocks():
common_options.render()

facefusion/uis/typing.py

@@ -8,12 +8,14 @@ ComponentName = Literal\
'target_image',
'target_video',
'preview_frame_slider',
'face_recognition_dropdown',
'face_selector_mode_dropdown',
'reference_face_position_gallery',
'reference_face_distance_slider',
'face_analyser_direction_dropdown',
'face_analyser_age_dropdown',
'face_analyser_gender_dropdown',
'face_detection_size_dropdown',
'face_detection_score_slider',
'frame_processors_checkbox_group',
'face_swapper_model_dropdown',
'face_enhancer_model_dropdown',

facefusion/utilities.py

@@ -1,6 +1,7 @@
from typing import List, Optional
from typing import Any, List, Optional
from functools import lru_cache
from pathlib import Path
from tqdm import tqdm
import glob
import mimetypes
@@ -241,5 +242,9 @@ def map_device(execution_providers : List[str]) -> str:
return 'cpu'
def create_metavar(ranges : List[Any]) -> str:
return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'
def update_status(message : str, scope : str = 'FACEFUSION.CORE') -> None:
print('[' + scope + '] ' + message)

facefusion/wording.py

@@ -7,16 +7,18 @@ WORDING =\
'target_help': 'select a target image or video',
'output_help': 'specify the output file or directory',
'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)',
'frame_processor_model_help': 'choose from the mode for the frame processor',
'frame_processor_model_help': 'choose the model for the frame processor',
'frame_processor_blend_help': 'specify the blend factor for the frame processor',
'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)',
'keep_fps_help': 'preserve the frames per second (fps) of the target',
'keep_temp_help': 'retain temporary frames after processing',
'skip_audio_help': 'omit audio from the target',
'face_recognition_help': 'specify the method for face recognition',
'face_analyser_direction_help': 'specify the direction used for face analysis',
'face_analyser_age_help': 'specify the age used for face analysis',
'face_analyser_gender_help': 'specify the gender used for face analysis',
'face_detection_size_help': 'specify the size threshold used for face detection',
'face_detection_score_help': 'specify the score threshold used for face detection',
'face_selector_mode_help': 'specify the mode for face selection',
'reference_face_position_help': 'specify the position of the reference face',
'reference_face_distance_help': 'specify the distance between the reference face and the target face',
'reference_frame_number_help': 'specify the number of the reference frame',
@@ -74,8 +76,10 @@ WORDING =\
'face_analyser_direction_dropdown_label': 'FACE ANALYSER DIRECTION',
'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE',
'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER',
'face_selector_mode_dropdown_label': 'FACE SELECTOR MODE',
'face_detection_size_dropdown_label': 'FACE DETECTION SIZE',
'face_detection_score_slider_label': 'FACE DETECTION SCORE',
'reference_face_gallery_label': 'REFERENCE FACE',
'face_recognition_dropdown_label': 'FACE RECOGNITION',
'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE',
'max_memory_slider_label': 'MAX MEMORY',
'output_image_or_video_label': 'OUTPUT',