DFM Morph (#816)
* changes
* Improve wording, Replace [None], SideQuest: clean forward() of age modifier
* SideQuest: clean forward() of face enhancer

Co-authored-by: henryruhs <info@henryruhs.com>
parent 28f7dba897
commit 5a0c2cad96
@@ -59,6 +59,7 @@ processors =
 age_modifier_model =
 age_modifier_direction =
 deep_swapper_model =
+deep_swapper_morph =
 expression_restorer_model =
 expression_restorer_factor =
 face_debugger_items =
@@ -28,6 +28,7 @@ frame_enhancer_models : List[FrameEnhancerModel] = [ 'clear_reality_x4', 'lsdir_
 lip_syncer_models : List[LipSyncerModel] = [ 'wav2lip_96', 'wav2lip_gan_96' ]

 age_modifier_direction_range : Sequence[int] = create_int_range(-100, 100, 1)
+deep_swapper_morph_range : Sequence[int] = create_int_range(0, 100, 1)
 expression_restorer_factor_range : Sequence[int] = create_int_range(0, 100, 1)
 face_editor_eyebrow_direction_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
 face_editor_eye_gaze_horizontal_range : Sequence[float] = create_float_range(-1.0, 1.0, 0.05)
@@ -1,9 +1,8 @@
 from argparse import ArgumentParser
-from typing import Any, List
+from typing import List

 import cv2
 import numpy
-from numpy.typing import NDArray

 import facefusion.jobs.job_manager
 import facefusion.jobs.job_store
@@ -18,7 +17,7 @@ from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
 from facefusion.face_store import get_reference_faces
 from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
 from facefusion.processors import choices as processors_choices
-from facefusion.processors.typing import AgeModifierInputs
+from facefusion.processors.typing import AgeModifierDirection, AgeModifierInputs
 from facefusion.program_helper import find_argument_group
 from facefusion.thread_helper import thread_semaphore
 from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
@@ -145,7 +144,8 @@ def modify_age(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:

 	crop_vision_frame = prepare_vision_frame(crop_vision_frame)
 	extend_vision_frame = prepare_vision_frame(extend_vision_frame)
-	extend_vision_frame = forward(crop_vision_frame, extend_vision_frame)
+	age_modifier_direction = numpy.array(numpy.interp(state_manager.get_item('age_modifier_direction'), [-100, 100], [2.5, -2.5])).astype(numpy.float32)
+	extend_vision_frame = forward(crop_vision_frame, extend_vision_frame, age_modifier_direction)
 	extend_vision_frame = normalize_extend_frame(extend_vision_frame)
 	extend_vision_frame = match_frame_color(extend_vision_frame_raw, extend_vision_frame)
 	extend_affine_matrix *= (model_sizes.get('target')[0] * 4) / model_sizes.get('target_with_background')[0]
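Note: the direction preparation that previously lived in prepare_direction() is now inlined into modify_age() and handed to forward() as a ready-made tensor. The UI value in [-100, 100] is interpolated onto the model's inverted [2.5, -2.5] range and cast to float32. A minimal, standalone sketch of that conversion (the helper name is only for illustration):

import numpy

def to_direction_tensor(age_modifier_direction : int) -> numpy.ndarray:
	# map the user-facing range [-100, 100] onto the model range [2.5, -2.5]
	direction = numpy.interp(age_modifier_direction, [ -100, 100 ], [ 2.5, -2.5 ])
	# the ONNX 'direction' input expects a float32 scalar array
	return numpy.array(direction).astype(numpy.float32)

print(to_direction_tensor(100))  # -2.5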
@@ -155,7 +155,7 @@ def modify_age(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
 	return paste_vision_frame


-def forward(crop_vision_frame : VisionFrame, extend_vision_frame : VisionFrame) -> VisionFrame:
+def forward(crop_vision_frame : VisionFrame, extend_vision_frame : VisionFrame, age_modifier_direction : AgeModifierDirection) -> VisionFrame:
 	age_modifier = get_inference_pool().get('age_modifier')
 	age_modifier_inputs = {}

@@ -165,7 +165,7 @@ def forward(crop_vision_frame : VisionFrame, extend_vision_frame : VisionFrame)
 		if age_modifier_input.name == 'target_with_background':
 			age_modifier_inputs[age_modifier_input.name] = extend_vision_frame
 		if age_modifier_input.name == 'direction':
-			age_modifier_inputs[age_modifier_input.name] = prepare_direction(state_manager.get_item('age_modifier_direction'))
+			age_modifier_inputs[age_modifier_input.name] = age_modifier_direction

 	with thread_semaphore():
 		crop_vision_frame = age_modifier.run(None, age_modifier_inputs)[0][0]
@@ -173,11 +173,6 @@ def forward(crop_vision_frame : VisionFrame, extend_vision_frame : VisionFrame)
 	return crop_vision_frame


-def prepare_direction(direction : int) -> NDArray[Any]:
-	direction = numpy.interp(float(direction), [ -100, 100 ], [ 2.5, -2.5 ]) #type:ignore[assignment]
-	return numpy.array(direction).astype(numpy.float32)
-
-
 def prepare_vision_frame(vision_frame : VisionFrame) -> VisionFrame:
 	vision_frame = vision_frame[:, :, ::-1] / 255.0
 	vision_frame = (vision_frame - 0.5) / 0.5
@@ -8,6 +8,7 @@ import facefusion.jobs.job_manager
 import facefusion.jobs.job_store
 import facefusion.processors.core as processors
 from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording
+from facefusion.common_helper import create_int_metavar
 from facefusion.download import conditional_download_hashes, conditional_download_sources, resolve_download_url_by_provider
 from facefusion.face_analyser import get_many_faces, get_one_face
 from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
@@ -16,7 +17,7 @@ from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
 from facefusion.face_store import get_reference_faces
 from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
 from facefusion.processors import choices as processors_choices
-from facefusion.processors.typing import DeepSwapperInputs
+from facefusion.processors.typing import DeepSwapperInputs, DeepSwapperMorph
 from facefusion.program_helper import find_argument_group
 from facefusion.thread_helper import thread_semaphore
 from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, Mask, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
@@ -154,11 +155,13 @@ def register_args(program : ArgumentParser) -> None:
 	group_processors = find_argument_group(program, 'processors')
 	if group_processors:
 		group_processors.add_argument('--deep-swapper-model', help = wording.get('help.deep_swapper_model'), default = config.get_str_value('processors.deep_swapper_model', 'iperov/jackie_chan_224'), choices = processors_choices.deep_swapper_models)
-		facefusion.jobs.job_store.register_step_keys([ 'deep_swapper_model' ])
+		group_processors.add_argument('--deep-swapper-morph', help = wording.get('help.deep_swapper_morph'), type = int, default = config.get_int_value('processors.deep_swapper_morph', '80'), choices = processors_choices.deep_swapper_morph_range, metavar = create_int_metavar(processors_choices.deep_swapper_morph_range))
+		facefusion.jobs.job_store.register_step_keys([ 'deep_swapper_model', 'deep_swapper_morph' ])


 def apply_args(args : Args, apply_state_item : ApplyStateItem) -> None:
 	apply_state_item('deep_swapper_model', args.get('deep_swapper_model'))
+	apply_state_item('deep_swapper_morph', args.get('deep_swapper_morph'))


 def pre_check() -> bool:
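Note: the new --deep-swapper-morph argument follows the usual processor-option pattern: an int constrained to deep_swapper_morph_range, defaulting to processors.deep_swapper_morph from the config (falling back to 80), and registered as a job step key so it is persisted per job step. A minimal argparse sketch of the same pattern, using standalone stand-ins for the config lookup and for create_int_metavar:

from argparse import ArgumentParser

deep_swapper_morph_range = range(0, 101)  # stand-in for processors_choices.deep_swapper_morph_range

program = ArgumentParser()
program.add_argument('--deep-swapper-morph', type = int, default = 80, choices = deep_swapper_morph_range, metavar = '[0..100]')
args = program.parse_args([ '--deep-swapper-morph', '60' ])
print(args.deep_swapper_morph)  # 60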
@@ -210,7 +213,8 @@ def swap_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
 		crop_masks.append(occlusion_mask)

 	crop_vision_frame = prepare_crop_frame(crop_vision_frame)
-	crop_vision_frame, crop_source_mask, crop_target_mask = forward(crop_vision_frame)
+	deep_swapper_morph = numpy.array([ numpy.interp(state_manager.get_item('deep_swapper_morph'), [ 0, 100 ], [ 0, 1 ]) ]).astype(numpy.float32)
+	crop_vision_frame, crop_source_mask, crop_target_mask = forward(crop_vision_frame, deep_swapper_morph)
 	crop_vision_frame = normalize_crop_frame(crop_vision_frame)
 	crop_vision_frame = conditional_match_frame_color(crop_vision_frame_raw, crop_vision_frame)
 	crop_masks.append(prepare_crop_mask(crop_source_mask, crop_target_mask))
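Note: swap_face() now scales the slider value from [0, 100] to the DFM morph factor range [0, 1] and passes it into forward(), replacing the hard-coded 0.5 that was baked into the previous implementation. A minimal, standalone sketch of the conversion (the helper name is only for illustration):

import numpy

def to_morph_tensor(deep_swapper_morph : int) -> numpy.ndarray:
	# scale the user-facing range [0, 100] onto the morph factor range [0, 1]
	morph_factor = numpy.interp(deep_swapper_morph, [ 0, 100 ], [ 0, 1 ])
	# the model expects a one-element float32 array
	return numpy.array([ morph_factor ]).astype(numpy.float32)

print(to_morph_tensor(80))  # [0.8]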
@@ -219,7 +223,7 @@ def swap_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
 	return paste_vision_frame


-def forward(crop_vision_frame : VisionFrame) -> Tuple[VisionFrame, Mask, Mask]:
+def forward(crop_vision_frame : VisionFrame, deep_swapper_morph : DeepSwapperMorph) -> Tuple[VisionFrame, Mask, Mask]:
 	deep_swapper = get_inference_pool().get('deep_swapper')
 	deep_swapper_inputs = {}

@@ -227,8 +231,7 @@ def forward(crop_vision_frame : VisionFrame) -> Tuple[VisionFrame, Mask, Mask]:
 		if index == 0:
 			deep_swapper_inputs[deep_swapper_input.name] = crop_vision_frame
 		if index == 1:
-			morph_value = numpy.array([ 0.5 ]).astype(numpy.float32)
-			deep_swapper_inputs[deep_swapper_input.name] = morph_value
+			deep_swapper_inputs[deep_swapper_input.name] = deep_swapper_morph

 	with thread_semaphore():
 		crop_target_mask, crop_vision_frame, crop_source_mask = deep_swapper.run(None, deep_swapper_inputs)
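Note: unlike the age modifier and face enhancer, which match ONNX inputs by name ('direction', 'weight'), the deep swapper binds them by position: index 0 is the face crop, index 1 the morph factor, presumably because exported DFM graphs do not share a stable input naming scheme. A small sketch of how the input order of such a model can be inspected with onnxruntime (the model path is a placeholder):

import onnxruntime

session = onnxruntime.InferenceSession('jackie_chan_224.onnx', providers = [ 'CPUExecutionProvider' ])
for index, model_input in enumerate(session.get_inputs()):
	# print each input's position, name and shape to confirm which slot takes the morph factor
	print(index, model_input.name, model_input.shape)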
@@ -17,7 +17,7 @@ from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
 from facefusion.face_store import get_reference_faces
 from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension
 from facefusion.processors import choices as processors_choices
-from facefusion.processors.typing import FaceEnhancerInputs
+from facefusion.processors.typing import FaceEnhancerInputs, FaceEnhancerWeight
 from facefusion.program_helper import find_argument_group
 from facefusion.thread_helper import thread_semaphore
 from facefusion.typing import ApplyStateItem, Args, Face, InferencePool, ModelOptions, ModelSet, ProcessMode, QueuePayload, UpdateProgress, VisionFrame
@@ -296,7 +296,8 @@ def enhance_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
 		crop_masks.append(occlusion_mask)

 	crop_vision_frame = prepare_crop_frame(crop_vision_frame)
-	crop_vision_frame = forward(crop_vision_frame)
+	face_enhancer_weight = numpy.array([ 1 ]).astype(numpy.double)
+	crop_vision_frame = forward(crop_vision_frame, face_enhancer_weight)
 	crop_vision_frame = normalize_crop_frame(crop_vision_frame)
 	crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1)
 	paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix)
@@ -304,7 +305,7 @@ def enhance_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
 	return temp_vision_frame


-def forward(crop_vision_frame : VisionFrame) -> VisionFrame:
+def forward(crop_vision_frame : VisionFrame, face_enhancer_weight : FaceEnhancerWeight) -> VisionFrame:
 	face_enhancer = get_inference_pool().get('face_enhancer')
 	face_enhancer_inputs = {}

@@ -312,8 +313,7 @@ def forward(crop_vision_frame : VisionFrame) -> VisionFrame:
 		if face_enhancer_input.name == 'input':
 			face_enhancer_inputs[face_enhancer_input.name] = crop_vision_frame
 		if face_enhancer_input.name == 'weight':
-			weight = numpy.array([ 1 ]).astype(numpy.double)
-			face_enhancer_inputs[face_enhancer_input.name] = weight
+			face_enhancer_inputs[face_enhancer_input.name] = face_enhancer_weight

 	with thread_semaphore():
 		crop_vision_frame = face_enhancer.run(None, face_enhancer_inputs)[0][0]
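Note: the enhancer weight tensor is still fixed at 1 and, unlike the float32 direction and morph tensors, stays a float64 ([ 1 ] cast to numpy.double), exactly as the previous inline code produced; only its construction moved from forward() into enhance_face(). For reference:

import numpy

face_enhancer_weight = numpy.array([ 1 ]).astype(numpy.double)
print(face_enhancer_weight, face_enhancer_weight.dtype)  # [1.] float64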
@@ -74,6 +74,7 @@ ProcessorStateKey = Literal\
 	'age_modifier_model',
 	'age_modifier_direction',
 	'deep_swapper_model',
+	'deep_swapper_morph',
 	'expression_restorer_model',
 	'expression_restorer_factor',
 	'face_debugger_items',
@@ -108,6 +109,7 @@ ProcessorState = TypedDict('ProcessorState',
 	'age_modifier_model' : AgeModifierModel,
 	'age_modifier_direction' : int,
 	'deep_swapper_model' : DeepSwapperModel,
+	'deep_swapper_morph' : int,
 	'expression_restorer_model' : ExpressionRestorerModel,
 	'expression_restorer_factor' : int,
 	'face_debugger_items' : List[FaceDebuggerItem],
@@ -139,6 +141,9 @@ ProcessorState = TypedDict('ProcessorState',
 })
 ProcessorStateSet = Dict[AppContext, ProcessorState]

+AgeModifierDirection = NDArray[Any]
+DeepSwapperMorph = NDArray[Any]
+FaceEnhancerWeight = NDArray[Any]
 LivePortraitPitch = float
 LivePortraitYaw = float
 LivePortraitRoll = float
@@ -1,18 +1,21 @@
-from typing import List, Optional
+from typing import List, Optional, Tuple

 import gradio

 from facefusion import state_manager, wording
+from facefusion.common_helper import calc_int_step
 from facefusion.processors import choices as processors_choices
 from facefusion.processors.core import load_processor_module
 from facefusion.processors.typing import DeepSwapperModel
 from facefusion.uis.core import get_ui_component, register_ui_component

 DEEP_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+DEEP_SWAPPER_MORPH_SLIDER : Optional[gradio.Slider] = None


 def render() -> None:
 	global DEEP_SWAPPER_MODEL_DROPDOWN
+	global DEEP_SWAPPER_MORPH_SLIDER

 	DEEP_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
 		label = wording.get('uis.deep_swapper_model_dropdown'),
@@ -20,20 +23,30 @@ def render() -> None:
 		value = state_manager.get_item('deep_swapper_model'),
 		visible = 'deep_swapper' in state_manager.get_item('processors')
 	)
+	DEEP_SWAPPER_MORPH_SLIDER = gradio.Slider(
+		label=wording.get('uis.deep_swapper_morph_slider'),
+		value=state_manager.get_item('deep_swapper_morph'),
+		step=calc_int_step(processors_choices.deep_swapper_morph_range),
+		minimum=processors_choices.deep_swapper_morph_range[0],
+		maximum=processors_choices.deep_swapper_morph_range[-1],
+		visible='deep_swapper' in state_manager.get_item('processors')
+	)
 	register_ui_component('deep_swapper_model_dropdown', DEEP_SWAPPER_MODEL_DROPDOWN)
+	register_ui_component('deep_swapper_morph_slider', DEEP_SWAPPER_MORPH_SLIDER)


 def listen() -> None:
 	DEEP_SWAPPER_MODEL_DROPDOWN.change(update_deep_swapper_model, inputs = DEEP_SWAPPER_MODEL_DROPDOWN, outputs = DEEP_SWAPPER_MODEL_DROPDOWN)
+	DEEP_SWAPPER_MORPH_SLIDER.release(update_deep_swapper_morph, inputs = DEEP_SWAPPER_MORPH_SLIDER)

 	processors_checkbox_group = get_ui_component('processors_checkbox_group')
 	if processors_checkbox_group:
-		processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = DEEP_SWAPPER_MODEL_DROPDOWN)
+		processors_checkbox_group.change(remote_update, inputs = processors_checkbox_group, outputs = [ DEEP_SWAPPER_MODEL_DROPDOWN, DEEP_SWAPPER_MORPH_SLIDER ])


-def remote_update(processors : List[str]) -> gradio.Dropdown:
+def remote_update(processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Slider]:
 	has_deep_swapper = 'deep_swapper' in processors
-	return gradio.Dropdown(visible = has_deep_swapper)
+	return gradio.Dropdown(visible = has_deep_swapper), gradio.Slider(visible = has_deep_swapper)


 def update_deep_swapper_model(deep_swapper_model : DeepSwapperModel) -> gradio.Dropdown:
@@ -44,3 +57,8 @@ def update_deep_swapper_model(deep_swapper_model : DeepSwapperModel) -> gradio.Dropdown:
 	if deep_swapper_module.pre_check():
 		return gradio.Dropdown(value = state_manager.get_item('deep_swapper_model'))
 	return gradio.Dropdown()
+
+
+def update_deep_swapper_morph(deep_swapper_morph : int) -> None:
+	state_manager.set_item('deep_swapper_morph', deep_swapper_morph)
+
@@ -111,6 +111,7 @@ def listen() -> None:
 	for ui_component in get_ui_components(
 	[
 		'age_modifier_direction_slider',
+		'deep_swapper_morph_slider',
 		'expression_restorer_factor_slider',
 		'face_editor_eyebrow_direction_slider',
 		'face_editor_eye_gaze_horizontal_slider',
@@ -10,6 +10,7 @@ ComponentName = Literal\
 	'benchmark_cycles_slider',
 	'benchmark_runs_checkbox_group',
 	'deep_swapper_model_dropdown',
+	'deep_swapper_morph_slider',
 	'expression_restorer_factor_slider',
 	'expression_restorer_model_dropdown',
 	'face_debugger_items_checkbox_group',
@@ -144,6 +144,7 @@ WORDING : Dict[str, Any] =\
 		'age_modifier_model': 'choose the model responsible for aging the face',
 		'age_modifier_direction': 'specify the direction in which the age should be modified',
 		'deep_swapper_model': 'choose the model responsible for swapping the face',
+		'deep_swapper_morph': 'morph between source face and target faces',
 		'expression_restorer_model': 'choose the model responsible for restoring the expression',
 		'expression_restorer_factor': 'restore factor of expression from the target face',
 		'face_debugger_items': 'load a single or multiple processors (choices: {choices})',
@@ -232,6 +233,7 @@ WORDING : Dict[str, Any] =\
 		'common_options_checkbox_group': 'OPTIONS',
 		'download_providers_checkbox_group': 'DOWNLOAD PROVIDERS',
 		'deep_swapper_model_dropdown': 'DEEP SWAPPER MODEL',
+		'deep_swapper_morph_slider': 'DEEP SWAPPER MORPH',
 		'execution_providers_checkbox_group': 'EXECUTION PROVIDERS',
 		'execution_queue_count_slider': 'EXECUTION QUEUE COUNT',
 		'execution_thread_count_slider': 'EXECUTION THREAD COUNT',