Add the first iteration of a frame colorizer
commit ad60c17dc5 (parent 91bb1529f8)
@@ -61,6 +61,8 @@ face_debugger_items =
face_enhancer_model =
face_enhancer_blend =
face_swapper_model =
+frame_colorizer_model =
+frame_colorizer_blend =
frame_enhancer_model =
frame_enhancer_blend =
lip_syncer_model =
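The two new ini keys are left empty here; their effective defaults come from register_args in the new frame_colorizer.py further down, which falls back to 'deoldify' and '100' when the config values are unset. A rough, simplified stand-in for that 'section.key' fallback lookup (not the real facefusion.config implementation):

from configparser import ConfigParser
from typing import Optional

# Simplified stand-in for the 'section.key' fallback lookup used via facefusion.config
# (not the real implementation; the real get_int_value presumably casts to int as well).
def get_str_value(config : ConfigParser, notation : str, fallback : Optional[str] = None) -> Optional[str]:
	section, key = notation.split('.')
	if config.has_option(section, key) and config.get(section, key):
		return config.get(section, key)
	return fallback

config = ConfigParser()
config.read_string('[frame_processors]\nframe_colorizer_model =\nframe_colorizer_blend =\n')
print(get_str_value(config, 'frame_processors.frame_colorizer_model', 'deoldify'))  # deoldify
print(get_str_value(config, 'frame_processors.frame_colorizer_blend', '100'))       # 100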
@@ -1,13 +1,15 @@
from typing import List

from facefusion.common_helper import create_int_range
-from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameEnhancerModel, LipSyncerModel
+from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel

face_debugger_items : List[FaceDebuggerItem] = [ 'bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender' ]
face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus' ]
face_swapper_models : List[FaceSwapperModel] = [ 'blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial', 'uniface_256' ]
+frame_colorizer_models : List[FrameColorizerModel] = [ 'deoldify' ]
frame_enhancer_models : List[FrameEnhancerModel] = [ 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_hatgan_x4', 'span_kendata_x4' ]
lip_syncer_models : List[LipSyncerModel] = [ 'wav2lip_gan' ]

face_enhancer_blend_range : List[int] = create_int_range(0, 100, 1)
+frame_colorizer_blend_range : List[int] = create_int_range(0, 100, 1)
frame_enhancer_blend_range : List[int] = create_int_range(0, 100, 1)
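Assuming create_int_range(0, 100, 1) yields the inclusive list [0, 1, ..., 100] (the helper itself is not shown in this diff), the frame colorizer blend slider rendered later derives its step and bounds from that list:

# Sketch under the assumption that create_int_range(0, 100, 1) == [0, 1, ..., 100].
frame_colorizer_blend_range = list(range(0, 101))

step = frame_colorizer_blend_range[1] - frame_colorizer_blend_range[0]   # 1
minimum = frame_colorizer_blend_range[0]                                 # 0
maximum = frame_colorizer_blend_range[-1]                                # 100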
@@ -1,11 +1,13 @@
from typing import List, Optional

-from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameEnhancerModel, LipSyncerModel
+from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel

face_debugger_items : Optional[List[FaceDebuggerItem]] = None
face_enhancer_model : Optional[FaceEnhancerModel] = None
face_enhancer_blend : Optional[int] = None
face_swapper_model : Optional[FaceSwapperModel] = None
+frame_colorizer_model : Optional[FrameColorizerModel] = None
+frame_colorizer_blend : Optional[int] = None
frame_enhancer_model : Optional[FrameEnhancerModel] = None
frame_enhancer_blend : Optional[int] = None
lip_syncer_model : Optional[LipSyncerModel] = None
facefusion/processors/frame/modules/frame_colorizer.py (new file, 197 lines)
@@ -0,0 +1,197 @@
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
from time import sleep
import threading
import cv2
import numpy
import onnxruntime

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, process_manager, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.execution import apply_execution_provider_options
from facefusion.normalizer import normalize_output_path
from facefusion.typing import Face, VisionFrame, UpdateProcess, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.filesystem import is_file, resolve_relative_path, is_image, is_video
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameColorizerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

FRAME_PROCESSOR = None
THREAD_LOCK : threading.Lock = threading.Lock()
NAME = __name__.upper()
MODELS : ModelSet =\
{
	'deoldify':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/deoldify.onnx',
		'path': resolve_relative_path('../.assets/models/deoldify.onnx'),
		'size': (256, 256)
	}
}
OPTIONS : Optional[OptionsWithModel] = None


def get_frame_processor() -> Any:
	global FRAME_PROCESSOR

	with THREAD_LOCK:
		while process_manager.is_checking():
			sleep(0.5)
		if FRAME_PROCESSOR is None:
			model_path = get_options('model').get('path')
			FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
	return FRAME_PROCESSOR


def clear_frame_processor() -> None:
	global FRAME_PROCESSOR

	FRAME_PROCESSOR = None


def get_options(key : Literal['model']) -> Any:
	global OPTIONS

	if OPTIONS is None:
		OPTIONS =\
		{
			'model': MODELS[frame_processors_globals.frame_colorizer_model]
		}
	return OPTIONS.get(key)


def set_options(key : Literal['model'], value : Any) -> None:
	global OPTIONS

	OPTIONS[key] = value


def register_args(program : ArgumentParser) -> None:
	program.add_argument('--frame-colorizer-model', help = wording.get('help.frame_colorizer_model'), default = config.get_str_value('frame_processors.frame_colorizer_model', 'deoldify'), choices = frame_processors_choices.frame_colorizer_models)
	program.add_argument('--frame-colorizer-blend', help = wording.get('help.frame_colorizer_blend'), type = int, default = config.get_int_value('frame_processors.frame_colorizer_blend', '100'), choices = frame_processors_choices.frame_colorizer_blend_range, metavar = create_metavar(frame_processors_choices.frame_colorizer_blend_range))


def apply_args(program : ArgumentParser) -> None:
	args = program.parse_args()
	frame_processors_globals.frame_colorizer_model = args.frame_colorizer_model
	frame_processors_globals.frame_colorizer_blend = args.frame_colorizer_blend


def pre_check() -> bool:
	if not facefusion.globals.skip_download:
		download_directory_path = resolve_relative_path('../.assets/models')
		model_url = get_options('model').get('url')
		process_manager.check()
		conditional_download(download_directory_path, [ model_url ])
		process_manager.end()
	return True


def post_check() -> bool:
	model_url = get_options('model').get('url')
	model_path = get_options('model').get('path')
	if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
		logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
		return False
	elif not is_file(model_path):
		logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
		return False
	return True


def pre_process(mode : ProcessMode) -> bool:
	if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
		logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
		return False
	if mode == 'output' and not normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path):
		logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
		return False
	return True


def post_process() -> None:
	read_static_image.cache_clear()
	if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
		clear_frame_processor()
	if facefusion.globals.video_memory_strategy == 'strict':
		clear_face_analyser()
		clear_content_analyser()


def colorize_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
	frame_processor = get_frame_processor()
	color_vision_frame = frame_processor.run(None,
	{
		frame_processor.get_inputs()[0].name: prepare_temp_frame(temp_vision_frame)
	})[0][0]
	color_vision_frame = merge_color_frame(temp_vision_frame, color_vision_frame)
	color_vision_frame = blend_frame(temp_vision_frame, color_vision_frame)
	return color_vision_frame


def prepare_temp_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
	model_size = get_options('model').get('size')
	temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_BGR2GRAY)
	temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_GRAY2RGB)
	temp_vision_frame = cv2.resize(temp_vision_frame, model_size)
	temp_vision_frame = temp_vision_frame.transpose((2, 0, 1))
	temp_vision_frame = numpy.expand_dims(temp_vision_frame, axis = 0).astype(numpy.float32)
	return temp_vision_frame


def merge_color_frame(temp_vision_frame : VisionFrame, color_vision_frame : VisionFrame) -> VisionFrame:
	temp_luminance, _, _ = cv2.split(temp_vision_frame)
	color_vision_frame = color_vision_frame.transpose(1, 2, 0)
	color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_BGR2RGB).astype(numpy.uint8)
	color_vision_frame = cv2.resize(color_vision_frame, (temp_vision_frame.shape[1], temp_vision_frame.shape[0]))
	color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_BGR2LAB)
	_, color_channel_a, color_channel_b = cv2.split(color_vision_frame)
	color_vision_frame = cv2.merge((temp_luminance, color_channel_a, color_channel_b))
	color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_LAB2BGR)
	return color_vision_frame


def blend_frame(temp_vision_frame : VisionFrame, paste_vision_frame : VisionFrame) -> VisionFrame:
	frame_colorizer_blend = 1 - (frame_processors_globals.frame_colorizer_blend / 100)
	temp_vision_frame = cv2.addWeighted(temp_vision_frame, frame_colorizer_blend, paste_vision_frame, 1 - frame_colorizer_blend, 0)
	return temp_vision_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	pass


def process_frame(inputs : FrameColorizerInputs) -> VisionFrame:
	target_vision_frame = inputs.get('target_vision_frame')
	return colorize_frame(target_vision_frame)


def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProcess) -> None:
	for queue_payload in process_manager.manage(queue_payloads):
		target_vision_path = queue_payload['frame_path']
		target_vision_frame = read_image(target_vision_path)
		output_vision_frame = process_frame(
		{
			'target_vision_frame': target_vision_frame
		})
		write_image(target_vision_path, output_vision_frame)
		update_progress()


def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
	target_vision_frame = read_static_image(target_path)
	output_vision_frame = process_frame(
	{
		'target_vision_frame': target_vision_frame
	})
	write_image(output_path, output_vision_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
	frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
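A note on blend_frame above: the slider value is inverted, so the original frame gets a weight of 1 - blend/100 and the colorized frame gets blend/100; the default of 100 therefore returns the fully colorized result, while 0 leaves the frame untouched. A small self-contained check of that weighting (toy arrays stand in for real frames):

import cv2
import numpy

# Toy stand-ins for the original and colorized frames (hypothetical values).
temp_vision_frame = numpy.full((2, 2, 3), 200, dtype = numpy.uint8)
color_vision_frame = numpy.full((2, 2, 3), 100, dtype = numpy.uint8)

frame_colorizer_blend = 1 - (80 / 100)  # slider value 80 -> original keeps a 0.2 weight
output = cv2.addWeighted(temp_vision_frame, frame_colorizer_blend, color_vision_frame, 1 - frame_colorizer_blend, 0)
print(output[0, 0])  # [120 120 120] = 0.2 * 200 + 0.8 * 100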
@@ -191,10 +191,10 @@ def normalize_tile_frame(vision_tile_frame : VisionFrame) -> VisionFrame:
	return vision_tile_frame


-def blend_frame(temp_vision_frame : VisionFrame, paste_vision_frame : VisionFrame) -> VisionFrame:
+def blend_frame(temp_vision_frame : VisionFrame, merge_vision_frame : VisionFrame) -> VisionFrame:
	frame_enhancer_blend = 1 - (frame_processors_globals.frame_enhancer_blend / 100)
-	temp_vision_frame = cv2.resize(temp_vision_frame, (paste_vision_frame.shape[1], paste_vision_frame.shape[0]))
-	temp_vision_frame = cv2.addWeighted(temp_vision_frame, frame_enhancer_blend, paste_vision_frame, 1 - frame_enhancer_blend, 0)
+	temp_vision_frame = cv2.resize(temp_vision_frame, (merge_vision_frame.shape[1], merge_vision_frame.shape[0]))
+	temp_vision_frame = cv2.addWeighted(temp_vision_frame, frame_enhancer_blend, merge_vision_frame, 1 - frame_enhancer_blend, 0)
	return temp_vision_frame
@@ -5,6 +5,7 @@ from facefusion.typing import Face, FaceSet, AudioFrame, VisionFrame
FaceDebuggerItem = Literal['bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender']
FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus']
FaceSwapperModel = Literal['blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial', 'uniface_256']
+FrameColorizerModel = Literal['deoldify']
FrameEnhancerModel = Literal['lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_hatgan_x4', 'span_kendata_x4']
LipSyncerModel = Literal['wav2lip_gan']

@@ -24,6 +25,10 @@ FaceSwapperInputs = TypedDict('FaceSwapperInputs',
	'source_face' : Face,
	'target_vision_frame' : VisionFrame
})
+FrameColorizerInputs = TypedDict('FrameColorizerInputs',
+{
+	'target_vision_frame' : VisionFrame
+})
FrameEnhancerInputs = TypedDict('FrameEnhancerInputs',
{
	'target_vision_frame' : VisionFrame
@@ -5,13 +5,15 @@ import facefusion.globals
from facefusion import face_analyser, wording
from facefusion.processors.frame.core import load_frame_processor_module
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
-from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameEnhancerModel, LipSyncerModel
+from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel
from facefusion.uis.core import get_ui_component, register_ui_component

FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+FRAME_COLORIZER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+FRAME_COLORIZER_BLEND_SLIDER : Optional[gradio.Slider] = None
FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
LIP_SYNCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -22,6 +24,8 @@ def render() -> None:
	global FACE_ENHANCER_MODEL_DROPDOWN
	global FACE_ENHANCER_BLEND_SLIDER
	global FACE_SWAPPER_MODEL_DROPDOWN
+	global FRAME_COLORIZER_MODEL_DROPDOWN
+	global FRAME_COLORIZER_BLEND_SLIDER
	global FRAME_ENHANCER_MODEL_DROPDOWN
	global FRAME_ENHANCER_BLEND_SLIDER
	global LIP_SYNCER_MODEL_DROPDOWN
@@ -52,6 +56,20 @@ def render() -> None:
		value = frame_processors_globals.face_swapper_model,
		visible = 'face_swapper' in facefusion.globals.frame_processors
	)
+	FRAME_COLORIZER_MODEL_DROPDOWN = gradio.Dropdown(
+		label = wording.get('uis.frame_colorizer_model_dropdown'),
+		choices = frame_processors_choices.frame_colorizer_models,
+		value = frame_processors_globals.frame_colorizer_model,
+		visible = 'frame_colorizer' in facefusion.globals.frame_processors
+	)
+	FRAME_COLORIZER_BLEND_SLIDER = gradio.Slider(
+		label = wording.get('uis.frame_colorizer_blend_slider'),
+		value = frame_processors_globals.frame_colorizer_blend,
+		step = frame_processors_choices.frame_colorizer_blend_range[1] - frame_processors_choices.frame_colorizer_blend_range[0],
+		minimum = frame_processors_choices.frame_colorizer_blend_range[0],
+		maximum = frame_processors_choices.frame_colorizer_blend_range[-1],
+		visible = 'frame_colorizer' in facefusion.globals.frame_processors
+	)
	FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
		label = wording.get('uis.frame_enhancer_model_dropdown'),
		choices = frame_processors_choices.frame_enhancer_models,
@@ -76,6 +94,8 @@ def render() -> None:
	register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN)
	register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER)
	register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN)
+	register_ui_component('frame_colorizer_model_dropdown', FRAME_COLORIZER_MODEL_DROPDOWN)
+	register_ui_component('frame_colorizer_blend_slider', FRAME_COLORIZER_BLEND_SLIDER)
	register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN)
	register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER)
	register_ui_component('lip_syncer_model_dropdown', LIP_SYNCER_MODEL_DROPDOWN)
@@ -86,21 +106,24 @@ def listen() -> None:
	FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN)
	FACE_ENHANCER_BLEND_SLIDER.release(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER)
	FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN)
+	FRAME_COLORIZER_MODEL_DROPDOWN.change(update_frame_colorizer_model, inputs = FRAME_COLORIZER_MODEL_DROPDOWN, outputs = FRAME_COLORIZER_MODEL_DROPDOWN)
+	FRAME_COLORIZER_BLEND_SLIDER.release(update_frame_colorizer_blend, inputs = FRAME_COLORIZER_BLEND_SLIDER)
	FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN)
	FRAME_ENHANCER_BLEND_SLIDER.release(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER)
	LIP_SYNCER_MODEL_DROPDOWN.change(update_lip_syncer_model, inputs = LIP_SYNCER_MODEL_DROPDOWN, outputs = LIP_SYNCER_MODEL_DROPDOWN)
	frame_processors_checkbox_group = get_ui_component('frame_processors_checkbox_group')
	if frame_processors_checkbox_group:
-		frame_processors_checkbox_group.change(update_frame_processors, inputs = frame_processors_checkbox_group, outputs = [ FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FACE_SWAPPER_MODEL_DROPDOWN, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER, LIP_SYNCER_MODEL_DROPDOWN ])
+		frame_processors_checkbox_group.change(update_frame_processors, inputs = frame_processors_checkbox_group, outputs = [ FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FACE_SWAPPER_MODEL_DROPDOWN, FRAME_COLORIZER_MODEL_DROPDOWN, FRAME_COLORIZER_BLEND_SLIDER, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER, LIP_SYNCER_MODEL_DROPDOWN ])


-def update_frame_processors(frame_processors : List[str]) -> Tuple[gradio.CheckboxGroup, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown]:
+def update_frame_processors(frame_processors : List[str]) -> Tuple[gradio.CheckboxGroup, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider, gradio.Dropdown]:
	has_face_debugger = 'face_debugger' in frame_processors
	has_face_enhancer = 'face_enhancer' in frame_processors
	has_face_swapper = 'face_swapper' in frame_processors
+	has_frame_colorizer = 'frame_colorizer' in frame_processors
	has_frame_enhancer = 'frame_enhancer' in frame_processors
	has_lip_syncer = 'lip_syncer' in frame_processors
-	return gradio.CheckboxGroup(visible = has_face_debugger), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer), gradio.Dropdown(visible = has_lip_syncer)
+	return gradio.CheckboxGroup(visible = has_face_debugger), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_frame_colorizer), gradio.Slider(visible = has_frame_colorizer), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer), gradio.Dropdown(visible = has_lip_syncer)


def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None:
@@ -139,6 +162,20 @@ def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown:
	return gradio.Dropdown()


+def update_frame_colorizer_model(frame_colorizer_model : FrameColorizerModel) -> gradio.Dropdown:
+	frame_processors_globals.frame_colorizer_model = frame_colorizer_model
+	frame_colorizer_module = load_frame_processor_module('frame_colorizer')
+	frame_colorizer_module.clear_frame_processor()
+	frame_colorizer_module.set_options('model', frame_colorizer_module.MODELS[frame_colorizer_model])
+	if frame_colorizer_module.pre_check():
+		return gradio.Dropdown(value = frame_processors_globals.frame_colorizer_model)
+	return gradio.Dropdown()
+
+
+def update_frame_colorizer_blend(frame_colorizer_blend : int) -> None:
+	frame_processors_globals.frame_colorizer_blend = frame_colorizer_blend
+
+
def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gradio.Dropdown:
	frame_processors_globals.frame_enhancer_model = frame_enhancer_model
	frame_enhancer_module = load_frame_processor_module('frame_enhancer')
@@ -105,6 +105,7 @@ def listen() -> None:
	for ui_component in get_ui_components(
	[
		'face_enhancer_blend_slider',
+		'frame_colorizer_blend_slider',
		'frame_enhancer_blend_slider',
		'trim_frame_start_slider',
		'trim_frame_end_slider',
@@ -123,6 +124,7 @@ def listen() -> None:
		'frame_processors_checkbox_group',
		'face_enhancer_model_dropdown',
		'face_swapper_model_dropdown',
+		'frame_colorizer_model_dropdown',
		'frame_enhancer_model_dropdown',
		'lip_syncer_model_dropdown',
		'face_detector_model_dropdown',
@@ -34,6 +34,8 @@ ComponentName = Literal\
	'face_enhancer_model_dropdown',
	'face_enhancer_blend_slider',
	'face_swapper_model_dropdown',
+	'frame_colorizer_model_dropdown',
+	'frame_colorizer_blend_slider',
	'frame_enhancer_model_dropdown',
	'frame_enhancer_blend_slider',
	'lip_syncer_model_dropdown',
@@ -162,6 +162,8 @@ WORDING : Dict[str, Any] =\
		'face_enhancer_model_dropdown': 'FACE ENHANCER MODEL',
		'face_enhancer_blend_slider': 'FACE ENHANCER BLEND',
		'face_swapper_model_dropdown': 'FACE SWAPPER MODEL',
+		'frame_colorizer_model_dropdown': 'FRAME COLORIZER MODEL',
+		'frame_colorizer_blend_slider': 'FRAME COLORIZER BLEND',
		'frame_enhancer_model_dropdown': 'FRAME ENHANCER MODEL',
		'frame_enhancer_blend_slider': 'FRAME ENHANCER BLEND',
		'lip_syncer_model_dropdown': 'LIP SYNCER MODEL',
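These labels are read through dot notation, e.g. wording.get('uis.frame_colorizer_model_dropdown'), which feeds the Gradio labels added in frame_processors_options.py earlier in this diff. A simplified stand-in for that nested lookup (not the actual facefusion.wording implementation):

from typing import Any, Optional

# Simplified stand-in: nested dict plus dot-notation lookup, mirroring how wording.get() is used above.
WORDING =\
{
	'uis':
	{
		'frame_colorizer_model_dropdown': 'FRAME COLORIZER MODEL',
		'frame_colorizer_blend_slider': 'FRAME COLORIZER BLEND'
	}
}

def get(notation : str) -> Optional[Any]:
	value : Any = WORDING
	for key in notation.split('.'):
		value = value.get(key) if isinstance(value, dict) else None
		if value is None:
			return None
	return value

print(get('uis.frame_colorizer_model_dropdown'))  # FRAME COLORIZER MODEL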