Remove deoldify

This commit is contained in:
henryruhs 2024-04-02 01:11:12 +02:00
parent 2344ca2c3a
commit af97de90e2
3 changed files with 13 additions and 31 deletions

View File

@@ -6,7 +6,7 @@ from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerMo
 face_debugger_items : List[FaceDebuggerItem] = [ 'bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender' ]
 face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus' ]
 face_swapper_models : List[FaceSwapperModel] = [ 'blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial', 'uniface_256' ]
-frame_colorizer_models : List[FrameColorizerModel] = [ 'ddcolor', 'ddcolor_artistic', 'deoldify' ]
+frame_colorizer_models : List[FrameColorizerModel] = [ 'ddcolor', 'ddcolor_artistic' ]
 frame_enhancer_models : List[FrameEnhancerModel] = [ 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_hatgan_x4', 'span_kendata_x4' ]
 lip_syncer_models : List[LipSyncerModel] = [ 'wav2lip_gan' ]

View File

@@ -40,13 +40,6 @@ MODELS : ModelSet =\
 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/ddcolor_artistic.onnx',
 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.onnx'),
 'size': (512, 512)
-},
-'deoldify':
-{
-	'type': 'deoldify',
-	'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/deoldify.onnx',
-	'path': resolve_relative_path('../.assets/models/deoldify.onnx'),
-	'size': (256, 256)
 }
 }
 OPTIONS : Optional[OptionsWithModel] = None
@@ -155,14 +148,13 @@ def colorize_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
 def prepare_temp_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
 	model_size = get_options('model').get('size')
-	model_type = get_options('model').get('type')
 	temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_BGR2GRAY)
 	temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_GRAY2RGB)
-	if model_type == 'ddcolor':
-		temp_vision_frame = (temp_vision_frame / 255.0).astype(numpy.float32)
-		temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_RGB2LAB)[:, :, :1]
-		temp_vision_frame = numpy.dstack((temp_vision_frame, numpy.zeros_like(temp_vision_frame), numpy.zeros_like(temp_vision_frame)))
-		temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_LAB2RGB)
+	temp_vision_frame = (temp_vision_frame / 255.0).astype(numpy.float32)
+	temp_vision_frame = cv2.resize(temp_vision_frame, model_size)
+	temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_BGR2Lab)[:, :, :1]
+	temp_vision_frame = numpy.concatenate((temp_vision_frame, numpy.zeros_like(temp_vision_frame), numpy.zeros_like(temp_vision_frame)), axis=-1)
+	temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_LAB2RGB)
 	temp_vision_frame = cv2.resize(temp_vision_frame, model_size)
 	temp_vision_frame = temp_vision_frame.transpose((2, 0, 1))
 	temp_vision_frame = numpy.expand_dims(temp_vision_frame, axis = 0).astype(numpy.float32)
@@ -170,23 +162,13 @@ def prepare_temp_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
 def merge_color_frame(temp_vision_frame : VisionFrame, color_vision_frame : VisionFrame) -> VisionFrame:
-	model_type = get_options('model').get('type')
 	color_vision_frame = color_vision_frame.transpose(1, 2, 0)
-	if model_type == 'ddcolor':
-		temp_vision_frame = (temp_vision_frame / 255.0).astype(numpy.float32)
-		temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_BGR2Lab)[:, :, :1]
-		color_vision_frame = cv2.resize(color_vision_frame, (temp_vision_frame.shape[1], temp_vision_frame.shape[0]))
-		color_vision_frame = numpy.dstack((temp_vision_frame, color_vision_frame))
-		color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_LAB2BGR)
-		color_vision_frame = (color_vision_frame * 255.0).round().astype(numpy.uint8)
-	if model_type == 'deoldify':
-		temp_luminance, _, _ = cv2.split(temp_vision_frame)
-		color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_BGR2RGB).astype(numpy.uint8)
-		color_vision_frame = cv2.resize(color_vision_frame, (temp_vision_frame.shape[1], temp_vision_frame.shape[0]))
-		color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_BGR2LAB)
-		_, color_channel_a, color_channel_b = cv2.split(color_vision_frame)
-		color_vision_frame = cv2.merge((temp_luminance, color_channel_a, color_channel_b))
-		color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_LAB2BGR)
+	temp_vision_frame = (temp_vision_frame / 255.0).astype(numpy.float32)
+	temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_BGR2Lab)[:, :, :1]
+	color_vision_frame = cv2.resize(color_vision_frame, (temp_vision_frame.shape[1], temp_vision_frame.shape[0]))
+	color_vision_frame = numpy.concatenate((temp_vision_frame, color_vision_frame), axis = -1)
+	color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_LAB2BGR)
+	color_vision_frame = (color_vision_frame * 255.0).round().astype(numpy.uint8)
 	return color_vision_frame

View File

@@ -5,7 +5,7 @@ from facefusion.typing import Face, FaceSet, AudioFrame, VisionFrame
 FaceDebuggerItem = Literal['bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender']
 FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus']
 FaceSwapperModel = Literal['blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial', 'uniface_256']
-FrameColorizerModel = Literal['ddcolor', 'ddcolor_artistic', 'deoldify']
+FrameColorizerModel = Literal['ddcolor', 'ddcolor_artistic']
 FrameEnhancerModel = Literal['lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_hatgan_x4', 'span_kendata_x4']
 LipSyncerModel = Literal['wav2lip_gan']