diff --git a/facefusion/face_helper.py b/facefusion/face_helper.py
new file mode 100644
index 00000000..96e4e88d
--- /dev/null
+++ b/facefusion/face_helper.py
@@ -0,0 +1,52 @@
+from typing import Tuple, Dict
+
+import cv2
+import numpy
+
+from facefusion.typing import Face, Frame, Matrix, Template
+
+TEMPLATES : Dict[str, numpy.ndarray] =\
+{
+	'arcface': numpy.array(
+	[
+		[ 38.2946, 51.6963 ],
+		[ 73.5318, 51.5014 ],
+		[ 56.0252, 71.7366 ],
+		[ 41.5493, 92.3655 ],
+		[ 70.7299, 92.2041 ]
+	]),
+	'ffhq': numpy.array(
+	[
+		[ 192.98138, 239.94708 ],
+		[ 318.90277, 240.1936 ],
+		[ 256.63416, 314.01935 ],
+		[ 201.26117, 371.41043 ],
+		[ 313.08905, 371.15118 ]
+	])
+}
+
+
+def warp_face(target_face : Face, temp_frame : Frame, template : Template) -> Tuple[Frame, Matrix]:
+	affine_matrix = cv2.estimateAffinePartial2D(target_face.kps, TEMPLATES[template], method = cv2.LMEDS)[0]
+	crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (512, 512))
+	return crop_frame, affine_matrix
+
+
+def paste_back(temp_frame : Frame, crop_frame : Frame, affine_matrix : Matrix) -> Frame:
+	inverse_affine_matrix = cv2.invertAffineTransform(affine_matrix)
+	temp_frame_height, temp_frame_width = temp_frame.shape[0:2]
+	crop_frame_height, crop_frame_width = crop_frame.shape[0:2]
+	inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_affine_matrix, (temp_frame_width, temp_frame_height))
+	inverse_mask = numpy.ones((crop_frame_height, crop_frame_width, 3), dtype = numpy.float32)
+	inverse_mask_frame = cv2.warpAffine(inverse_mask, inverse_affine_matrix, (temp_frame_width, temp_frame_height))
+	inverse_mask_frame = cv2.erode(inverse_mask_frame, numpy.ones((2, 2)))
+	inverse_mask_border = inverse_mask_frame * inverse_crop_frame
+	inverse_mask_area = numpy.sum(inverse_mask_frame) // 3
+	inverse_mask_edge = int(inverse_mask_area ** 0.5) // 20
+	inverse_mask_radius = inverse_mask_edge * 2
+	inverse_mask_center = cv2.erode(inverse_mask_frame, numpy.ones((inverse_mask_radius, inverse_mask_radius)))
+	inverse_mask_blur_size = inverse_mask_edge * 2 + 1
+	inverse_mask_blur_area = cv2.GaussianBlur(inverse_mask_center, (inverse_mask_blur_size, inverse_mask_blur_size), 0)
+	temp_frame = inverse_mask_blur_area * inverse_mask_border + (1 - inverse_mask_blur_area) * temp_frame
+	temp_frame = temp_frame.clip(0, 255).astype(numpy.uint8)
+	return temp_frame
diff --git a/facefusion/installer.py b/facefusion/installer.py
index 37f788a0..9c28d2ef 100644
--- a/facefusion/installer.py
+++ b/facefusion/installer.py
@@ -46,16 +46,8 @@ def run(program : ArgumentParser) -> None:
 	else:
 		answers = inquirer.prompt(
 		[
-			inquirer.List(
-				'torch',
-				message = wording.get('install_dependency_help').format(dependency = 'torch'),
-				choices = list(TORCH.keys())
-			),
-			inquirer.List(
-				'onnxruntime',
-				message = wording.get('install_dependency_help').format(dependency = 'onnxruntime'),
-				choices = list(ONNXRUNTIMES.keys())
-			)
+			inquirer.List('torch', message = wording.get('install_dependency_help').format(dependency = 'torch'), choices = list(TORCH.keys())),
+			inquirer.List('onnxruntime', message = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys()))
 		])
 	if answers is not None:
 		torch = answers['torch']
diff --git a/facefusion/processors/frame/modules/face_enhancer.py b/facefusion/processors/frame/modules/face_enhancer.py
index d8b6b589..fba27300 100644
--- a/facefusion/processors/frame/modules/face_enhancer.py
+++ b/facefusion/processors/frame/modules/face_enhancer.py
@@ -1,4 +1,4 @@
-from typing import Any, List, Tuple, Dict, Literal, Optional
+from typing import Any, List, Dict, Literal, Optional
 from argparse import ArgumentParser
 import cv2
 import threading
@@ -9,7 +9,8 @@ import facefusion.globals
 from facefusion import wording
 from facefusion.core import update_status
 from facefusion.face_analyser import get_many_faces, clear_face_analyser
-from facefusion.typing import Face, Frame, Matrix, Update_Process, ProcessMode, ModelValue, OptionsWithModel
+from facefusion.face_helper import warp_face, paste_back
+from facefusion.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel
 from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done
 from facefusion.vision import read_image, read_static_image, write_image
 from facefusion.processors.frame import globals as frame_processors_globals
@@ -128,7 +129,7 @@ def post_process() -> None:
 
 def enhance_face(target_face: Face, temp_frame: Frame) -> Frame:
 	frame_processor = get_frame_processor()
-	crop_frame, affine_matrix = warp_face(target_face, temp_frame)
+	crop_frame, affine_matrix = warp_face(target_face, temp_frame, 'ffhq')
 	crop_frame = prepare_crop_frame(crop_frame)
 	frame_processor_inputs = {}
 	for frame_processor_input in frame_processor.get_inputs():
@@ -144,20 +145,6 @@ def enhance_face(target_face: Face, temp_frame: Frame) -> Frame:
 	return temp_frame
 
 
-def warp_face(target_face : Face, temp_frame : Frame) -> Tuple[Frame, Matrix]:
-	template = numpy.array(
-	[
-		[ 192.98138, 239.94708 ],
-		[ 318.90277, 240.1936 ],
-		[ 256.63416, 314.01935 ],
-		[ 201.26117, 371.41043 ],
-		[ 313.08905, 371.15118 ]
-	])
-	affine_matrix = cv2.estimateAffinePartial2D(target_face['kps'], template, method = cv2.LMEDS)[0]
-	crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (512, 512))
-	return crop_frame, affine_matrix
-
-
 def prepare_crop_frame(crop_frame : Frame) -> Frame:
 	crop_frame = crop_frame[:, :, ::-1] / 255.0
 	crop_frame = (crop_frame - 0.5) / 0.5
@@ -174,26 +161,6 @@ def normalize_crop_frame(crop_frame : Frame) -> Frame:
 	return crop_frame
 
 
-def paste_back(temp_frame : Frame, crop_frame : Frame, affine_matrix : Matrix) -> Frame:
-	inverse_affine_matrix = cv2.invertAffineTransform(affine_matrix)
-	temp_frame_height, temp_frame_width = temp_frame.shape[0:2]
-	crop_frame_height, crop_frame_width = crop_frame.shape[0:2]
-	inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_affine_matrix, (temp_frame_width, temp_frame_height))
-	inverse_mask = numpy.ones((crop_frame_height, crop_frame_width, 3), dtype = numpy.float32)
-	inverse_mask_frame = cv2.warpAffine(inverse_mask, inverse_affine_matrix, (temp_frame_width, temp_frame_height))
-	inverse_mask_frame = cv2.erode(inverse_mask_frame, numpy.ones((2, 2)))
-	inverse_mask_border = inverse_mask_frame * inverse_crop_frame
-	inverse_mask_area = numpy.sum(inverse_mask_frame) // 3
-	inverse_mask_edge = int(inverse_mask_area ** 0.5) // 20
-	inverse_mask_radius = inverse_mask_edge * 2
-	inverse_mask_center = cv2.erode(inverse_mask_frame, numpy.ones((inverse_mask_radius, inverse_mask_radius)))
-	inverse_mask_blur_size = inverse_mask_edge * 2 + 1
-	inverse_mask_blur_area = cv2.GaussianBlur(inverse_mask_center, (inverse_mask_blur_size, inverse_mask_blur_size), 0)
-	temp_frame = inverse_mask_blur_area * inverse_mask_border + (1 - inverse_mask_blur_area) * temp_frame
-	temp_frame = temp_frame.clip(0, 255).astype(numpy.uint8)
-	return temp_frame
-
-
 def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
 	face_enhancer_blend = 1 - (frame_processors_globals.face_enhancer_blend / 100)
 	temp_frame = cv2.addWeighted(temp_frame, face_enhancer_blend, paste_frame, 1 - face_enhancer_blend, 0)
diff --git a/facefusion/typing.py b/facefusion/typing.py
index bfbdb849..d86fb1a0 100644
--- a/facefusion/typing.py
+++ b/facefusion/typing.py
@@ -9,6 +9,7 @@ Matrix = numpy.ndarray[Any, Any]
 
 Update_Process = Callable[[], None]
 Process_Frames = Callable[[str, List[str], Update_Process], None]
+Template = Literal[ 'arcface', 'ffhq' ]
 ProcessMode = Literal[ 'output', 'preview', 'stream' ]
 FaceRecognition = Literal[ 'reference', 'many' ]
 FaceAnalyserDirection = Literal[ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ]
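
Review note, not part of the patch: a minimal sketch of how the relocated helpers compose after this refactor, assuming a detected Face and a BGR frame as read by facefusion.vision. The name sketch_enhance is hypothetical, and the enhancer model call is deliberately elided.

from facefusion.face_helper import warp_face, paste_back

def sketch_enhance(target_face, temp_frame):
	# align the face onto the 512x512 'ffhq' template ('arcface' is the other entry in TEMPLATES)
	crop_frame, affine_matrix = warp_face(target_face, temp_frame, 'ffhq')
	# ... the ONNX enhancer call from enhance_face would run on crop_frame here ...
	# feather the result back into the frame; for a full 512x512 mask the edge
	# comes to int((512 * 512) ** 0.5) // 20 = 25 px, giving a 50 px erosion
	# and a 51x51 Gaussian blur to soften the seam
	return paste_back(temp_frame, crop_frame, affine_matrix)

Keeping the landmark templates behind the shared Template literal lets other frame processors reuse the same alignment path instead of holding private copies of the FFHQ points.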