parent dc1b69ed36
commit 971055da5f
--- a/facefusion/face_analyser.py
+++ b/facefusion/face_analyser.py
@@ -8,7 +8,7 @@ import onnxruntime
 import facefusion.globals
 from facefusion import process_manager
 from facefusion.common_helper import get_first
-from facefusion.face_helper import warp_face_by_face_landmark_5, warp_face_by_translation, create_static_anchors, distance_to_face_landmark_5, distance_to_bounding_box, convert_face_landmark_68_to_5, apply_nms, categorize_age, categorize_gender
+from facefusion.face_helper import warp_face_by_face_landmark_5, warp_face_by_translation, create_static_anchors, distance_to_face_landmark_5, distance_to_bounding_box, convert_face_landmark_68_to_5, convert_face_landmark_5_to_68, apply_nms, categorize_age, categorize_gender
 from facefusion.face_store import get_static_faces, set_static_faces
 from facefusion.execution import apply_execution_provider_options
 from facefusion.download import conditional_download
@@ -327,6 +327,8 @@ def create_faces(vision_frame : VisionFrame, bounding_box_list : List[BoundingBox]
 		face_landmark_68, face_landmark_68_score = detect_face_landmark_68(vision_frame, bounding_box)
 		if face_landmark_68_score > facefusion.globals.face_landmarker_score:
 			face_landmark_5_68 = convert_face_landmark_68_to_5(face_landmark_68)
+		else:
+			face_landmark_68 = convert_face_landmark_5_to_68(face_landmark_5_68)
 		landmarks : FaceLandmarkSet =\
 		{
 			'5': face_landmark_5_list[index],
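In isolation, the new fallback reads as follows. This is a minimal, hypothetical sketch, not repository code: the two converter callables stand in for the face_helper functions named in the import above, and the rest of the create_faces plumbing is omitted.

from typing import Callable, Tuple
import numpy

def resolve_landmarks(
	face_landmark_5 : numpy.ndarray,
	face_landmark_68 : numpy.ndarray,
	face_landmark_68_score : float,
	face_landmarker_score : float,
	convert_68_to_5 : Callable[[numpy.ndarray], numpy.ndarray],
	convert_5_to_68 : Callable[[numpy.ndarray], numpy.ndarray]
) -> Tuple[numpy.ndarray, numpy.ndarray]:
	if face_landmark_68_score > face_landmarker_score:
		# Confident detection: derive the 5/68 set from the detected 68 points.
		face_landmark_5_68 = convert_68_to_5(face_landmark_68)
	else:
		# Weak detection: keep the detector's 5 points and synthesize 68 points from them.
		face_landmark_5_68 = face_landmark_5
		face_landmark_68 = convert_5_to_68(face_landmark_5_68)
	return face_landmark_5_68, face_landmark_68

Either way the face ends up with a usable 68-point set, which is what lets the lip syncer drop its own fallback branch further down.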
--- a/facefusion/face_helper.py
+++ b/facefusion/face_helper.py
@@ -1,10 +1,30 @@
+import threading
+from time import sleep
+
 from typing import Any, Tuple, List
 from cv2.typing import Size
 from functools import lru_cache
 import cv2
 import numpy
+import onnxruntime
 
+import facefusion.globals
+from facefusion import process_manager
+from facefusion.typing import BoundingBox, FaceLandmark5, FaceLandmark68, VisionFrame, Mask, Matrix, Translation, WarpTemplate, WarpTemplateSet, FaceAnalyserAge, FaceAnalyserGender, ModelSet
+from facefusion.filesystem import resolve_relative_path
+from facefusion.download import conditional_download
+
+FACE_LANDMARK_CONVERTER = None
+THREAD_LOCK : threading.Lock = threading.Lock()
+MODELS : ModelSet =\
+{
+	'face_landmark_converter':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/5_to_68_landmark_converter.onnx',
+		'path': resolve_relative_path('../.assets/models/5_to_68_landmark_converter.onnx')
+	}
+}
+
-from facefusion.typing import BoundingBox, FaceLandmark5, FaceLandmark68, VisionFrame, Mask, Matrix, Translation, WarpTemplate, WarpTemplateSet, FaceAnalyserAge, FaceAnalyserGender
 
 WARP_TEMPLATES : WarpTemplateSet =\
 {
@@ -43,6 +63,34 @@ WARP_TEMPLATES : WarpTemplateSet =\
 }
 
 
+def get_face_landmark_converter() -> Any:
+	global FACE_LANDMARK_CONVERTER
+
+	with THREAD_LOCK:
+		while process_manager.is_checking():
+			sleep(0.5)
+		if FACE_LANDMARK_CONVERTER is None:
+			model_path = MODELS.get('face_landmark_converter').get('path')
+			FACE_LANDMARK_CONVERTER = onnxruntime.InferenceSession(model_path, providers = ['CPUExecutionProvider'])
+	return FACE_LANDMARK_CONVERTER
+
+
+def clear_face_landmark_converter() -> None:
+	global FACE_LANDMARK_CONVERTER
+
+	FACE_LANDMARK_CONVERTER = None
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		download_directory_path = resolve_relative_path('../.assets/models')
+		model_url = MODELS.get('face_landmark_converter').get('url')
+		process_manager.check()
+		conditional_download(download_directory_path, [ model_url ])
+	process_manager.end()
+	return True
+
+
 def warp_face_by_face_landmark_5(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5, warp_template : WarpTemplate, crop_size : Size) -> Tuple[VisionFrame, Matrix]:
 	normed_warp_template = WARP_TEMPLATES.get(warp_template) * crop_size
 	affine_matrix = cv2.estimateAffinePartial2D(face_landmark_5, normed_warp_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0]
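The converter follows the usual facefusion pattern: a lazily created, thread-locked onnxruntime session plus a pre_check that routes the model download through process_manager. A hedged usage sketch, assuming the facefusion globals consulted by pre_check have already been initialised by the program entry point:

from facefusion import face_helper

# Download 5_to_68_landmark_converter.onnx once (skipped when skip_download
# is set), then fetch the shared session; repeat calls return the same object.
if face_helper.pre_check():
	converter = face_helper.get_face_landmark_converter()
	print([ item.name for item in converter.get_inputs() ])  # expects an 'input' tensor, per convert_face_landmark_5_to_68 below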
@@ -124,6 +172,18 @@ def convert_face_landmark_68_to_5(landmark_68 : FaceLandmark68) -> FaceLandmark5
 	return face_landmark_5
 
 
+def convert_face_landmark_5_to_68(face_landmark_5 : FaceLandmark5) -> FaceLandmark68:
+	face_landmarker_converter = get_face_landmark_converter()
+	normed_warp_template = WARP_TEMPLATES.get('ffhq_512') * 512
+	affine_matrix = cv2.estimateAffinePartial2D(face_landmark_5, normed_warp_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0]
+	face_landmark_5 = cv2.transform(face_landmark_5.reshape(1, -1, 2), affine_matrix).reshape(-1, 2)
+	face_landmark_5 = face_landmark_5 / 512
+	face_landmark_68 = face_landmarker_converter.run(None, {"input": [face_landmark_5]})[0][0]
+	face_landmark_68 = (face_landmark_68 * 512).reshape(68, 2)
+	face_landmark_68 = cv2.transform(face_landmark_68.reshape(1, -1, 2), cv2.invertAffineTransform(affine_matrix)).reshape(-1, 2)
+	return face_landmark_68
+
+
 def apply_nms(bounding_box_list : List[BoundingBox], iou_threshold : float) -> List[int]:
 	keep_indices = []
 	dimension_list = numpy.reshape(bounding_box_list, (-1, 4))
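convert_face_landmark_5_to_68 works in template space: the 5 points are warped onto the ffhq_512 template, scaled into the unit square for the ONNX model, and the predicted 68 points are scaled back and unwarped with the inverted affine matrix. A self-contained sanity check of that warp/unwarp plumbing; the point and template coordinates below are made-up example values, and the ONNX step itself is not exercised:

import cv2
import numpy

# Five example landmark positions and a rough 512px template (arbitrary values).
points = numpy.array([ [ 120.0, 140.0 ], [ 200.0, 138.0 ], [ 160.0, 190.0 ], [ 130.0, 230.0 ], [ 195.0, 228.0 ] ], dtype = numpy.float32)
template = numpy.array([ [ 192.0, 217.0 ], [ 319.0, 215.0 ], [ 257.0, 293.0 ], [ 201.0, 364.0 ], [ 313.0, 361.0 ] ], dtype = numpy.float32)

affine_matrix = cv2.estimateAffinePartial2D(points, template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0]
warped = cv2.transform(points.reshape(1, -1, 2), affine_matrix).reshape(-1, 2)
restored = cv2.transform(warped.reshape(1, -1, 2), cv2.invertAffineTransform(affine_matrix)).reshape(-1, 2)
assert numpy.allclose(points, restored, atol = 1e-2)

Normalising into the template frame is what lets one small converter model handle faces at any scale or rotation.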
--- a/facefusion/processors/frame/modules/lip_syncer.py
+++ b/facefusion/processors/frame/modules/lip_syncer.py
@@ -138,14 +138,11 @@ def sync_lip(target_face : Face, temp_audio_frame : AudioFrame, temp_vision_frame
 	crop_mask_list = []
 	temp_audio_frame = prepare_audio_frame(temp_audio_frame)
 	crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmarks.get('5/68'), 'ffhq_512', (512, 512))
-	if numpy.any(target_face.landmarks.get('68')):
-		face_landmark_68 = cv2.transform(target_face.landmarks.get('68').reshape(1, -1, 2), affine_matrix).reshape(-1, 2)
-		bounding_box = create_bounding_box_from_face_landmark_68(face_landmark_68)
-		bounding_box[1] -= numpy.abs(bounding_box[3] - bounding_box[1]) * 0.125
-		mouth_mask = create_mouth_mask(face_landmark_68)
-		crop_mask_list.append(mouth_mask)
-	else:
-		bounding_box = target_face.bounding_box
+	face_landmark_68 = cv2.transform(target_face.landmarks.get('68').reshape(1, -1, 2), affine_matrix).reshape(-1, 2)
+	bounding_box = create_bounding_box_from_face_landmark_68(face_landmark_68)
+	bounding_box[1] -= numpy.abs(bounding_box[3] - bounding_box[1]) * 0.125
+	mouth_mask = create_mouth_mask(face_landmark_68)
+	crop_mask_list.append(mouth_mask)
 	box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding)
 	crop_mask_list.append(box_mask)
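Because every face now carries a 68-point set (detected or converted), the else branch that fell back to target_face.bounding_box is gone. For reference, the surviving box computation restated as a hypothetical stand-alone helper; create_bounding_box_from_face_landmark_68 is approximated here by a plain min/max box:

import numpy

def expand_mouth_bounding_box(face_landmark_68 : numpy.ndarray) -> numpy.ndarray:
	# Axis-aligned box around the 68 points: [ min_x, min_y, max_x, max_y ].
	min_x, min_y = numpy.min(face_landmark_68, axis = 0)
	max_x, max_y = numpy.max(face_landmark_68, axis = 0)
	bounding_box = numpy.array([ min_x, min_y, max_x, max_y ])
	# Raise the top edge by 12.5% of the box height, as sync_lip does above,
	# so the mask extends slightly beyond the lips.
	bounding_box[1] -= numpy.abs(bounding_box[3] - bounding_box[1]) * 0.125
	return bounding_box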