new alignment
This commit is contained in:
parent
8385e199f4
commit
18244da99f
@@ -5,7 +5,7 @@ import cv2
|
||||
import numpy
|
||||
from cv2.typing import Size
|
||||
|
||||
from facefusion.typing import Anchors, Angle, BoundingBox, Distance, FaceDetectorModel, FaceLandmark5, FaceLandmark68, Mask, Matrix, Points, Scale, Score, Translation, VisionFrame, WarpTemplate, WarpTemplateSet
|
||||
from facefusion.typing import Anchors, Angle, BoundingBox, Direction, Distance, FaceDetectorModel, FaceLandmark5, FaceLandmark68, Mask, Matrix, Points, Scale, Score, Translation, VisionFrame, WarpTemplate, WarpTemplateSet
|
||||
|
||||
WARP_TEMPLATES : WarpTemplateSet =\
|
||||
{
|
||||
@@ -56,6 +56,14 @@ WARP_TEMPLATES : WarpTemplateSet =\
|
||||
[ 0.50123859, 0.61331904 ],
|
||||
[ 0.43364461, 0.68337652 ],
|
||||
[ 0.57015325, 0.68306005 ]
|
||||
]),
|
||||
'deep_face_live': numpy.array(
|
||||
[
|
||||
[ 0.22549182, 0.21599032 ],
|
||||
[ 0.75476142, 0.21599032 ],
|
||||
[ 0.49012712, 0.51562511 ],
|
||||
[ 0.25414925, 0.78023333 ],
|
||||
[ 0.72610437, 0.78023333 ]
|
||||
])
|
||||
}
|
||||
|
||||
@@ -72,6 +80,29 @@ def warp_face_by_face_landmark_5(temp_vision_frame : VisionFrame, face_landmark_
|
||||
return crop_vision_frame, affine_matrix
|
||||
|
||||
|
||||
def warp_face_for_deepfacelive(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5, crop_size : Size, coverage : float, x_shift : float, y_shift : float) -> Tuple[VisionFrame, Matrix]:
	# Estimate the DeepFaceLive alignment for a unit crop, then map the unit square
	# back through its inverse to locate the aligned face quad in the original frame.
	normalize_matrix = estimate_matrix_by_face_landmark_5(face_landmark_5, 'deep_face_live', (1, 1))
	unit_square = numpy.array([ (0, 0), (1, 0), (1, 1), (0, 1) ]).astype(numpy.float32)
	face_corners = transform_points(unit_square, cv2.invertAffineTransform(normalize_matrix))
	# Shift the quad center along its own horizontal / vertical edges.
	horizontal_edge = numpy.subtract(face_corners[1], face_corners[0])
	vertical_edge = numpy.subtract(face_corners[3], face_corners[0])
	center_point = face_corners.mean(axis = 0) + x_shift * horizontal_edge + y_shift * vertical_edge
	# Half-diagonal length of the (shifted) quad, widened by the coverage factor.
	scale = numpy.linalg.norm(center_point - face_corners[0]) * coverage
	diagonal_down = calc_points_direction(face_corners[0], face_corners[2]) * scale
	diagonal_up = calc_points_direction(face_corners[3], face_corners[1]) * scale
	# Three corners of the scaled quad define the affine mapping onto the square crop.
	source_points = numpy.array([ center_point - diagonal_down, center_point + diagonal_up, center_point + diagonal_down ]).astype(numpy.float32)
	target_points = numpy.array([ (0, 0), (1, 0), (1, 1) ]).astype(numpy.float32) * crop_size[0]
	crop_matrix = cv2.getAffineTransform(source_points, target_points)
	crop_vision_frame = cv2.warpAffine(temp_vision_frame, crop_matrix, crop_size, flags = cv2.INTER_CUBIC)
	return crop_vision_frame, crop_matrix
|
||||
|
||||
|
||||
def calc_points_direction(start_point : Points, end_point : Points) -> Direction:
	# Return the unit vector pointing from start_point towards end_point.
	# Cast to float64 first: in-place true division on an integer array raises
	# a TypeError, and the original fails for integer point inputs.
	direction = numpy.subtract(end_point, start_point).astype(numpy.float64)
	norm = numpy.linalg.norm(direction)
	# Guard against coincident points: return a zero vector instead of
	# propagating NaN into downstream affine-matrix calculations.
	if norm > 0:
		direction /= norm
	return direction
|
||||
|
||||
|
||||
def warp_face_by_bounding_box(temp_vision_frame : VisionFrame, bounding_box : BoundingBox, crop_size : Size) -> Tuple[VisionFrame, Matrix]:
|
||||
source_points = numpy.array([ [ bounding_box[0], bounding_box[1] ], [bounding_box[2], bounding_box[1] ], [ bounding_box[0], bounding_box[3] ] ]).astype(numpy.float32)
|
||||
target_points = numpy.array([ [ 0, 0 ], [ crop_size[0], 0 ], [ 0, crop_size[1] ] ]).astype(numpy.float32)
|
||||
|
@@ -10,7 +10,7 @@ import facefusion.processors.core as processors
|
||||
from facefusion import config, content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, inference_manager, logger, process_manager, state_manager, wording
|
||||
from facefusion.download import conditional_download_hashes, conditional_download_sources
|
||||
from facefusion.face_analyser import get_many_faces, get_one_face
|
||||
from facefusion.face_helper import paste_back, warp_face_by_face_landmark_5
|
||||
from facefusion.face_helper import paste_back, warp_face_for_deepfacelive
|
||||
from facefusion.face_masker import create_occlusion_mask, create_static_box_mask
|
||||
from facefusion.face_selector import find_similar_faces, sort_and_filter_faces
|
||||
from facefusion.face_store import get_reference_faces
|
||||
@@ -42,7 +42,9 @@ MODEL_SET : ModelSet =\
|
||||
'path': resolve_relative_path('../.assets/models/Jackie_Chan.dfm')
|
||||
}
|
||||
},
|
||||
'template': 'arcface_128_v2',
|
||||
'x_shift': 0.0,
|
||||
'y_shift': 0.0,
|
||||
'coverage': 2.2,
|
||||
'size': (224, 224)
|
||||
}
|
||||
}
|
||||
@@ -110,9 +112,11 @@ def post_process() -> None:
|
||||
|
||||
|
||||
def swap_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
|
||||
model_template = get_model_options().get('template')
|
||||
model_size = get_model_options().get('size')
|
||||
crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark_set.get('5/68'), model_template, model_size)
|
||||
model_coverage = get_model_options().get('coverage')
|
||||
model_x_shift = get_model_options().get('x_shift')
|
||||
model_y_shift = get_model_options().get('y_shift')
|
||||
crop_vision_frame, affine_matrix = warp_face_for_deepfacelive(temp_vision_frame, target_face.landmark_set.get('5/68'), model_size, model_coverage, model_x_shift, model_y_shift)
|
||||
crop_vision_frame_raw = crop_vision_frame.copy()
|
||||
box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], state_manager.get_item('face_mask_blur'), state_manager.get_item('face_mask_padding'))
|
||||
crop_masks =\
|
||||
@@ -166,10 +170,10 @@ def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame:
|
||||
|
||||
def prepare_crop_mask(crop_source_mask : Mask, crop_target_mask : Mask) -> Mask:
|
||||
model_size = get_model_options().get('size')
|
||||
crop_mask = numpy.maximum.reduce([ crop_source_mask, crop_target_mask ])
|
||||
crop_mask = numpy.minimum.reduce([ crop_source_mask, crop_target_mask ])
|
||||
crop_mask = crop_mask.reshape(model_size).clip(0, 1)
|
||||
crop_mask = cv2.erode(crop_mask, numpy.ones((5, 5), numpy.uint8), iterations = 1)
|
||||
crop_mask = cv2.GaussianBlur(crop_mask, (9, 9), 0)
|
||||
crop_mask = cv2.erode(crop_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations = 2)
|
||||
crop_mask = cv2.GaussianBlur(crop_mask, (0, 0), 6.25)
|
||||
return crop_mask
|
||||
|
||||
|
||||
|
@@ -53,6 +53,7 @@ FaceStore = TypedDict('FaceStore',
|
||||
VisionFrame = NDArray[Any]
|
||||
Mask = NDArray[Any]
|
||||
Points = NDArray[Any]
|
||||
Direction = NDArray[Any]
|
||||
Distance = NDArray[Any]
|
||||
Matrix = NDArray[Any]
|
||||
Anchors = NDArray[Any]
|
||||
@@ -85,7 +86,7 @@ ProcessStep = Callable[[str, int, Args], bool]
|
||||
|
||||
Content = Dict[str, Any]
|
||||
|
||||
WarpTemplate = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512', 'mtcnn_512', 'styleganex_384']
|
||||
WarpTemplate = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512', 'mtcnn_512', 'styleganex_384', 'deep_face_live']
|
||||
WarpTemplateSet = Dict[WarpTemplate, NDArray[Any]]
|
||||
ProcessMode = Literal['output', 'preview', 'stream']
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user