Fix/age modifier styleganex 512 (#798)

* fix

* styleganex template

* changes

* changes

* fix occlusion mask

* add age modifier scale

* change

* change

* hardcode

* Cleanup

* Use model_sizes and model_templates variables

* No need for prepare when just 2 lines of code

* Someone used spaces over tabs

* Revert back [0][0]

---------

Co-authored-by: harisreedhar <h4harisreedhar.s.s@gmail.com>
Henry Ruhs 2024-10-18 10:40:33 +02:00 committed by henryruhs
parent 20d2b6a4ea
commit cd85a454f2
3 changed files with 33 additions and 15 deletions

facefusion/face_helper.py

@@ -48,6 +48,14 @@ WARP_TEMPLATES : WarpTemplateSet =\
 		[ 0.50019127, 0.61942959 ],
 		[ 0.39032951, 0.77598822 ],
 		[ 0.61178945, 0.77476328 ]
 	]),
+	'styleganex_512': numpy.array(
+	[
+		[ 0.43907768, 0.54098284 ],
+		[ 0.56204778, 0.54122359 ],
+		[ 0.50123859, 0.61331904 ],
+		[ 0.44716341, 0.66936502 ],
+		[ 0.55637032, 0.66911184 ]
+	])
 }
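
Note: each warp template stores the five canonical landmark positions in normalized [0, 1] coordinates; warp_face_by_face_landmark_5 scales them by the crop size and estimates a similarity transform from the detected landmarks onto them. A minimal sketch of that pattern, assuming this template layout (names and the RANSAC threshold are illustrative, not necessarily the exact facefusion internals):

import cv2
import numpy

def warp_face_by_template_sketch(vision_frame, face_landmark_5, template, crop_size):
	# scale the normalized template points into pixel space
	normed_template = template * crop_size
	# similarity transform from the detected landmarks onto the template
	affine_matrix = cv2.estimateAffinePartial2D(face_landmark_5, normed_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0]
	crop_vision_frame = cv2.warpAffine(vision_frame, affine_matrix, crop_size, borderMode = cv2.BORDER_REPLICATE)
	return crop_vision_frame, affine_matrix

Compared to the existing templates, the styleganex_512 points sit much closer to the crop center, so an aligned 512 crop keeps far more of the surrounding background.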

facefusion/processors/modules/age_modifier.py

@@ -43,10 +43,19 @@ MODEL_SET : ModelSet =\
 			{
 				'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models-3.0.0/styleganex_age.onnx',
 				'path': resolve_relative_path('../.assets/models/styleganex_age.onnx')
 			}
 		},
-		'template': 'ffhq_512',
-		'size': (512, 512)
+		'templates':
+		{
+			'target': 'ffhq_512',
+			'target_with_background': 'styleganex_512'
+		},
+		'sizes':
+		{
+			'target': (256, 256),
+			'target_with_background': (512, 512)
+		}
 	}
 }
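
Note: the single template/size pair becomes two named slots, and modify_age below produces one crop per slot and feeds both to the model: 'target' is the 256×256 ffhq_512-aligned face crop, 'target_with_background' the new 512×512 styleganex_512-aligned context crop. A minimal mirror of the shape, to make the lookups below concrete (in the real module, get_model_options() resolves this entry from MODEL_SET):

options =\
{
	'templates': { 'target': 'ffhq_512', 'target_with_background': 'styleganex_512' },
	'sizes': { 'target': (256, 256), 'target_with_background': (512, 512) }
}
model_templates = options.get('templates')
model_sizes = options.get('sizes')
print(model_templates.get('target'), model_sizes.get('target'))  # ffhq_512 (256, 256)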
@@ -115,15 +124,14 @@ def post_process() -> None:
 def modify_age(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
-	model_template = get_model_options().get('template')
-	model_size = get_model_options().get('size')
-	crop_size = (model_size[0] // 2, model_size[1] // 2)
+	model_templates = get_model_options().get('templates')
+	model_sizes = get_model_options().get('sizes')
 	face_landmark_5 = target_face.landmark_set.get('5/68').copy()
-	extend_face_landmark_5 = scale_face_landmark_5(face_landmark_5, 2.0)
-	crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_template, crop_size)
-	extend_vision_frame, extend_affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, extend_face_landmark_5, model_template, model_size)
+	crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, model_templates.get('target'), model_sizes.get('target'))
+	extend_face_landmark_5 = scale_face_landmark_5(face_landmark_5, 0.875)
+	extend_vision_frame, extend_affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, extend_face_landmark_5, model_templates.get('target_with_background'), model_sizes.get('target_with_background'))
 	extend_vision_frame_raw = extend_vision_frame.copy()
-	box_mask = create_static_box_mask(model_size, state_manager.get_item('face_mask_blur'), (0, 0, 0, 0))
+	box_mask = create_static_box_mask(model_sizes.get('target_with_background'), state_manager.get_item('face_mask_blur'), (0, 0, 0, 0))
 	crop_masks =\
 	[
 		box_mask
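
Note: the old code expanded the ffhq_512 landmarks by 2.0 to fake a background crop; the new code aligns against the dedicated styleganex_512 template and only contracts the detected landmarks by 0.875 to fine-tune the framing. A plausible sketch of such a landmark scaling, assuming it pivots about the centroid (the real scale_face_landmark_5 may choose a different pivot):

import numpy

def scale_face_landmark_5_sketch(face_landmark_5, scale):
	center = face_landmark_5.mean(axis = 0)
	# scale < 1 pulls the points together, which zooms the aligned crop in slightly
	return (face_landmark_5 - center) * scale + center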
@@ -132,7 +140,7 @@ def modify_age(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
 	if 'occlusion' in state_manager.get_item('face_mask_types'):
 		occlusion_mask = create_occlusion_mask(crop_vision_frame)
 		combined_matrix = merge_matrix([ extend_affine_matrix, cv2.invertAffineTransform(affine_matrix) ])
-		occlusion_mask = cv2.warpAffine(occlusion_mask, combined_matrix, model_size)
+		occlusion_mask = cv2.warpAffine(occlusion_mask, combined_matrix, model_sizes.get('target_with_background'))
 		crop_masks.append(occlusion_mask)
 	crop_vision_frame = prepare_vision_frame(crop_vision_frame)
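
Note: the occlusion mask is predicted on the 256×256 'target' crop but must land in 512×512 'target_with_background' space, hence the two-step transform: cv2.invertAffineTransform(affine_matrix) maps target-crop coordinates back to frame coordinates, and extend_affine_matrix maps those into the extended crop. An illustrative composition of 2x3 affines via their 3x3 homogeneous forms (facefusion's merge_matrix is assumed to behave along these lines):

import numpy

def merge_matrix_sketch(matrices):
	merged = numpy.vstack([ matrices[0], [ 0, 0, 1 ] ])
	for matrix in matrices[1:]:
		merged = merged @ numpy.vstack([ matrix, [ 0, 0, 1 ] ])
	return merged[:2]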
@@ -140,9 +148,10 @@ def modify_age(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
 	extend_vision_frame = forward(crop_vision_frame, extend_vision_frame)
 	extend_vision_frame = normalize_extend_frame(extend_vision_frame)
 	extend_vision_frame = fix_color(extend_vision_frame_raw, extend_vision_frame)
-	extend_crop_mask = cv2.pyrUp(numpy.minimum.reduce(crop_masks).clip(0, 1))
-	extend_affine_matrix *= extend_vision_frame.shape[0] / 512
-	paste_vision_frame = paste_back(temp_vision_frame, extend_vision_frame, extend_crop_mask, extend_affine_matrix)
+	extend_affine_matrix *= (model_sizes.get('target')[0] * 4) / model_sizes.get('target_with_background')[0]
+	crop_mask = numpy.minimum.reduce(crop_masks).clip(0, 1)
+	crop_mask = cv2.resize(crop_mask, (model_sizes.get('target')[0] * 4, model_sizes.get('target')[1] * 4))
+	paste_vision_frame = paste_back(temp_vision_frame, extend_vision_frame, crop_mask, extend_affine_matrix)
 	return paste_vision_frame
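
Note: normalize_extend_frame (next hunk) now returns the styled frame at target * 4 = 1024 pixels, while extend_affine_matrix was estimated for the 512×512 extended crop, so the matrix is rescaled before paste_back and the combined mask is resized onto the same 1024 grid; this replaces the hardcoded / 512 and the pyrUp call. A worked check of the factor under the sizes defined above:

target_size = (256, 256)
target_with_background_size = (512, 512)
scale = (target_size[0] * 4) / target_with_background_size[0]
assert scale == 2.0  # the 512-space affine now addresses the 1024 output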
@@ -204,12 +213,13 @@ def prepare_vision_frame(vision_frame : VisionFrame) -> VisionFrame:
 def normalize_extend_frame(extend_vision_frame : VisionFrame) -> VisionFrame:
+	model_sizes = get_model_options().get('sizes')
 	extend_vision_frame = numpy.clip(extend_vision_frame, -1, 1)
 	extend_vision_frame = (extend_vision_frame + 1) / 2
 	extend_vision_frame = extend_vision_frame.transpose(1, 2, 0).clip(0, 255)
 	extend_vision_frame = (extend_vision_frame * 255.0)
 	extend_vision_frame = extend_vision_frame.astype(numpy.uint8)[:, :, ::-1]
-	extend_vision_frame = cv2.pyrDown(extend_vision_frame)
+	extend_vision_frame = cv2.resize(extend_vision_frame, (model_sizes.get('target')[0] * 4, model_sizes.get('target')[1] * 4), interpolation = cv2.INTER_AREA)
 	return extend_vision_frame
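
Note: the function converts the CHW output in [-1, 1] to an HWC uint8 BGR frame, then downscales with cv2.INTER_AREA, the usual interpolation for shrinking, instead of cv2.pyrDown, which can only halve. A hedged illustration of that path on a dummy tensor; the 3x2048x2048 raw output size is an assumption inferred from the former halving step:

import numpy

raw_output = numpy.random.uniform(-1, 1, (3, 2048, 2048)).astype(numpy.float32)
frame = (numpy.clip(raw_output, -1, 1) + 1) / 2            # [-1, 1] -> [0, 1]
frame = frame.transpose(1, 2, 0)                           # CHW -> HWC
frame = (frame * 255.0).astype(numpy.uint8)[:, :, ::-1]    # to uint8, RGB -> BGR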

facefusion/typing.py

@@ -85,7 +85,7 @@ ProcessStep = Callable[[str, int, Args], bool]
 Content = Dict[str, Any]
-WarpTemplate = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512', 'mtcnn_512']
+WarpTemplate = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512', 'mtcnn_512', 'styleganex_512']
 WarpTemplateSet = Dict[WarpTemplate, NDArray[Any]]
 ProcessMode = Literal['output', 'preview', 'stream']
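
Note: widening the WarpTemplate literal is what lets WARP_TEMPLATES carry the new key and lets call sites pass 'styleganex_512' without static type errors. A trimmed illustration:

from typing import Any, Dict, Literal

import numpy
from numpy.typing import NDArray

WarpTemplate = Literal['ffhq_512', 'styleganex_512']
WarpTemplateSet = Dict[WarpTemplate, NDArray[Any]]

warp_templates : WarpTemplateSet =\
{
	'styleganex_512': numpy.zeros((5, 2))  # accepted now; rejected before the change
}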