facefusion/facefusion/uis/components/trim_frame.py
Henry Ruhs 4ccf4c24c7
Next (#477)
* Add real_hatgan_x4 model

* Mark it as NEXT

* Force download to be executed and exit

* Fix frame per second interpolation

* 5 to 68 landmark (#456)

* changes

* changes

* Adjust model url

* Cleanup 5 to 68 landmark conversion

* Move everything to face analyser

* Introduce matrix only face helper

* Revert facefusion.ini

* Adjust limit due to false positive analysis

* changes (#457)

* Use pixel format yuv422p to merge video

* Fix some code

* Minor cleanup

* Add gpen_bfr_1024 and gpen_bfr_2048

* Revert it back to yuv420p due to compatibility issues

* Add debug back to ffmpeg

* Add debug back to ffmpeg

* Migrate to conda (#461)

* Migrate from venv to conda

* Migrate from venv to conda

* Message when conda is not activated

* Use release for every slider (#463)

* Use release event handler for every slider

* Move more sliders to release handler

* Move more sliders to release handler

* Add get_ui_components() to simplify code

* Revert some changes on frame slider

* Add the first iteration of a frame colorizer

* Support for the DDColor model

* Improve model file handling

* Improve model file handling part2

* Remove deoldify

* Remove deoldify

* Voice separator (#468)

* changes

* changes

* changes

* changes

* changes

* changes

* Rename audio extractor to voice extractor

* Cosmetic changes

* Cosmetic changes

* Fix fps lowering and boosting

* Fix fps lowering and boosting

* Fix fps lowering and boosting

* Some refactoring for audio.py and some astype() here and there (#470)

* Some refactoring for audio.py and some astype() here and there

* Fix lint

* Spacing

* Add mp3 to benchmark suite for lip syncer testing

* Improve naming

* Adjust chunk size

* Use higher quality

* Revert "Use higher quality"

This reverts commit d32f287572.

* Improve naming in ffmpeg.py

* Simplify code

* Better fps calculation

* Fix naming here and there

* Add back real esrgan x2

* Remove trailing comma

* Update wording and README

* Use semaphore to prevent frame colorizer memory issues

* Revert "Remove deoldify"

This reverts commit bd8034cbc7.

* Remove unused type from frame colorizer

* Adjust naming

* Add missing clear of model initializer

* Change nvenc preset mapping to support old FFMPEG 4

* Update onnxruntime to 1.17.1

* Fix lint

* Prepare 2.5.0

* Fix Gradio overrides

* Add Deoldify Artistic back

* Feat/audio refactoring (#476)

* Improve audio naming and variables

* Improve audio naming and variables

* Refactor voice extractor like crazy

* Refactor voice extractor like crazy

* Remove spaces

* Update the usage

---------

Co-authored-by: Harisreedhar <46858047+harisreedhar@users.noreply.github.com>
2024-04-09 15:40:55 +02:00

74 lines
3.0 KiB
Python

from typing import Any, Dict, Tuple, Optional

import gradio

import facefusion.globals
from facefusion import wording
from facefusion.vision import count_video_frame_total
from facefusion.filesystem import is_video
from facefusion.uis.core import get_ui_component, register_ui_component

TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None
TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
	global TRIM_FRAME_START_SLIDER
	global TRIM_FRAME_END_SLIDER

	trim_frame_start_slider_args : Dict[str, Any] =\
	{
		'label': wording.get('uis.trim_frame_start_slider'),
		'step': 1,
		'minimum': 0,
		'maximum': 100,
		'visible': False
	}
	trim_frame_end_slider_args : Dict[str, Any] =\
	{
		'label': wording.get('uis.trim_frame_end_slider'),
		'step': 1,
		'minimum': 0,
		'maximum': 100,
		'visible': False
	}
	if is_video(facefusion.globals.target_path):
		video_frame_total = count_video_frame_total(facefusion.globals.target_path)
		trim_frame_start_slider_args['value'] = facefusion.globals.trim_frame_start or 0
		trim_frame_start_slider_args['maximum'] = video_frame_total
		trim_frame_start_slider_args['visible'] = True
		trim_frame_end_slider_args['value'] = facefusion.globals.trim_frame_end or video_frame_total
		trim_frame_end_slider_args['maximum'] = video_frame_total
		trim_frame_end_slider_args['visible'] = True
	with gradio.Row():
		TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args)
		TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args)
	register_ui_component('trim_frame_start_slider', TRIM_FRAME_START_SLIDER)
	register_ui_component('trim_frame_end_slider', TRIM_FRAME_END_SLIDER)


def listen() -> None:
	TRIM_FRAME_START_SLIDER.release(update_trim_frame_start, inputs = TRIM_FRAME_START_SLIDER)
	TRIM_FRAME_END_SLIDER.release(update_trim_frame_end, inputs = TRIM_FRAME_END_SLIDER)
	target_video = get_ui_component('target_video')
	if target_video:
		for method in [ 'upload', 'change', 'clear' ]:
			getattr(target_video, method)(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ])


def remote_update() -> Tuple[gradio.Slider, gradio.Slider]:
	if is_video(facefusion.globals.target_path):
		video_frame_total = count_video_frame_total(facefusion.globals.target_path)
		facefusion.globals.trim_frame_start = None
		facefusion.globals.trim_frame_end = None
		return gradio.Slider(value = 0, maximum = video_frame_total, visible = True), gradio.Slider(value = video_frame_total, maximum = video_frame_total, visible = True)
	return gradio.Slider(value = None, maximum = None, visible = False), gradio.Slider(value = None, maximum = None, visible = False)


def update_trim_frame_start(trim_frame_start : int) -> None:
	facefusion.globals.trim_frame_start = trim_frame_start if trim_frame_start > 0 else None


def update_trim_frame_end(trim_frame_end : int) -> None:
	video_frame_total = count_video_frame_total(facefusion.globals.target_path)
	facefusion.globals.trim_frame_end = trim_frame_end if trim_frame_end < video_frame_total else None
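
For context, a minimal sketch of how this component could be mounted, assuming a standalone gradio.Blocks wrapper and a hypothetical target video path; the actual wiring in the repository happens in the UI layout modules rather than in this file.

# Minimal usage sketch (not part of the file above): mounts the trim frame
# component inside a standalone gradio.Blocks context.
import gradio

import facefusion.globals
from facefusion.uis.components import trim_frame

# hypothetical example path; the sliders stay hidden unless this is a valid video
facefusion.globals.target_path = 'target.mp4'

with gradio.Blocks() as layout:
	trim_frame.render()
	trim_frame.listen()

layout.launch()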