
import gradio

import facefusion.globals
from facefusion.utilities import conditional_download
from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, limit_resources, benchmark_options, benchmark


def pre_check() -> bool:
	# download the benchmark example media once, unless downloads are disabled via facefusion.globals.skip_download
	if not facefusion.globals.skip_download:
		conditional_download('.assets/examples',
		[
			'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg',
			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4',
			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-360p.mp4',
			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-540p.mp4',
			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-720p.mp4',
			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1080p.mp4',
			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1440p.mp4',
			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-2160p.mp4'
		])
		return True
	return False


def pre_render() -> bool:
	return True


def render() -> gradio.Blocks:
	with gradio.Blocks() as layout:
		with gradio.Row():
			with gradio.Column(scale = 2):
				with gradio.Blocks():
					about.render()
				with gradio.Blocks():
					frame_processors.render()
					frame_processors_options.render()
				with gradio.Blocks():
					execution.render()
					execution_thread_count.render()
					execution_queue_count.render()
				with gradio.Blocks():
					limit_resources.render()
				with gradio.Blocks():
					benchmark_options.render()
			with gradio.Column(scale = 5):
				with gradio.Blocks():
					benchmark.render()
	return layout


def listen() -> None:
	frame_processors.listen()
	frame_processors_options.listen()
	execution.listen()
	execution_thread_count.listen()
	execution_queue_count.listen()
	limit_resources.listen()
	benchmark.listen()


def run(ui : gradio.Blocks) -> None:
	# queue incoming UI events with two workers and keep the API closed to direct requests
	ui.queue(concurrency_count = 2, api_open = False).launch(show_api = False)
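

# Illustrative usage only, not part of the original module: a minimal sketch of how this
# layout could be driven standalone, assuming the imported component modules resolve their
# own dependencies. Event handlers are attached by re-entering the Blocks context before
# launching, which is standard Gradio usage.
if __name__ == '__main__':
	if pre_check() and pre_render():
		layout = render()
		with layout:
			listen()
		run(layout)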