
* Improve typing for our callbacks * Return 0 for get_download_size * Introduce ONNX powered face enhancer * Introduce ONNX powered face enhancer * Introduce ONNX powered face enhancer * Remove tile processing from frame enhancer * Fix video compress translation for libvpx-vp9 * Allow zero values for video compression * Develop (#134) * Introduce model options to the frame processors * Finish UI to select frame processors models * Simplify frame processors options * Fix lint in CI * Rename all kind of settings to options * Add blend to enhancers * Simplify webcam mode naming * Bypass SSL issues under Windows * Fix blend of frame enhancer * Massive CLI refactoring, Register and apply ARGS via the frame processors * Refine UI theme and introduce donate button * Update dependencies and fix cpu only torch * Update dependencies and fix cpu only torch * Fix theme, Fix frame_processors in headless mode * Remove useless astype * Disable CoreML for the ONNX face enhancer * Disable CoreML for the ONNX face enhancer * Predict webcam too * Improve resize of preview * Change output quality defaults, Move options to the right * Support for codeformer model * Update the typo * Add GPEN and GFPGAN 1.2 * Extract blend_frame methods * Extend the installer * Revert broken Gradio * Rework on ui components * Move output path selector to the output options * Remove tons of pointless component updates * Reset more base theme styling * Use latest Gradio * Fix the sliders * More styles * Update torch to 2.1.0 * Add RealESRNet_x4plus * Fix that button * Use latest onnxruntime-silicon * Looks stable to me * Lowercase model keys, Update preview and readme
59 lines
1.4 KiB
Python
59 lines
1.4 KiB
Python
import threading
|
|
from functools import lru_cache
|
|
|
|
import numpy
|
|
import opennsfw2
|
|
from PIL import Image
|
|
from keras import Model
|
|
|
|
from facefusion.typing import Frame
|
|
|
|
# Lazily-created open_nsfw Keras model; built on first call to get_predictor().
PREDICTOR = None
# Guards the lazy creation of PREDICTOR so concurrent callers build it only once.
THREAD_LOCK : threading.Lock = threading.Lock()
# NSFW probability above this threshold flags the content.
MAX_PROBABILITY = 0.75
# Analyse only every 25th frame when scanning streams/videos, to keep cost low.
FRAME_INTERVAL = 25
# Running frame counter used by predict_stream() to apply FRAME_INTERVAL sampling.
STREAM_COUNTER = 0
|
|
|
|
|
|
def get_predictor() -> Model:
	"""
	Return the shared open_nsfw model, creating it on first use.

	Thread-safe: creation is guarded by THREAD_LOCK so the model is built
	only once even under concurrent calls.

	:return: the cached opennsfw2 Keras model
	"""
	global PREDICTOR

	# Fast path: once PREDICTOR is set, skip the lock entirely.
	# Reading/assigning a single reference is atomic under the GIL,
	# so the double-checked pattern is safe here.
	if PREDICTOR is None:
		with THREAD_LOCK:
			if PREDICTOR is None:
				PREDICTOR = opennsfw2.make_open_nsfw_model()
	return PREDICTOR
|
|
|
|
|
|
def clear_predictor() -> None:
	"""
	Drop the cached NSFW model so the next get_predictor() call rebuilds it.
	"""
	global PREDICTOR

	PREDICTOR = None
|
|
|
|
|
|
def predict_stream(frame : Frame) -> bool:
	"""
	Run NSFW detection on a live stream, sampling one frame per FRAME_INTERVAL.

	:param frame: the current stream frame
	:return: True when the sampled frame is classified as NSFW, False otherwise
	"""
	global STREAM_COUNTER

	STREAM_COUNTER += 1
	# Skip analysis for all but every FRAME_INTERVAL-th frame to keep streaming cheap.
	if STREAM_COUNTER % FRAME_INTERVAL:
		return False
	return predict_frame(frame)
|
|
|
|
|
|
def predict_frame(frame : Frame) -> bool:
	"""
	Classify a single frame as NSFW or not.

	:param frame: the raw frame as a numpy array
	:return: True when the NSFW probability exceeds MAX_PROBABILITY
	"""
	# Convert the raw array into the input layout open_nsfw expects.
	pil_image = Image.fromarray(frame)
	preprocessed = opennsfw2.preprocess_image(pil_image, opennsfw2.Preprocessing.YAHOO)
	batch = numpy.expand_dims(preprocessed, axis = 0)
	# predict() returns [sfw, nsfw] probabilities for the one-image batch.
	_, nsfw_probability = get_predictor().predict(batch)[0]
	return nsfw_probability > MAX_PROBABILITY
|
|
|
|
|
|
@lru_cache(maxsize = None)
def predict_image(image_path : str) -> bool:
	"""
	Classify the image at image_path as NSFW or not (result cached per path).

	:param image_path: path to the image file
	:return: True when the NSFW probability exceeds MAX_PROBABILITY
	"""
	nsfw_probability = opennsfw2.predict_image(image_path)
	return nsfw_probability > MAX_PROBABILITY
|
|
|
|
|
|
@lru_cache(maxsize = None)
def predict_video(video_path : str) -> bool:
	"""
	Scan a video for NSFW content, sampling one frame per FRAME_INTERVAL
	(result cached per path).

	:param video_path: path to the video file
	:return: True when any sampled frame exceeds MAX_PROBABILITY
	"""
	_, probabilities = opennsfw2.predict_video_frames(video_path = video_path, frame_interval = FRAME_INTERVAL)
	# Flag the whole video as soon as one sampled frame crosses the threshold.
	for probability in probabilities:
		if probability > MAX_PROBABILITY:
			return True
	return False
|