Merge branch 'main' into dev
Re-added gender selection (dynamically loading needed model)
Disabling of start/stop buttons
C0untFloyd committed Mar 16, 2024
2 parents 7fe701d + d7911c4 commit 3bb6e2b
Showing 12 changed files with 189 additions and 195 deletions.
32 changes: 22 additions & 10 deletions README.md
@@ -59,6 +59,18 @@ Additional commandline arguments are currently unsupported and settings should b

### Changelog

**10.03.2024** v3.5.5

- Bugfix: Installer Path Env
- Bugfix: file attributes
- Video processing checks for presence of ffmpeg and displays a warning if not found (see the sketch after this entry)
- Removed gender + age detection to speed up processing. Option removed from UI
- Replaced restoreformer with restoreformer++
- Live Cam recoded to run separate from virtual cam and without blocking controls
- Swapping with only 1 target face allows selecting from several input faces
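
The ffmpeg presence check mentioned above is not part of the hunks shown below, so its exact form is unknown; a minimal sketch of such a check, using only the standard library, could look like this:

```python
import shutil

def ffmpeg_available() -> bool:
    # shutil.which returns the executable's path, or None if it is not on PATH
    return shutil.which("ffmpeg") is not None

if not ffmpeg_available():
    print("Warning: ffmpeg not found - video processing will not work.")
```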



**08.01.2024** v3.5.0

- Bugfix: wrong access options when creating folders
@@ -89,15 +101,15 @@ Initial Gradio Version - old TkInter Version now deprecated

Lots of ideas, code or pre-trained models used from the following projects:

-https://github.com/deepinsight/insightface
-https://github.com/s0md3v/roop
-https://github.com/AUTOMATIC1111/stable-diffusion-webui
-https://github.com/Hillobar/Rope
-https://github.com/janvarev/chain-img-processor
-https://github.com/TencentARC/GFPGAN
-https://github.com/kadirnar/codeformer-pip
-https://github.com/csxmli2016/DMDNet
-
-
+https://github.com/deepinsight/insightface<br />
+https://github.com/s0md3v/roop<br />
+https://github.com/AUTOMATIC1111/stable-diffusion-webui<br />
+https://github.com/Hillobar/Rope<br />
+https://github.com/janvarev/chain-img-processor<br />
+https://github.com/TencentARC/GFPGAN<br />
+https://github.com/kadirnar/codeformer-pip<br />
+https://github.com/csxmli2016/DMDNet<br />
+<br />
+<br />
Thanks to all developers!

2 changes: 1 addition & 1 deletion installer/installer.py
@@ -32,7 +32,7 @@ def install_dependencies():
    # Install Git and clone repo
    run_cmd("conda install -y -k git")
    run_cmd("git clone https://github.com/C0untFloyd/roop-unleashed.git")
-    run_cmd("git checkout 8ee085322158c4eeb0cd0126a49949f1acf0f7df")
+    run_cmd("git checkout 87943ad5413545db620921228bbcf73a6f9dab62")
    # Install the webui dependencies
    update_dependencies()

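run_cmd is defined elsewhere in installer.py and is not shown in this diff; a plausible minimal sketch, assuming it simply shells out and aborts the install on failure, might be:

```python
import subprocess
import sys

def run_cmd(cmd: str) -> None:
    # Hypothetical sketch of the helper used above: run a shell command,
    # stop the installation if it fails
    result = subprocess.run(cmd, shell=True)
    if result.returncode != 0:
        sys.exit(f"Command failed: {cmd}")
```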
4 changes: 4 additions & 0 deletions roop/ProcessMgr.py
@@ -81,6 +81,10 @@ def initialize(self, input_faces, target_faces, options):
        self.target_face_datas = target_faces
        self.options = options

+        roop.globals.g_desired_face_analysis=["landmark_3d_68", "landmark_2d_106","detection","recognition"]
+        if options.swap_mode == "all_female" or options.swap_mode == "all_male":
+            roop.globals.g_desired_face_analysis.append("genderage")
+
        processornames = options.processors.split(",")
        devicename = get_device()
        if len(self.processors) < 1:
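The effect of the added lines: the comparatively expensive genderage module is only requested from insightface when the swap mode actually filters by gender. Isolated from the roop globals, the selection logic reduces to this sketch:

```python
DEFAULT_MODULES = ["landmark_3d_68", "landmark_2d_106", "detection", "recognition"]

def desired_modules(swap_mode: str) -> list:
    # genderage is only needed when swapping is restricted to one gender
    modules = list(DEFAULT_MODULES)
    if swap_mode in ("all_female", "all_male"):
        modules.append("genderage")
    return modules

print(desired_modules("all"))         # no genderage -> faster face analysis
print(desired_modules("all_female"))  # genderage required for filtering
```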
4 changes: 2 additions & 2 deletions roop/core.py
@@ -120,8 +120,8 @@ def pre_check() -> bool:
    util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/inswapper_128.onnx'])
    util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/GFPGANv1.4.onnx'])
    util.conditional_download(download_directory_path, ['https://github.com/csxmli2016/DMDNet/releases/download/v1/DMDNet.pth'])
-    util.conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/GPEN-BFR-512.onnx'])
-    util.conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer_plus_plus.onnx'])
+    util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/GPEN-BFR-512.onnx'])
+    util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/restoreformer_plus_plus.onnx'])
    download_directory_path = util.resolve_relative_path('../models/CLIP')
    util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/rd64-uni-refined.pth'])
    download_directory_path = util.resolve_relative_path('../models/CodeFormer')
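util.conditional_download itself is not part of this diff; given the calling convention above (target directory plus a list of URLs), a minimal sketch of such a helper, assuming nothing beyond a file-exists check, could be:

```python
import os
import urllib.request

def conditional_download(directory: str, urls: list) -> None:
    # Download each file only if it does not already exist locally
    os.makedirs(directory, exist_ok=True)
    for url in urls:
        path = os.path.join(directory, os.path.basename(url))
        if not os.path.exists(path):
            urllib.request.urlretrieve(url, path)
```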
10 changes: 7 additions & 3 deletions roop/face_util.py
@@ -21,16 +21,20 @@ def get_face_analyser() -> Any:
    global FACE_ANALYSER

    with THREAD_LOCK_ANALYSER:
-        if FACE_ANALYSER is None:
+        if FACE_ANALYSER is None or roop.globals.g_current_face_analysis != roop.globals.g_desired_face_analysis:
            model_path = resolve_relative_path('..')
+            # removed genderage
+            allowed_modules = roop.globals.g_desired_face_analysis
+            roop.globals.g_current_face_analysis = roop.globals.g_desired_face_analysis
            if roop.globals.CFG.force_cpu:
                print("Forcing CPU for Face Analysis")
                FACE_ANALYSER = insightface.app.FaceAnalysis(
-                    name="buffalo_l", root=model_path, providers=["CPUExecutionProvider"]
+                    name="buffalo_l",
+                    root=model_path, providers=["CPUExecutionProvider"],allowed_modules=allowed_modules
                )
            else:
                FACE_ANALYSER = insightface.app.FaceAnalysis(
-                    name="buffalo_l", root=model_path, providers=roop.globals.execution_providers
+                    name="buffalo_l", root=model_path, providers=roop.globals.execution_providers,allowed_modules=allowed_modules
                )
            FACE_ANALYSER.prepare(
                ctx_id=0,
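The pattern introduced here is a lazily cached singleton that is rebuilt whenever the requested module set changes. Stripped of the roop and insightface specifics (build_analyser below is a hypothetical stand-in for insightface.app.FaceAnalysis), it reduces to:

```python
import threading

_LOCK = threading.Lock()
_analyser = None
_current_modules = None

def build_analyser(modules):
    # Hypothetical stand-in for insightface.app.FaceAnalysis(..., allowed_modules=modules)
    return {"modules": tuple(modules)}

def get_analyser(desired_modules):
    # Rebuild the cached analyser only when the requested module set changed
    global _analyser, _current_modules
    with _LOCK:
        if _analyser is None or _current_modules != desired_modules:
            _current_modules = list(desired_modules)
            _analyser = build_analyser(desired_modules)
    return _analyser
```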
59 changes: 59 additions & 0 deletions roop/filters.py
@@ -0,0 +1,59 @@
import numpy as np
import cv2

c64_palette = np.array([
    [0, 0, 0],
    [255, 255, 255],
    [0x81, 0x33, 0x38],
    [0x75, 0xce, 0xc8],
    [0x8e, 0x3c, 0x97],
    [0x56, 0xac, 0x4d],
    [0x2e, 0x2c, 0x9b],
    [0xed, 0xf1, 0x71],
    [0x8e, 0x50, 0x29],
    [0x55, 0x38, 0x00],
    [0xc4, 0x6c, 0x71],
    [0x4a, 0x4a, 0x4a],
    [0x7b, 0x7b, 0x7b],
    [0xa9, 0xff, 0x9f],
    [0x70, 0x6d, 0xeb],
    [0xb2, 0xb2, 0xb2]
])

def fast_quantize_to_palette(image):
    # Simply round the color values to the nearest color in the palette
    palette = c64_palette / 255.0  # Normalize palette
    img_normalized = image / 255.0  # Normalize image

    # Calculate the index in the palette that is closest to each pixel in the image
    indices = np.sqrt(((img_normalized[:, :, None, :] - palette[None, None, :, :]) ** 2).sum(axis=3)).argmin(axis=2)
    # Map the image to the palette colors
    mapped_image = palette[indices]

    return (mapped_image * 255).astype(np.uint8)  # Denormalize and return the image


'''
knn = None
def quantize_to_palette(image, palette):
    global knn
    NumColors = 16
    quantized_image = None
    cv2.pyrMeanShiftFiltering(image, NumColors / 4, NumColors / 2, quantized_image, 1, cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 5, 1)
    palette = c64_palette
    X_query = image.reshape(-1, 3).astype(np.float32)
    if(knn == None):
        X_index = palette.astype(np.float32)
        knn = cv2.ml.KNearest_create()
        knn.train(X_index, cv2.ml.ROW_SAMPLE, np.arange(len(palette)))
    ret, results, neighbours, dist = knn.findNearest(X_query, 1)
    quantized_image = np.array([palette[idx] for idx in neighbours.astype(int)])
    quantized_image = quantized_image.reshape(image.shape)
    return quantized_image.astype(np.uint8)
'''
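
fast_quantize_to_palette operates on any 8-bit, 3-channel frame; a quick way to try the new filter on a single image (file names are placeholders):

```python
import cv2
from roop.filters import fast_quantize_to_palette

frame = cv2.imread("input.png")  # any 8-bit, 3-channel image
if frame is not None:
    cv2.imwrite("output_c64.png", fast_quantize_to_palette(frame))
```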
6 changes: 5 additions & 1 deletion roop/globals.py
@@ -33,13 +33,17 @@

no_face_action = 0

-processing = False
+processing = False
+
+g_current_face_analysis = None
+g_desired_face_analysis = None

FACE_ENHANCER = None

INPUT_FACESETS = []
TARGET_FACES = []


IMAGE_CHAIN_PROCESSOR = None
VIDEO_CHAIN_PROCESSOR = None
BATCH_IMAGE_CHAIN_PROCESSOR = None
94 changes: 2 additions & 92 deletions roop/processors/frame/face_swapper.py
@@ -1,13 +1,9 @@
-from typing import Any, List, Callable
-import cv2
+from typing import Any
import insightface
import threading

import roop.globals
-import roop.processors.frame.core
-from roop.face_util import get_first_face, get_all_faces
-from roop.typing import Face, Frame
-from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video, compute_cosine_distance, get_destfilename_from_path
+from roop.utilities import resolve_relative_path

FACE_SWAPPER = None
THREAD_LOCK = threading.Lock()
@@ -25,89 +21,3 @@ def get_face_swapper() -> Any:
            FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers)
    return FACE_SWAPPER


-def pre_check() -> bool:
-    download_directory_path = resolve_relative_path('../models')
-    conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/inswapper_128.onnx'])
-    return True
-
-
-def pre_start() -> bool:
-    return True
-
-
-def post_process() -> None:
-    global FACE_SWAPPER
-
-    FACE_SWAPPER = None
-
-
-def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
-    return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
-
-
-def process_frame(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
-    global DIST_THRESHOLD
-
-    if roop.globals.many_faces:
-        many_faces = get_all_faces(temp_frame)
-        if many_faces is not None:
-            for target_face in many_faces:
-                if target_face['det_score'] > 0.65:
-                    temp_frame = swap_face(source_face, target_face, temp_frame)
-    else:
-        if target_face:
-            target_embedding = target_face.embedding
-            many_faces = get_all_faces(temp_frame)
-            target_face = None
-            for dest_face in many_faces:
-                dest_embedding = dest_face.embedding
-                if compute_cosine_distance(target_embedding, dest_embedding) <= DIST_THRESHOLD:
-                    target_face = dest_face
-                    break
-            if target_face:
-                temp_frame = swap_face(source_face, target_face, temp_frame)
-            return temp_frame
-
-        target_face = get_first_face(temp_frame)
-        if target_face is not None:
-            temp_frame = swap_face(source_face, target_face, temp_frame)
-    return temp_frame
-
-
-def process_frames(is_batch: bool, source_face: Face, target_face: Face, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
-    for temp_frame_path in temp_frame_paths:
-        temp_frame = cv2.imread(temp_frame_path)
-        if temp_frame is not None:
-            result = process_frame(source_face, target_face, temp_frame)
-            if result is not None:
-                if is_batch:
-                    tf = get_destfilename_from_path(temp_frame_path, roop.globals.output_path, '_fake.png')
-                    cv2.imwrite(tf, result)
-                else:
-                    cv2.imwrite(temp_frame_path, result)
-        if update:
-            update()
-
-
-def process_image(source_face: Any, target_face: Any, target_path: str, output_path: str) -> None:
-    global DIST_THRESHOLD
-
-    target_frame = cv2.imread(target_path)
-    if target_frame is not None:
-        result = process_frame(source_face, target_face, target_frame)
-        if result is not None:
-            cv2.imwrite(output_path, result)
-
-
-def process_video(source_face: Any, target_face: Any, temp_frame_paths: List[str]) -> None:
-    global DIST_THRESHOLD
-
-    roop.processors.frame.core.process_video(source_face, target_face, temp_frame_paths, process_frames)
-
-
-def process_batch_images(source_face: Any, target_face: Any, temp_frame_paths: List[str]) -> None:
-    global DIST_THRESHOLD
-
-    roop.processors.frame.core.process_batch(source_face, target_face, temp_frame_paths, process_frames)
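
The removed matching logic compared face embeddings via compute_cosine_distance from roop.utilities, which is not shown in this diff; it is presumably the standard cosine distance, sketched here:

```python
import numpy as np

def compute_cosine_distance(emb1, emb2) -> float:
    # 0.0 for identical directions, up to 2.0 for opposite ones;
    # a smaller distance means a more similar face embedding
    emb1 = np.asarray(emb1, dtype=np.float32)
    emb2 = np.asarray(emb2, dtype=np.float32)
    return 1.0 - np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2))
```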
65 changes: 38 additions & 27 deletions roop/virtualcam.py
@@ -1,5 +1,6 @@
import cv2
import roop.globals
+import ui.globals
import pyvirtualcam
import threading
import time
@@ -9,11 +10,13 @@
cam_thread = None
vcam = None

-def virtualcamera(cam_num):
+def virtualcamera(streamobs, cam_num,width,height):
    from roop.core import live_swap
+    from roop.filters import fast_quantize_to_palette

    global cam_active

-    time.sleep(2)
+    #time.sleep(2)
    print('Starting capture')
    cap = cv2.VideoCapture(cam_num, cv2.CAP_DSHOW)
    if not cap.isOpened():
@@ -22,53 +25,61 @@ def virtualcamera(cam_num):
        del cap
        return

-    pref_width = 1280
-    pref_height = 720
+    pref_width = width
+    pref_height = height
    pref_fps_in = 30
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, pref_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, pref_height)
    cap.set(cv2.CAP_PROP_FPS, pref_fps_in)
    print('Starting VCAM')
    cam_active = True
-    # native format UYVY
-
-    with pyvirtualcam.Camera(width=pref_width, height=pref_height, fps=pref_fps_in, fmt=pyvirtualcam.PixelFormat.BGR, print_fps=True) as cam:
+    cam = None
+    if streamobs:
+        print('Detecting virtual cam devices')
+        cam = pyvirtualcam.Camera(width=pref_width, height=pref_height, fps=pref_fps_in, fmt=pyvirtualcam.PixelFormat.BGR, print_fps=False)
+    if cam:
        print(f'Using virtual camera: {cam.device}')
        print(f'Using {cam.native_fmt}')
-        # RGB
-        while cam_active:
-            ret, frame = cap.read()
-            if not ret:
-                break
-
-            if len(roop.globals.INPUT_FACESETS) > 0:
-                frame = live_swap(frame, "all", False, None, None)
-                cam.send(frame)
-            else:
-                cam.send(frame)
-            cam.sleep_until_next_frame()
+    else:
+        print(f'Not streaming to virtual camera!')
+
+    while cam_active:
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        if len(roop.globals.INPUT_FACESETS) > 0:
+            frame = live_swap(frame, "all", False, None, None)
+            #frame = fast_quantize_to_palette(frame)
+        if cam:
+            cam.send(frame)
+            cam.sleep_until_next_frame()
+        ui.globals.ui_camera_frame = frame
+
+    if cam:
+        cam.close()
    cap.release()
-    print('End cam')
+    print('Camera stopped')



-def start_virtual_cam(cam_number):
+def start_virtual_cam(streamobs, cam_number, resolution):
    global cam_thread, cam_active

    if not cam_active:
-        cam_thread = threading.Thread(target=virtualcamera, args=[cam_number])
+        width, height = map(int, resolution.split('x'))
+        cam_thread = threading.Thread(target=virtualcamera, args=[streamobs, cam_number, width, height])
        cam_thread.start()



def stop_virtual_cam():
-    global cam_active
+    global cam_active, cam_thread

-    cam_active = False
+    if cam_active:
+        cam_active = False
+        cam_thread.join()
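
With the new signatures, callers pass the OBS streaming toggle, the capture device index, and a 'WxH' resolution string; a hypothetical caller could look like:

```python
import time
from roop.virtualcam import start_virtual_cam, stop_virtual_cam

# Stream from capture device 0 to a virtual camera at 1280x720
start_virtual_cam(True, 0, "1280x720")
time.sleep(30)  # swap live for half a minute
stop_virtual_cam()
```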

