diff --git a/README.md b/README.md
index 3879ca13e..84a84f5c5 100644
--- a/README.md
+++ b/README.md
@@ -59,6 +59,18 @@ Additional commandline arguments are currently unsupported and settings should b
 
 ### Changelog
 
+**10.03.2024** v3.5.5
+
+- Bugfix: Installer Path Env
+- Bugfix: file attributes
+- Video processing now checks for the presence of ffmpeg and displays a warning if it is not found
+- Removed gender + age detection to speed up processing; option removed from UI
+- Replaced restoreformer with restoreformer++
+- Live Cam recoded to run separately from the virtual cam and without blocking controls
+- Swapping with only one target face allows selecting from several input faces
+
+
 **08.01.2024** v3.5.0
 
 - Bugfix: wrong access options when creating folders
@@ -89,15 +101,15 @@ Initial Gradio Version - old TkInter Version now deprecated
 
 Lots of ideas, code or pre-trained models used from the following projects:
 
-https://github.com/deepinsight/insightface
-https://github.com/s0md3v/roop
-https://github.com/AUTOMATIC1111/stable-diffusion-webui
-https://github.com/Hillobar/Rope
-https://github.com/janvarev/chain-img-processor
-https://github.com/TencentARC/GFPGAN
-https://github.com/kadirnar/codeformer-pip
-https://github.com/csxmli2016/DMDNet
-
-
+https://github.com/deepinsight/insightface
+https://github.com/s0md3v/roop
+https://github.com/AUTOMATIC1111/stable-diffusion-webui
+https://github.com/Hillobar/Rope
+https://github.com/janvarev/chain-img-processor
+https://github.com/TencentARC/GFPGAN
+https://github.com/kadirnar/codeformer-pip
+https://github.com/csxmli2016/DMDNet
+
+
 Thanks to all developers!
diff --git a/installer/installer.py b/installer/installer.py
index 5bd52e259..2df89ae27 100644
--- a/installer/installer.py
+++ b/installer/installer.py
@@ -32,7 +32,7 @@ def install_dependencies():
     # Install Git and clone repo
     run_cmd("conda install -y -k git")
     run_cmd("git clone https://github.com/C0untFloyd/roop-unleashed.git")
-    run_cmd("git checkout 8ee085322158c4eeb0cd0126a49949f1acf0f7df")
+    run_cmd("git checkout 87943ad5413545db620921228bbcf73a6f9dab62")
 
     # Install the webui dependencies
     update_dependencies()
diff --git a/roop/ProcessMgr.py b/roop/ProcessMgr.py
index 96a0b5a08..b3c64c70a 100644
--- a/roop/ProcessMgr.py
+++ b/roop/ProcessMgr.py
@@ -81,6 +81,10 @@ def initialize(self, input_faces, target_faces, options):
         self.target_face_datas = target_faces
         self.options = options
 
+        roop.globals.g_desired_face_analysis = ["landmark_3d_68", "landmark_2d_106", "detection", "recognition"]
+        if options.swap_mode == "all_female" or options.swap_mode == "all_male":
+            roop.globals.g_desired_face_analysis.append("genderage")
+
         processornames = options.processors.split(",")
         devicename = get_device()
         if len(self.processors) < 1:
diff --git a/roop/core.py b/roop/core.py
index d7cea17bc..046abde34 100755
--- a/roop/core.py
+++ b/roop/core.py
@@ -120,8 +120,8 @@ def pre_check() -> bool:
     util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/inswapper_128.onnx'])
     util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/GFPGANv1.4.onnx'])
     util.conditional_download(download_directory_path, ['https://github.com/csxmli2016/DMDNet/releases/download/v1/DMDNet.pth'])
-    util.conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/GPEN-BFR-512.onnx'])
-    util.conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer_plus_plus.onnx'])
+    util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/GPEN-BFR-512.onnx'])
+    util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/restoreformer_plus_plus.onnx'])
     download_directory_path = util.resolve_relative_path('../models/CLIP')
     util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/rd64-uni-refined.pth'])
     download_directory_path = util.resolve_relative_path('../models/CodeFormer')
diff --git a/roop/face_util.py b/roop/face_util.py
index a48e38847..9454dda19 100644
--- a/roop/face_util.py
+++ b/roop/face_util.py
@@ -21,16 +21,20 @@ def get_face_analyser() -> Any:
     global FACE_ANALYSER
 
     with THREAD_LOCK_ANALYSER:
-        if FACE_ANALYSER is None:
+        if FACE_ANALYSER is None or roop.globals.g_current_face_analysis != roop.globals.g_desired_face_analysis:
            model_path = resolve_relative_path('..')
+            # removed genderage
+            allowed_modules = roop.globals.g_desired_face_analysis
+            roop.globals.g_current_face_analysis = roop.globals.g_desired_face_analysis
            if roop.globals.CFG.force_cpu:
                print("Forcing CPU for Face Analysis")
                FACE_ANALYSER = insightface.app.FaceAnalysis(
-                    name="buffalo_l", root=model_path, providers=["CPUExecutionProvider"]
+                    name="buffalo_l",
+                    root=model_path, providers=["CPUExecutionProvider"], allowed_modules=allowed_modules
                )
            else:
                FACE_ANALYSER = insightface.app.FaceAnalysis(
-                    name="buffalo_l", root=model_path, providers=roop.globals.execution_providers
+                    name="buffalo_l", root=model_path, providers=roop.globals.execution_providers, allowed_modules=allowed_modules
                )
            FACE_ANALYSER.prepare(
                ctx_id=0,
diff --git a/roop/filters.py b/roop/filters.py
new file mode 100644
index 000000000..757499dad
--- /dev/null
+++ b/roop/filters.py
@@ -0,0 +1,59 @@
+import numpy as np
+import cv2
+
+c64_palette = np.array([
+    [0, 0, 0],
+    [255, 255, 255],
+    [0x81, 0x33, 0x38],
+    [0x75, 0xce, 0xc8],
+    [0x8e, 0x3c, 0x97],
+    [0x56, 0xac, 0x4d],
+    [0x2e, 0x2c, 0x9b],
+    [0xed, 0xf1, 0x71],
+    [0x8e, 0x50, 0x29],
+    [0x55, 0x38, 0x00],
+    [0xc4, 0x6c, 0x71],
+    [0x4a, 0x4a, 0x4a],
+    [0x7b, 0x7b, 0x7b],
+    [0xa9, 0xff, 0x9f],
+    [0x70, 0x6d, 0xeb],
+    [0xb2, 0xb2, 0xb2]
+])
+
+def fast_quantize_to_palette(image):
+    # Map each pixel to the nearest palette color (Euclidean distance in RGB)
+    palette = c64_palette / 255.0  # Normalize palette
+    img_normalized = image / 255.0  # Normalize image
+
+    # Calculate the index of the palette color closest to each pixel in the image
+    indices = np.sqrt(((img_normalized[:, :, None, :] - palette[None, None, :, :]) ** 2).sum(axis=3)).argmin(axis=2)
+    # Map the image to the palette colors
+    mapped_image = palette[indices]
+
+    return (mapped_image * 255).astype(np.uint8)  # Denormalize and return the image
+
+
+'''
+knn = None
+
+def quantize_to_palette(image, palette):
+    global knn
+
+    NumColors = 16
+    quantized_image = None
+    cv2.pyrMeanShiftFiltering(image, NumColors / 4, NumColors / 2, quantized_image, 1, cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 5, 1)
+
+    palette = c64_palette
+    X_query = image.reshape(-1, 3).astype(np.float32)
+
+    if(knn == None):
+        X_index = palette.astype(np.float32)
+        knn = cv2.ml.KNearest_create()
+        knn.train(X_index, cv2.ml.ROW_SAMPLE, np.arange(len(palette)))
+
+    ret, results, neighbours, dist = knn.findNearest(X_query, 1)
+
+    quantized_image = np.array([palette[idx] for idx in neighbours.astype(int)])
+    quantized_image = quantized_image.reshape(image.shape)
+    return quantized_image.astype(np.uint8)
+'''
diff --git a/roop/globals.py b/roop/globals.py
index a2b4c5fd8..b1228e3d0 100644
--- a/roop/globals.py
+++ b/roop/globals.py
@@ -33,13 +33,17 @@
 no_face_action = 0
 
-processing = False
+processing = False
+
+g_current_face_analysis = None
+g_desired_face_analysis = None
 
 FACE_ENHANCER = None
 
 INPUT_FACESETS = []
 TARGET_FACES = []
 
+
 IMAGE_CHAIN_PROCESSOR = None
 VIDEO_CHAIN_PROCESSOR = None
 BATCH_IMAGE_CHAIN_PROCESSOR = None
diff --git a/roop/processors/frame/face_swapper.py b/roop/processors/frame/face_swapper.py
index 4b72e9706..35293827a 100644
--- a/roop/processors/frame/face_swapper.py
+++ b/roop/processors/frame/face_swapper.py
@@ -1,13 +1,9 @@
-from typing import Any, List, Callable
-import cv2
+from typing import Any
 import insightface
 import threading
 
 import roop.globals
-import roop.processors.frame.core
-from roop.face_util import get_first_face, get_all_faces
-from roop.typing import Face, Frame
-from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video, compute_cosine_distance, get_destfilename_from_path
+from roop.utilities import resolve_relative_path
 
 FACE_SWAPPER = None
 THREAD_LOCK = threading.Lock()
@@ -25,89 +21,3 @@ def get_face_swapper() -> Any:
             FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers)
 
     return FACE_SWAPPER
-
-
-def pre_check() -> bool:
-    download_directory_path = resolve_relative_path('../models')
-    conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/inswapper_128.onnx'])
-    return True
-
-
-def pre_start() -> bool:
-    return True
-
-
-def post_process() -> None:
-    global FACE_SWAPPER
-
-    FACE_SWAPPER = None
-
-
-def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
-    return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
-
-
-def process_frame(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
-    global DIST_THRESHOLD
-
-    if roop.globals.many_faces:
-        many_faces = get_all_faces(temp_frame)
-        if many_faces is not None:
-            for target_face in many_faces:
-                if target_face['det_score'] > 0.65:
-                    temp_frame = swap_face(source_face, target_face, temp_frame)
-    else:
-        if target_face:
-            target_embedding = target_face.embedding
-            many_faces = get_all_faces(temp_frame)
-            target_face = None
-            for dest_face in many_faces:
-                dest_embedding = dest_face.embedding
-                if compute_cosine_distance(target_embedding, dest_embedding) <= DIST_THRESHOLD:
-                    target_face = dest_face
-                    break
-            if target_face:
-                temp_frame = swap_face(source_face, target_face, temp_frame)
-            return temp_frame
-
-        target_face = get_first_face(temp_frame)
-        if target_face is not None:
-            temp_frame = swap_face(source_face, target_face, temp_frame)
-    return temp_frame
-
-
-
-def process_frames(is_batch: bool, source_face: Face, target_face: Face, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
-    for temp_frame_path in temp_frame_paths:
-        temp_frame = cv2.imread(temp_frame_path)
-        if temp_frame is not None:
-            result = process_frame(source_face, target_face, temp_frame)
-            if result is not None:
-                if is_batch:
-                    tf = get_destfilename_from_path(temp_frame_path, roop.globals.output_path, '_fake.png')
-                    cv2.imwrite(tf, result)
-                else:
-                    cv2.imwrite(temp_frame_path, result)
-        if update:
-            update()
-
-
-def process_image(source_face: Any, target_face: Any, target_path: str, output_path: str) -> None:
-    global DIST_THRESHOLD
-
-    target_frame = cv2.imread(target_path)
-    if target_frame is not None:
-        result = process_frame(source_face, target_face, target_frame)
-        if result is not None:
-            cv2.imwrite(output_path, result)
-
-
-def process_video(source_face: Any, target_face: Any, temp_frame_paths: List[str]) -> None:
-    global DIST_THRESHOLD
-
-    roop.processors.frame.core.process_video(source_face, target_face, temp_frame_paths, process_frames)
-
-
-def process_batch_images(source_face: Any, target_face: Any, temp_frame_paths: List[str]) -> None:
-    global DIST_THRESHOLD
-
-    roop.processors.frame.core.process_batch(source_face, target_face, temp_frame_paths, process_frames)
diff --git a/roop/virtualcam.py b/roop/virtualcam.py
index 2b404fa5c..ad750cbaf 100644
--- a/roop/virtualcam.py
+++ b/roop/virtualcam.py
@@ -1,5 +1,6 @@
 import cv2
 import roop.globals
+import ui.globals
 import pyvirtualcam
 import threading
 import time
@@ -9,11 +10,13 @@
 cam_thread = None
 vcam = None
 
-def virtualcamera(cam_num):
+def virtualcamera(streamobs, cam_num, width, height):
     from roop.core import live_swap
+    from roop.filters import fast_quantize_to_palette
+
     global cam_active
 
-    time.sleep(2)
+    #time.sleep(2)
     print('Starting capture')
     cap = cv2.VideoCapture(cam_num, cv2.CAP_DSHOW)
     if not cap.isOpened():
@@ -22,53 +25,61 @@ def virtualcamera(cam_num):
         del cap
         return
 
-    pref_width = 1280
-    pref_height = 720
+    pref_width = width
+    pref_height = height
     pref_fps_in = 30
     cap.set(cv2.CAP_PROP_FRAME_WIDTH, pref_width)
     cap.set(cv2.CAP_PROP_FRAME_HEIGHT, pref_height)
     cap.set(cv2.CAP_PROP_FPS, pref_fps_in)
-    print('Starting VCAM')
     cam_active = True
 
     # native format UYVY
-    with pyvirtualcam.Camera(width=pref_width, height=pref_height, fps=pref_fps_in, fmt=pyvirtualcam.PixelFormat.BGR, print_fps=True) as cam:
-
+    cam = None
+    if streamobs:
+        print('Detecting virtual cam devices')
+        cam = pyvirtualcam.Camera(width=pref_width, height=pref_height, fps=pref_fps_in, fmt=pyvirtualcam.PixelFormat.BGR, print_fps=False)
+    if cam:
        print(f'Using virtual camera: {cam.device}')
        print(f'Using {cam.native_fmt}')
-
-        # RGB
-
-        while cam_active:
-            ret, frame = cap.read()
-            if not ret:
-                break
-
-            if len(roop.globals.INPUT_FACESETS) > 0:
-                frame = live_swap(frame, "all", False, None, None)
-                cam.send(frame)
-            else:
-                cam.send(frame)
-            cam.sleep_until_next_frame()
-
+    else:
+        print('Not streaming to virtual camera!')
+
+    while cam_active:
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        if len(roop.globals.INPUT_FACESETS) > 0:
+            frame = live_swap(frame, "all", False, None, None)
+            #frame = fast_quantize_to_palette(frame)
+        if cam:
+            cam.send(frame)
+            cam.sleep_until_next_frame()
+        ui.globals.ui_camera_frame = frame
+
+    if cam:
+        cam.close()
     cap.release()
-    print('End cam')
+    print('Camera stopped')
 
 
-def start_virtual_cam(cam_number):
+def start_virtual_cam(streamobs, cam_number, resolution):
     global cam_thread, cam_active
     if not cam_active:
-        cam_thread = threading.Thread(target=virtualcamera, args=[cam_number])
+        width, height = map(int, resolution.split('x'))
+        cam_thread = threading.Thread(target=virtualcamera, args=[streamobs, cam_number, width, height])
         cam_thread.start()
 
 
 def stop_virtual_cam():
-    global cam_active
+    global cam_active, cam_thread
 
-    cam_active = False
+    if cam_active:
+        cam_active = False
+        cam_thread.join()
diff --git a/ui/globals.py b/ui/globals.py
index e7a91acb4..ae96bbf81 100644
--- a/ui/globals.py
+++ b/ui/globals.py
@@ -9,6 +9,7 @@
 ui_target_thumbs = []
 
 ui_live_cam_active = False
+ui_camera_frame = None
diff --git a/ui/tabs/faceswap_tab.py b/ui/tabs/faceswap_tab.py
index 638485244..2853c9328 100644
--- a/ui/tabs/faceswap_tab.py
+++ b/ui/tabs/faceswap_tab.py
@@ -89,7 +89,7 @@ def faceswap_tab():
 
         with gr.Row(variant='panel'):
             with gr.Column(scale=1):
-                selected_face_detection = gr.Dropdown(["First found", "All faces", "Selected face", "All female", "All male"], value="First found", label="Select face selection for swapping")
+                selected_face_detection = gr.Dropdown(["First found", "All female", "All male", "All faces", "Selected face"], value="First found", label="Specify face selection for swapping")
                 max_face_distance = gr.Slider(0.01, 1.0, value=0.65, label="Max Face Similarity Threshold")
                 video_swapping_method = gr.Dropdown(["Extract Frames to media","In-Memory processing"], value="In-Memory processing", label="Select video processing method", interactive=True)
                 no_face_action = gr.Dropdown(choices=no_face_choices, value=no_face_choices[0], label="Action on no face detected", interactive=True)
@@ -115,7 +115,7 @@ def faceswap_tab():
                     bt_start = gr.Button("▶ Start", variant='primary')
                     gr.Button("👀 Open Output Folder", size='sm').click(fn=lambda: util.open_folder(roop.globals.output_path))
                 with gr.Column():
-                    bt_stop = gr.Button("⏹ Stop", variant='secondary')
+                    bt_stop = gr.Button("⏹ Stop", variant='secondary', interactive=False)
                 with gr.Column(scale=2):
                     gr.Markdown(' ')
         with gr.Row(variant='panel'):
@@ -160,10 +160,10 @@ def faceswap_tab():
 
         start_event = bt_start.click(fn=start_swap, inputs=[ui.globals.ui_selected_enhancer, selected_face_detection,
                    roop.globals.keep_frames, roop.globals.wait_after_extraction, roop.globals.skip_audio, max_face_distance, ui.globals.ui_blend_ratio, chk_useclip, clip_text,video_swapping_method, no_face_action, vr_mode, autorotate, maskimage],
-                    outputs=[bt_start, resultfiles])
+                    outputs=[bt_start, bt_stop, resultfiles])
         after_swap_event = start_event.then(fn=on_resultfiles_finished, inputs=[resultfiles], outputs=[resultimage, resultvideo])
 
-        bt_stop.click(fn=stop_swap, cancels=[start_event, after_swap_event], queue=False)
+        bt_stop.click(fn=stop_swap, cancels=[start_event, after_swap_event], outputs=[bt_start, bt_stop], queue=False)
 
         bt_refresh_preview.click(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs)
         bt_toggle_masking.click(fn=on_toggle_masking, inputs=[previewimage, maskimage], outputs=[previewimage, maskimage])
@@ -516,7 +516,7 @@ def start_swap( enhancer, detection, keep_frames, wait_after_extraction, skip_au
     global is_processing, list_files_process
 
     if list_files_process is None or len(list_files_process) <= 0:
-        return gr.Button(variant="primary"), None
+        return gr.Button(variant="primary"), None, None
 
     if roop.globals.CFG.clear_output:
         shutil.rmtree(roop.globals.output_path)
@@ -544,10 +544,10 @@ def start_swap( enhancer, detection, keep_frames, wait_after_extraction, skip_au
     if roop.globals.face_swap_mode == 'selected':
         if len(roop.globals.TARGET_FACES) < 1:
             gr.Error('No Target Face selected!')
-            return gr.Button(variant="primary"), None
+            return gr.Button(variant="primary"), None, None
 
     is_processing = True
-    yield gr.Button(variant="secondary"), None
+    yield gr.Button(variant="secondary", interactive=False), gr.Button(variant="primary", interactive=True), None
     roop.globals.execution_threads = roop.globals.CFG.max_threads
     roop.globals.video_encoder = roop.globals.CFG.output_video_codec
     roop.globals.video_quality = roop.globals.CFG.video_quality
@@ -558,14 +558,15 @@ def start_swap( enhancer, detection, keep_frames, wait_after_extraction, skip_au
     outdir = pathlib.Path(roop.globals.output_path)
     outfiles = [str(item) for item in outdir.rglob("*") if item.is_file()]
     if len(outfiles) > 0:
-        yield gr.Button(variant="primary"),gr.Files(value=outfiles)
+        yield gr.Button(variant="primary", interactive=True), gr.Button(variant="secondary", interactive=False), gr.Files(value=outfiles)
     else:
-        yield gr.Button(variant="primary"),None
+        yield gr.Button(variant="primary", interactive=True), gr.Button(variant="secondary", interactive=False), None
 
 
 def stop_swap():
     roop.globals.processing = False
     gr.Info('Aborting processing - please wait for the remaining threads to be stopped')
+    return gr.Button(variant="primary", interactive=True), gr.Button(variant="secondary", interactive=False), None
 
 
 def on_fps_changed(fps):
diff --git a/ui/tabs/livecam_tab.py b/ui/tabs/livecam_tab.py
index 9526fdbc5..6a986f574 100644
--- a/ui/tabs/livecam_tab.py
+++ b/ui/tabs/livecam_tab.py
@@ -2,67 +2,55 @@
 import roop.globals
 import ui.globals
 
-fake_cam_image = None
-current_cam_image = None
-cam_swapping = False
-camthread = None
+camera_frame = None
 
 def livecam_tab():
     with gr.Tab("🎥 Live Cam"):
-        with gr.Row():
-            with gr.Column(scale=2):
-                cam_toggle = gr.Checkbox(label='Activate', value=ui.globals.ui_live_cam_active)
-            with gr.Column(scale=1):
-                vcam_toggle = gr.Checkbox(label='Stream to virtual camera', value=False)
-            with gr.Column(scale=1):
-                camera_num = gr.Slider(0, 2, value=0, label="Camera Number", step=1.0, interactive=True)
-
-        if ui.globals.ui_live_cam_active:
-            with gr.Row():
-                with gr.Column():
-                    cam = gr.Webcam(label='Camera', source='webcam', interactive=True, streaming=False)
-                with gr.Column():
-                    fake_cam_image = gr.Image(label='Fake Camera Output', interactive=False)
-
-        cam_toggle.change(fn=on_cam_toggle, inputs=[cam_toggle])
-
-        if ui.globals.ui_live_cam_active:
-            vcam_toggle.change(fn=on_vcam_toggle, inputs=[vcam_toggle, camera_num], outputs=[cam, fake_cam_image])
-            cam.stream(on_stream_swap_cam, inputs=[cam, ui.globals.ui_selected_enhancer, ui.globals.ui_blend_ratio], outputs=[fake_cam_image], preprocess=True, postprocess=True, show_progress="hidden")
-
-def on_cam_toggle(state):
-    ui.globals.ui_live_cam_active = state
-    gr.Warning('Server will be restarted for this change!')
-    ui.globals.ui_restart_server = True
+        with gr.Row(variant='panel'):
+            gr.Markdown("""
+            This feature allows you to use your physical webcam and apply the selected faces to the stream. You can also forward the stream to a virtual camera, which can be used in video calls or streaming software.
+            Supported virtual camera backends: v4l2loopback (Linux), OBS Virtual Camera (macOS/Windows) and unitycapture (Windows).
+            **Please note:** to change the face or any other settings, you need to stop and restart a running live cam.
+            """)
+
+        with gr.Row(variant='panel'):
+            with gr.Column():
+                bt_start = gr.Button("▶ Start", variant='primary')
+            with gr.Column():
+                bt_stop = gr.Button("⏹ Stop", variant='secondary', interactive=False)
+            with gr.Column():
+                camera_num = gr.Slider(0, 2, value=0, label="Camera Number", step=1.0, interactive=True)
+                cb_obs = gr.Checkbox(label="Forward stream to virtual camera", interactive=True)
+            with gr.Column():
+                dd_reso = gr.Dropdown(choices=["640x480", "1280x720", "1920x1080"], value="1280x720", label="Fake Camera Resolution", interactive=True)
 
-def on_vcam_toggle(state, num):
-    from roop.virtualcam import stop_virtual_cam, start_virtual_cam
-
-    if state:
-        yield gr.Webcam.update(interactive=False), None
-        start_virtual_cam(num)
-        return gr.Webcam.update(interactive=False), None
-    else:
-        stop_virtual_cam()
-        return gr.Webcam.update(interactive=True), None
+        with gr.Row():
+            fake_cam_image = gr.Image(label='Fake Camera Output', interactive=False)
 
+        start_event = bt_start.click(fn=start_cam, inputs=[cb_obs, camera_num, dd_reso, ui.globals.ui_selected_enhancer, ui.globals.ui_blend_ratio], outputs=[bt_start, bt_stop, fake_cam_image])
+        bt_stop.click(fn=stop_swap, cancels=[start_event], outputs=[bt_start, bt_stop], queue=False)
 
-def on_stream_swap_cam(camimage, enhancer, blend_ratio):
-    from roop.core import live_swap
-    global current_cam_image, cam_swapping, fake_cam_image
 
+def start_cam(stream_to_obs, cam, reso, enhancer, blend_ratio):
+    from roop.virtualcam import start_virtual_cam
+    from roop.utilities import convert_to_gradio
+
+    ui.globals.ui_live_cam_active = True
+    start_virtual_cam(stream_to_obs, cam, reso)
     roop.globals.selected_enhancer = enhancer
     roop.globals.blend_ratio = blend_ratio
-    if not cam_swapping:
-        cam_swapping = True
-        if len(roop.globals.INPUT_FACESETS) > 0:
-            current_cam_image = live_swap(camimage, "all", False, None, None, ui.globals.ui_SELECTED_INPUT_FACE_INDEX)
-        else:
-            current_cam_image = camimage
-        cam_swapping = False
-    return current_cam_image
+    while ui.globals.ui_live_cam_active:
+        yield gr.Button(interactive=False), gr.Button(interactive=True), convert_to_gradio(ui.globals.ui_camera_frame)
+    return gr.Button(interactive=True), gr.Button(interactive=False), convert_to_gradio(ui.globals.ui_camera_frame)
+
+
+def stop_swap():
+    from roop.virtualcam import stop_virtual_cam
+
+    stop_virtual_cam()
+    return gr.Button(interactive=True), gr.Button(interactive=False)
+
+
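
Note on the v3.5.5 changelog entry "Video processing checks for presence of ffmpeg": the check itself is not part of the hunks above. A minimal sketch of such a presence test in Python (illustrative only; the function name and warning text are assumptions, not the project's actual code):

import shutil

def ffmpeg_available() -> bool:
    # shutil.which returns the executable's path if ffmpeg is on PATH, else None
    return shutil.which("ffmpeg") is not None

if not ffmpeg_available():
    print("Warning: ffmpeg was not found - video processing will not work")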