From 4a5531b759a608d97a2090b77efbd597eb0ee6d3 Mon Sep 17 00:00:00 2001 From: chmjkb Date: Thu, 9 Apr 2026 13:15:31 +0200 Subject: [PATCH 01/19] mvp --- apps/computer-vision/app.json | 10 +- apps/computer-vision/app/_layout.tsx | 8 + apps/computer-vision/app/index.tsx | 6 + .../computer-vision/app/webrtc_test/index.tsx | 494 ++++++++++++++++++ apps/computer-vision/package.json | 2 + .../react-native-executorch-webrtc/README.md | 120 +++++ .../android/CMakeLists.txt | 83 +++ .../android/build.gradle | 75 +++ .../android/src/main/AndroidManifest.xml | 3 + .../src/main/cpp/FrameProcessorBridge.cpp | 461 ++++++++++++++++ .../webrtc/ExecutorchFrameProcessor.kt | 234 +++++++++ .../webrtc/ExecutorchFrameProcessorFactory.kt | 14 + .../com/executorch/webrtc/ExecutorchWebRTC.kt | 44 ++ .../webrtc/ExecutorchWebRTCModule.kt | 67 +++ .../webrtc/ExecutorchWebRTCPackage.kt | 16 + .../package.json | 45 ++ .../react-native.config.js | 15 + .../src/index.ts | 63 +++ .../src/useWebRTCFrameProcessor.ts | 173 ++++++ .../tsconfig.json | 28 + yarn.lock | 61 ++- 21 files changed, 2019 insertions(+), 3 deletions(-) create mode 100644 apps/computer-vision/app/webrtc_test/index.tsx create mode 100644 packages/react-native-executorch-webrtc/README.md create mode 100644 packages/react-native-executorch-webrtc/android/CMakeLists.txt create mode 100644 packages/react-native-executorch-webrtc/android/build.gradle create mode 100644 packages/react-native-executorch-webrtc/android/src/main/AndroidManifest.xml create mode 100644 packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCPackage.kt create mode 100644 packages/react-native-executorch-webrtc/package.json create mode 100644 packages/react-native-executorch-webrtc/react-native.config.js create mode 100644 packages/react-native-executorch-webrtc/src/index.ts create mode 100644 packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts create mode 100644 packages/react-native-executorch-webrtc/tsconfig.json diff --git a/apps/computer-vision/app.json b/apps/computer-vision/app.json index 4fcbca2ce3..12e2c35cea 100644 --- a/apps/computer-vision/app.json +++ b/apps/computer-vision/app.json @@ -17,7 +17,8 @@ "supportsTablet": true, "bundleIdentifier": "com.anonymous.computervision", "infoPlist": { - "NSCameraUsageDescription": "Process photo from camera" + "NSCameraUsageDescription": "Process photo from camera", + "NSMicrophoneUsageDescription": "Required for WebRTC (not currently used)" } }, "android": { @@ -26,7 +27,12 @@ "backgroundColor": "#ffffff" }, "package": "com.anonymous.computervision", - "permissions": ["android.permission.CAMERA"] + "permissions": [ + "android.permission.CAMERA", + "android.permission.INTERNET", + "android.permission.RECORD_AUDIO", + "android.permission.ACCESS_NETWORK_STATE" + ] }, "web": { "favicon": "./assets/icons/favicon.png" diff --git 
a/apps/computer-vision/app/_layout.tsx b/apps/computer-vision/app/_layout.tsx index ea47ebdb3f..0d11f0ed70 100644 --- a/apps/computer-vision/app/_layout.tsx +++ b/apps/computer-vision/app/_layout.tsx @@ -76,6 +76,14 @@ export default function _layout() { headerTitleStyle: { color: ColorPalette.primary }, }} /> + Vision Camera + router.navigate('webrtc_test/')} + > + WebRTC Test + router.navigate('classification/')} diff --git a/apps/computer-vision/app/webrtc_test/index.tsx b/apps/computer-vision/app/webrtc_test/index.tsx new file mode 100644 index 0000000000..a0406d66e3 --- /dev/null +++ b/apps/computer-vision/app/webrtc_test/index.tsx @@ -0,0 +1,494 @@ +import React, { useEffect, useRef, useState } from 'react'; +import { + View, + Text, + StyleSheet, + TouchableOpacity, + Alert, + Platform, +} from 'react-native'; +import { + RTCView, + mediaDevices, + MediaStream, + MediaStreamTrack, +} from 'react-native-webrtc'; +import { + useWebRTCFrameProcessor, + configureBackgroundRemoval, +} from 'react-native-executorch-webrtc'; +import { SELFIE_SEGMENTATION, ResourceFetcher } from 'react-native-executorch'; +import ColorPalette from '../../colors'; + +export default function WebRTCTest() { + const [stream, setStream] = useState(null); + const [isFrontCamera, setIsFrontCamera] = useState(true); + const [cameraStarted, setCameraStarted] = useState(false); + const [devices, setDevices] = useState([]); + const [processingResults, setProcessingResults] = useState(''); + const [modelStatus, setModelStatus] = useState('Not loaded'); + const [downloadProgress, setDownloadProgress] = useState(0); + const streamRef = useRef(null); + + // Download and configure the segmentation model on mount + useEffect(() => { + const downloadModel = async () => { + try { + setModelStatus('Downloading...'); + const paths = await ResourceFetcher.fetch((progress) => { + setDownloadProgress(progress); + }, SELFIE_SEGMENTATION.modelSource); + + if (!paths?.[0]) { + throw new Error('Failed to download model'); + } + + const modelPath = paths[0]; + console.log('Model downloaded:', modelPath); + + // Configure native WebRTC processor with the model path + configureBackgroundRemoval(modelPath); + setModelStatus(`Ready: ${modelPath.split('/').pop()}`); + } catch (error) { + console.error('Error downloading model:', error); + setModelStatus( + `Error: ${error instanceof Error ? error.message : 'Unknown'}` + ); + } + }; + + downloadModel(); + }, []); + + // Enable ExecuTorch frame processing on the stream + useWebRTCFrameProcessor(stream, { + onResults: (results) => { + console.log('Frame processing results:', results); + setProcessingResults(JSON.stringify(results, null, 2)); + }, + }); + + // Enumerate available devices + const enumerateDevices = async () => { + try { + const deviceInfos = await mediaDevices.enumerateDevices(); + console.log('Available devices:', deviceInfos); + setDevices(deviceInfos.filter((d: any) => d.kind === 'videoinput')); + } catch (error) { + console.error('Error enumerating devices:', error); + } + }; + + // Start camera with WebRTC getUserMedia + const startCamera = async () => { + try { + console.log('Requesting camera access...'); + + // Enumerate devices before requesting camera + await enumerateDevices(); + + const mediaStream = await mediaDevices.getUserMedia({ + video: { + facingMode: isFrontCamera ? 
'user' : 'environment', + frameRate: 30, + width: { ideal: 640 }, + height: { ideal: 480 }, + }, + audio: false, + }); + + console.log('Camera stream obtained:', mediaStream.id); + console.log('Video tracks:', mediaStream.getVideoTracks().length); + + const videoTrack = mediaStream.getVideoTracks()[0]; + if (videoTrack) { + console.log('Video track settings:', videoTrack.getSettings()); + + // getCapabilities() is not implemented on Android + try { + if (typeof videoTrack.getCapabilities === 'function') { + console.log( + 'Video track capabilities:', + videoTrack.getCapabilities() + ); + } + } catch (e) { + console.log('getCapabilities not supported on this platform'); + } + } + + setStream(mediaStream); + streamRef.current = mediaStream; + setCameraStarted(true); + } catch (error) { + console.error('Error accessing camera:', error); + Alert.alert( + 'Camera Error', + `Failed to access camera: ${error instanceof Error ? error.message : 'Unknown error'}` + ); + } + }; + + // Stop camera and release resources + const stopCamera = () => { + if (streamRef.current) { + console.log('Stopping camera...'); + streamRef.current.getTracks().forEach((track) => { + track.stop(); + console.log('Stopped track:', track.kind, track.id); + }); + setStream(null); + streamRef.current = null; + setCameraStarted(false); + } + }; + + // Switch between front and back camera using applyConstraints (recommended) + const switchCamera = async () => { + if (!streamRef.current) return; + + try { + console.log('Switching camera...'); + const videoTrack = streamRef.current.getVideoTracks()[0]; + + if (videoTrack) { + // Use applyConstraints instead of stopping and restarting + await videoTrack.applyConstraints({ + facingMode: isFrontCamera ? 'environment' : 'user', + }); + + setIsFrontCamera(!isFrontCamera); + console.log('Camera switched successfully'); + } + } catch (error) { + console.error( + 'Error switching camera with applyConstraints, falling back to restart:', + error + ); + + // Fallback: stop and restart + stopCamera(); + setIsFrontCamera(!isFrontCamera); + setTimeout(() => { + startCamera(); + }, 100); + } + }; + + // Cleanup on unmount + useEffect(() => { + return () => { + if (streamRef.current) { + stopCamera(); + } + }; + }, []); + + // Auto-start camera when facingMode changes + useEffect(() => { + if (cameraStarted && !stream) { + startCamera(); + } + }, [isFrontCamera]); + + return ( + + WebRTC Camera Test + + + Basic WebRTC camera test using react-native-webrtc's getUserMedia. This + tests the camera access without any ExecuTorch processing. + + + {/* Camera Preview */} + + {stream ? ( + + ) : ( + + + {cameraStarted ? 'Starting camera...' : 'Camera not started'} + + + )} + + {/* Overlay Info */} + {stream && ( + + + Camera: {isFrontCamera ? 'Front' : 'Back'} + + + Stream ID: {stream.id.slice(0, 8)}... + + + Tracks: {stream.getTracks().length} + + + )} + + + {/* Controls */} + + + Start Camera + + + + Stop Camera + + + + + Switch to {isFrontCamera ? 'Back' : 'Front'} + + + + + {/* Model Status */} + + Segmentation Model: + Status: {modelStatus} + {downloadProgress > 0 && downloadProgress < 1 && ( + + Progress: {(downloadProgress * 100).toFixed(0)}% + + )} + + + {/* Stream Info */} + + Stream Information: + {stream ? ( + <> + Stream URL: {stream.toURL()} + + Active: {stream.active ? 'Yes' : 'No'} + + {stream.getVideoTracks().map((track, idx) => ( + + Track {idx + 1}: + - ID: {track.id} + + {' '} + - Enabled: {track.enabled ? 
'Yes' : 'No'} + + + {' '} + - Ready State: {track.readyState} + + - Label: {track.label} + + ))} + + ) : ( + No active stream + )} + + {devices.length > 0 && ( + + + Available Cameras: {devices.length} + + {devices.map((device, idx) => ( + + - {device.label || `Camera ${idx + 1}`} ( + {device.facing || 'unknown'}) + + ))} + + )} + + + {/* Processing Results */} + {processingResults && ( + + Frame Processing Results: + {processingResults} + + )} + + {/* Notes */} + + Implementation Notes: + + ✓ Uses mediaDevices.getUserMedia() for camera access + + + ✓ Displays stream in RTCView component + + + ✓ Uses ExecuTorch frame processor for real-time processing + + + ✓ Processes frames at ~10 FPS (every 100ms) + + + ✓ Results sent back to JS via event emitter + + + + ); +} + +const styles = StyleSheet.create({ + container: { + flex: 1, + padding: 20, + backgroundColor: '#fff', + }, + title: { + fontSize: 24, + fontWeight: 'bold', + color: ColorPalette.strongPrimary, + marginBottom: 10, + }, + description: { + fontSize: 14, + color: '#666', + marginBottom: 20, + lineHeight: 20, + }, + videoContainer: { + width: '100%', + height: 300, + backgroundColor: '#000', + borderRadius: 12, + overflow: 'hidden', + marginBottom: 20, + }, + video: { + width: '100%', + height: '100%', + }, + placeholder: { + flex: 1, + justifyContent: 'center', + alignItems: 'center', + }, + placeholderText: { + color: '#fff', + fontSize: 16, + }, + overlay: { + position: 'absolute', + top: 10, + left: 10, + backgroundColor: 'rgba(0,0,0,0.7)', + padding: 10, + borderRadius: 8, + }, + overlayText: { + color: '#fff', + fontSize: 12, + marginBottom: 2, + }, + controls: { + flexDirection: 'row', + justifyContent: 'space-between', + marginBottom: 20, + gap: 10, + }, + button: { + flex: 1, + backgroundColor: ColorPalette.strongPrimary, + borderRadius: 8, + padding: 12, + alignItems: 'center', + justifyContent: 'center', + }, + switchButton: { + backgroundColor: ColorPalette.primary, + }, + buttonDisabled: { + backgroundColor: '#ccc', + opacity: 0.5, + }, + buttonText: { + color: 'white', + fontSize: 14, + fontWeight: '600', + }, + infoContainer: { + backgroundColor: '#f5f5f5', + padding: 15, + borderRadius: 8, + marginBottom: 15, + }, + infoTitle: { + fontSize: 16, + fontWeight: 'bold', + color: ColorPalette.strongPrimary, + marginBottom: 10, + }, + infoText: { + fontSize: 12, + color: '#333', + marginBottom: 4, + fontFamily: Platform.OS === 'ios' ? 'Courier' : 'monospace', + }, + trackInfo: { + marginTop: 8, + paddingTop: 8, + borderTopWidth: 1, + borderTopColor: '#ddd', + }, + resultsContainer: { + backgroundColor: '#e6f7ff', + padding: 15, + borderRadius: 8, + marginBottom: 15, + borderWidth: 1, + borderColor: '#91d5ff', + }, + resultsTitle: { + fontSize: 16, + fontWeight: 'bold', + color: ColorPalette.strongPrimary, + marginBottom: 10, + }, + resultsText: { + fontSize: 11, + color: '#333', + fontFamily: Platform.OS === 'ios' ? 
'Courier' : 'monospace',
+  },
+  notesContainer: {
+    backgroundColor: '#fff9e6',
+    padding: 15,
+    borderRadius: 8,
+    borderWidth: 1,
+    borderColor: '#ffe066',
+  },
+  notesTitle: {
+    fontSize: 14,
+    fontWeight: 'bold',
+    color: '#996600',
+    marginBottom: 8,
+  },
+  notesText: {
+    fontSize: 12,
+    color: '#664400',
+    marginBottom: 4,
+  },
+});
diff --git a/apps/computer-vision/package.json b/apps/computer-vision/package.json
index a597992955..5306b15a53 100644
--- a/apps/computer-vision/package.json
+++ b/apps/computer-vision/package.json
@@ -28,6 +28,7 @@
     "react-native-device-info": "^15.0.2",
     "react-native-executorch": "workspace:*",
     "react-native-executorch-expo-resource-fetcher": "workspace:*",
+    "react-native-executorch-webrtc": "workspace:*",
     "react-native-gesture-handler": "~2.28.0",
     "react-native-image-picker": "^7.2.2",
     "react-native-loading-spinner-overlay": "^3.0.1",
@@ -39,6 +40,7 @@
     "react-native-svg": "15.15.3",
     "react-native-svg-transformer": "^1.5.3",
     "react-native-vision-camera": "5.0.0-beta.7",
+    "react-native-webrtc": "^124.0.7",
     "react-native-worklets": "0.7.4"
   },
   "devDependencies": {
diff --git a/packages/react-native-executorch-webrtc/README.md b/packages/react-native-executorch-webrtc/README.md
new file mode 100644
index 0000000000..0af5dfe382
--- /dev/null
+++ b/packages/react-native-executorch-webrtc/README.md
@@ -0,0 +1,120 @@
+# react-native-executorch-webrtc
+
+ExecuTorch frame processor integration for react-native-webrtc.
+
+Process WebRTC camera frames with ExecuTorch vision models in real time.
+
+## Installation
+
+```bash
+yarn add react-native-executorch-webrtc
+```
+
+**That's it!** The package auto-registers via React Native autolinking. No native code setup needed.
+
+### Platform Support
+
+- ✅ Android (auto-configured)
+- 🚧 iOS (coming soon)
+
+## Usage
+
+### Basic Usage
+
+Just import and use the hook - everything auto-registers:
+
+```typescript
+import { useWebRTCFrameProcessor } from 'react-native-executorch-webrtc';
+import { RTCView, mediaDevices, MediaStream } from 'react-native-webrtc';
+
+function WebRTCCamera() {
+  const [stream, setStream] = useState<MediaStream | null>(null);
+
+  // Enable ExecuTorch frame processing
+  useWebRTCFrameProcessor(stream);
+
+  useEffect(() => {
+    async function startCamera() {
+      const mediaStream = await mediaDevices.getUserMedia({
+        video: true,
+        audio: false,
+      });
+      setStream(mediaStream);
+    }
+    startCamera();
+  }, []);
+
+  return <RTCView streamURL={stream?.toURL()} />;
+}
+```
+
+### Manual Control
+
+```typescript
+import {
+  enableFrameProcessor,
+  disableFrameProcessor,
+} from 'react-native-executorch-webrtc';
+
+// Enable processing
+const videoTrack = stream.getVideoTracks()[0];
+enableFrameProcessor(videoTrack);
+
+// Disable processing
+disableFrameProcessor(videoTrack);
+```
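+
+### Configuring the Model
+
+Background removal needs a segmentation model on the device before frames can
+be processed. A minimal sketch, mirroring the `apps/computer-vision` example in
+this repo (it assumes the `SELFIE_SEGMENTATION` constant and `ResourceFetcher`
+exported by `react-native-executorch`):
+
+```typescript
+import { ResourceFetcher, SELFIE_SEGMENTATION } from 'react-native-executorch';
+import { configureBackgroundRemoval } from 'react-native-executorch-webrtc';
+
+// Download the .pte model, then hand the local path to the native processor.
+async function setupBackgroundRemoval(): Promise<void> {
+  const paths = await ResourceFetcher.fetch(
+    (progress) => console.log(`Download: ${(progress * 100).toFixed(0)}%`),
+    SELFIE_SEGMENTATION.modelSource
+  );
+  if (!paths?.[0]) {
+    throw new Error('Failed to download segmentation model');
+  }
+  configureBackgroundRemoval(paths[0]);
+}
+```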
+
+## API
+
+### `useWebRTCFrameProcessor(stream, options?)`
+
+React hook that automatically enables/disables frame processing.
+
+**Parameters:**
+- `stream`: `MediaStream | null | undefined` - The WebRTC media stream
+- `options.enabled`: `boolean` (optional, default: `true`) - Whether to enable processing
+- `options.onResults`: `(results) => void` (optional) - Callback for frame-processing results
+
+### `enableFrameProcessor(videoTrack)`
+
+Manually enable frame processing on a video track.
+
+**Parameters:**
+- `videoTrack`: `MediaStreamTrack` - The video track to process
+
+### `disableFrameProcessor(videoTrack)`
+
+Manually disable frame processing on a video track.
+
+**Parameters:**
+- `videoTrack`: `MediaStreamTrack` - The video track to stop processing
+
+## Current Status
+
+**✅ Implemented:**
+- Android frame processor registration
+- Selfie segmentation with background removal (Android)
+- Frame info logging (FPS, resolution, etc.)
+- TypeScript API and hooks
+
+**🚧 Coming Soon:**
+- Object detection on frames
+- iOS support
+- Result callbacks to JavaScript
+
+## How It Works
+
+1. The package registers a `VideoFrameProcessor` with react-native-webrtc
+2. When enabled, every camera frame passes through the processor
+3. The processor runs the configured ExecuTorch segmentation model on each frame
+4. Results are sent back to JavaScript (coming soon)
+
+The Android processor currently masks out the background using the configured
+selfie segmentation model and logs frame statistics for debugging. Result
+callbacks to JavaScript are next.
+
+## Example
+
+See `apps/computer-vision` in the repo for a complete example.
+
+## License
+
+MIT
diff --git a/packages/react-native-executorch-webrtc/android/CMakeLists.txt b/packages/react-native-executorch-webrtc/android/CMakeLists.txt
new file mode 100644
index 0000000000..81b0205e13
--- /dev/null
+++ b/packages/react-native-executorch-webrtc/android/CMakeLists.txt
@@ -0,0 +1,83 @@
+cmake_minimum_required(VERSION 3.13)
+project(react-native-executorch-webrtc)
+
+set(CMAKE_VERBOSE_MAKEFILE ON)
+set(CMAKE_CXX_STANDARD 20)
+
+# Resolve React Native directory
+if(NOT DEFINED REACT_NATIVE_DIR)
+  # Try to find it via node
+  execute_process(
+    COMMAND node --print "require.resolve('react-native/package.json')"
+    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+    OUTPUT_VARIABLE REACT_NATIVE_PACKAGE
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+  )
+  get_filename_component(REACT_NATIVE_DIR "${REACT_NATIVE_PACKAGE}" DIRECTORY)
+endif()
+
+# Paths to react-native-executorch
+set(RN_EXECUTORCH_DIR "${CMAKE_SOURCE_DIR}/../../react-native-executorch")
+set(RN_EXECUTORCH_THIRD_PARTY "${RN_EXECUTORCH_DIR}/third-party/include")
+
+# Add our JNI bridge source files
+file(GLOB SOURCES "src/main/cpp/*.cpp")
+
+add_library(
+  ${CMAKE_PROJECT_NAME}
+  SHARED
+  ${SOURCES}
+)
+
+# Include headers
+target_include_directories(
+  ${CMAKE_PROJECT_NAME}
+  PRIVATE
+  "${RN_EXECUTORCH_DIR}/common"
+  "${RN_EXECUTORCH_THIRD_PARTY}"
+  "${REACT_NATIVE_DIR}/ReactCommon"
+  "${REACT_NATIVE_DIR}/ReactCommon/jsi"
+  "${REACT_NATIVE_DIR}/ReactCommon/callinvoker"
+  "${REACT_NATIVE_DIR}/ReactAndroid/src/main/jni/react/turbomodule"
+)
+
+# Find prebuilt libraries
+find_library(LOG_LIB log)
+find_library(ANDROID_LIB android)
+
+# Find packages
+find_package(react-native-executorch REQUIRED CONFIG)
+
+# Import ExecuTorch library
+set(LIBS_DIR "${RN_EXECUTORCH_DIR}/third-party/android/libs")
+add_library(executorch SHARED IMPORTED)
+set_target_properties(executorch PROPERTIES
+  IMPORTED_LOCATION "${LIBS_DIR}/executorch/${ANDROID_ABI}/libexecutorch.so")
+
+# OpenCV libraries
+set(OPENCV_LIBS
+  "${LIBS_DIR}/opencv/${ANDROID_ABI}/libopencv_core.a"
+  "${LIBS_DIR}/opencv/${ANDROID_ABI}/libopencv_imgproc.a"
+)
+
+# OpenCV third-party libraries (arm64 specific)
+if(ANDROID_ABI STREQUAL "arm64-v8a")
+  set(OPENCV_THIRD_PARTY_LIBS
+    "${LIBS_DIR}/opencv-third-party/${ANDROID_ABI}/libkleidicv_hal.a"
+    "${LIBS_DIR}/opencv-third-party/${ANDROID_ABI}/libkleidicv_thread.a"
+    "${LIBS_DIR}/opencv-third-party/${ANDROID_ABI}/libkleidicv.a"
+  )
+else()
+  set(OPENCV_THIRD_PARTY_LIBS "")
+endif()
+
+# Link against libraries
+target_link_libraries(
+  ${CMAKE_PROJECT_NAME}
+  ${LOG_LIB}
+  ${ANDROID_LIB}
+  react-native-executorch::react-native-executorch
+  ${OPENCV_LIBS}
+  ${OPENCV_THIRD_PARTY_LIBS}
+  executorch
+)
diff --git a/packages/react-native-executorch-webrtc/android/build.gradle b/packages/react-native-executorch-webrtc/android/build.gradle
new file mode 100644
index 0000000000..a6f178a3ac
--- /dev/null
+++ b/packages/react-native-executorch-webrtc/android/build.gradle
@@ -0,0 +1,75 @@
+buildscript {
+    ext.kotlin_version = '1.9.0'
+
+    repositories {
+        google()
+        mavenCentral()
+    }
+
+    dependencies {
+        classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
+    }
+}
+
+apply plugin: 'com.android.library'
+apply plugin: 'kotlin-android'
+
+android {
+    compileSdkVersion 34
+
+    buildFeatures {
+        prefab true
+    }
+
+    defaultConfig {
+        minSdkVersion 26
+        targetSdkVersion 34
+
+        externalNativeBuild {
+            cmake {
+                cppFlags "-std=c++20"
+                arguments "-DANDROID_STL=c++_shared"
+            }
+        }
+
+        ndk {
+            // Only build for architectures where OpenCV/ExecuTorch libraries exist
+            abiFilters 'arm64-v8a'
+        }
+    }
+
+    externalNativeBuild {
+        cmake {
+            path "CMakeLists.txt"
+        }
+    }
+
+    compileOptions {
+        sourceCompatibility JavaVersion.VERSION_17
+        targetCompatibility JavaVersion.VERSION_17
+    }
+
+    kotlinOptions {
+        jvmTarget = '17'
+    }
+}
+
+repositories {
+    google()
+    mavenCentral()
+}
+
+dependencies {
+    implementation "org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version"
+    implementation 'com.facebook.react:react-native:+'
+
+    // WebRTC classes - provided by app via autolinking
+    if (findProject(':react-native-webrtc') != null) {
+        compileOnly project(':react-native-webrtc')
+    }
+
+    // ExecuTorch for vision model processing
+    if (findProject(':react-native-executorch') != null) {
+        implementation project(':react-native-executorch')
+    }
+}
diff --git a/packages/react-native-executorch-webrtc/android/src/main/AndroidManifest.xml b/packages/react-native-executorch-webrtc/android/src/main/AndroidManifest.xml
new file mode 100644
index 0000000000..e68f8b269a
--- /dev/null
+++ b/packages/react-native-executorch-webrtc/android/src/main/AndroidManifest.xml
@@ -0,0 +1,3 @@
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+    package="com.executorch.webrtc">
+</manifest>
diff --git a/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp b/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp
new file mode 100644
index 0000000000..c8bd503879
--- /dev/null
+++ b/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp
@@ -0,0 +1,461 @@
+#include <android/log.h>
+#include <chrono>
+#include <cstdint>
+#include <cstring>
+#include <jni.h>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <executorch/extension/module/module.h>
+#include <executorch/extension/tensor/tensor.h>
+#include <opencv2/opencv.hpp>
+
+#define LOG_TAG "ExecutorchWebRTC-JNI"
+#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)
+#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
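+
+// All state below is process-global: react-native-webrtc delivers capturer
+// frames on a single thread, so no locking is done here. If processors were
+// ever created on multiple capture threads, this state would need a mutex.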
+
+// Global model instance
+static std::unique_ptr<executorch::extension::Module> g_model = nullptr;
+static bool g_modelLoaded = false;
+static std::string g_modelPath;
+
+// Model input dimensions (dynamically read from model)
+static int g_modelHeight = 256;
+static int g_modelWidth = 256;
+static bool g_buffersInitialized = false;
+
+// Pre-allocated buffers (resized dynamically based on model input)
+static std::vector<float> g_inputData;
+static cv::Mat g_resizedRgb;
+static cv::Mat g_floatMat;
+
+// Debug logging rate limiter
+static long long g_lastDebugLogTime = 0;
+
+extern "C" {
+
+/**
+ * Reallocate buffers based on model input dimensions
+ */
+static void reallocateBuffers(int height, int width) {
+  g_modelHeight = height;
+  g_modelWidth = width;
+  g_inputData.resize(1 * 3 * height * width);
+  g_resizedRgb = cv::Mat(height, width, CV_8UC3);
+  g_floatMat = cv::Mat(height, width, CV_32FC3);
+  g_buffersInitialized = true;
+  LOGD("Buffers reallocated for model size: %dx%d", width, height);
+}
+
+/**
+ * Ensure buffers are initialized (called before first frame if needed)
+ */
+static void ensureBuffersInitialized() {
+  if (!g_buffersInitialized) {
+    reallocateBuffers(256, 256);
+  }
+}
+
+/**
+ * Load the segmentation model
+ */
+JNIEXPORT jboolean JNICALL
+Java_com_executorch_webrtc_ExecutorchFrameProcessor_loadModel(
+    JNIEnv *env, jobject thiz, jstring modelPath) {
+  const char *pathChars = env->GetStringUTFChars(modelPath, nullptr);
+  if (pathChars == nullptr) {
+    LOGE("Failed to get model path string");
+    return JNI_FALSE;
+  }
+
+  g_modelPath = std::string(pathChars);
+  env->ReleaseStringUTFChars(modelPath, pathChars);
+
+  LOGD("Loading ExecuTorch model from: %s", g_modelPath.c_str());
+
+  try {
+    g_model = std::make_unique<executorch::extension::Module>(
+        g_modelPath,
+        executorch::extension::Module::LoadMode::MmapUseMlockIgnoreErrors);
+
+    // Get model input shape to determine expected dimensions
+    auto methodMeta = g_model->method_meta("forward");
+    if (methodMeta.ok()) {
+      auto inputMeta = methodMeta->input_tensor_meta(0);
+      if (inputMeta.ok()) {
+        auto sizes = inputMeta->sizes();
+        // Expected shape: [1, 3, H, W] (NCHW format)
+        if (sizes.size() >= 4) {
+          int modelH = static_cast<int>(sizes[sizes.size() - 2]);
+          int modelW = static_cast<int>(sizes[sizes.size() - 1]);
+          LOGD("Model input shape detected: [1, 3, %d, %d]", modelH, modelW);
+          reallocateBuffers(modelH, modelW);
+        } else if (sizes.size() >= 2) {
+          int modelH = static_cast<int>(sizes[sizes.size() - 2]);
+          int modelW = static_cast<int>(sizes[sizes.size() - 1]);
+          LOGD("Model input shape (2D): [%d, %d]", modelH, modelW);
+          reallocateBuffers(modelH, modelW);
+        } else {
+          LOGD("Could not determine model input shape, using default 256x256");
+          reallocateBuffers(256, 256);
+        }
+      } else {
+        LOGD("Could not get input tensor meta, using default 256x256");
+        reallocateBuffers(256, 256);
+      }
+    } else {
+      LOGD("Could not get method meta, using default 256x256");
+      reallocateBuffers(256, 256);
+    }
+
+    g_modelLoaded = true;
+    LOGD("✅ Model loaded successfully!");
+    return JNI_TRUE;
+  } catch (const std::exception &e) {
+    LOGE("❌ Failed to load model: %s", e.what());
+    g_modelLoaded = false;
+    reallocateBuffers(256, 256); // Use default size
+    return JNI_FALSE;
+  }
+}
+
+/**
+ * Process I420 frame directly - does segmentation and applies mask in one call.
+ * This avoids multiple JNI crossings and RGB conversions in Kotlin.
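+ *
+ * Per-frame pipeline (summary of the code below): copy the Y/U/V planes out of
+ * the JVM arrays, merge them into one contiguous I420 buffer, convert to RGB,
+ * rotate upright, resize to the model input, run segmentation, resize/rotate
+ * the mask back, then blend the mask into the luma plane only.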
+ *
+ * @param yData Y plane data
+ * @param uData U plane data
+ * @param vData V plane data
+ * @param width Frame width
+ * @param height Frame height
+ * @param yStride Y plane stride
+ * @param uvStride U/V plane stride
+ * @param rotation Frame rotation in degrees (0, 90, 180, 270)
+ * @return Modified Y plane with background blacked out (or null on error)
+ */
+JNIEXPORT jbyteArray JNICALL
+Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame(
+    JNIEnv *env, jobject thiz, jbyteArray yData, jbyteArray uData,
+    jbyteArray vData, jint width, jint height, jint yStride, jint uvStride,
+    jint rotation) {
+  // Ensure buffers are initialized
+  ensureBuffersInitialized();
+
+  // Get input buffers and their actual sizes
+  jsize yDataSize = env->GetArrayLength(yData);
+  jsize uDataSize = env->GetArrayLength(uData);
+  jsize vDataSize = env->GetArrayLength(vData);
+
+  jbyte *yPtr = env->GetByteArrayElements(yData, nullptr);
+  jbyte *uPtr = env->GetByteArrayElements(uData, nullptr);
+  jbyte *vPtr = env->GetByteArrayElements(vData, nullptr);
+
+  if (!yPtr || !uPtr || !vPtr) {
+    LOGE("Failed to get buffer pointers");
+    if (yPtr)
+      env->ReleaseByteArrayElements(yData, yPtr, JNI_ABORT);
+    if (uPtr)
+      env->ReleaseByteArrayElements(uData, uPtr, JNI_ABORT);
+    if (vPtr)
+      env->ReleaseByteArrayElements(vData, vPtr, JNI_ABORT);
+    return nullptr;
+  }
+
+  // Determine actual stride based on buffer sizes
+  // If buffer is smaller than stride * height, the actual stride is width (no
+  // padding)
+  int actualYStride = (yDataSize >= yStride * height) ? yStride : width;
+  int actualUVStride =
+      (uDataSize >= uvStride * (height / 2)) ? uvStride : (width / 2);
+
+  // Rate-limited logging of buffer info
+  static long long lastBufferLogTime = 0;
+  auto now = std::chrono::duration_cast<std::chrono::milliseconds>(
+                 std::chrono::system_clock::now().time_since_epoch())
+                 .count();
+  if (now - lastBufferLogTime > 2000) {
+    LOGD("Buffer sizes: Y=%d (expected %d), U=%d, V=%d, actualYStride=%d, "
+         "actualUVStride=%d",
+         yDataSize, yStride * height, uDataSize, vDataSize, actualYStride,
+         actualUVStride);
+    lastBufferLogTime = now;
+  }
+
+  // Create output Y buffer with actual stride
+  jbyteArray outYData = env->NewByteArray(actualYStride * height);
+  if (!outYData) {
+    env->ReleaseByteArrayElements(yData, yPtr, JNI_ABORT);
+    env->ReleaseByteArrayElements(uData, uPtr, JNI_ABORT);
+    env->ReleaseByteArrayElements(vData, vPtr, JNI_ABORT);
+    return nullptr;
+  }
+
+  // Merge I420 to single buffer for cvtColor
+  // Create the combined I420 buffer (Y plane followed by U and V planes)
+  cv::Mat i420(height * 3 / 2, width, CV_8UC1);
+
+  // Copy Y plane row by row (handle stride correctly)
+  uint8_t *ySrc = reinterpret_cast<uint8_t *>(yPtr);
+  for (int row = 0; row < height; row++) {
+    memcpy(i420.ptr(row), ySrc + row * actualYStride, width);
+  }
+
+  // Copy U and V planes
+  uint8_t *uSrc = reinterpret_cast<uint8_t *>(uPtr);
+  uint8_t *vSrc = reinterpret_cast<uint8_t *>(vPtr);
+  uint8_t *uvDst = i420.ptr(height);
+  int uvWidth = width / 2;
+  int uvHeight = height / 2;
+
+  for (int row = 0; row < uvHeight; row++) {
+    memcpy(uvDst + row * uvWidth, uSrc + row * actualUVStride, uvWidth);
+  }
+  for (int row = 0; row < uvHeight; row++) {
+    memcpy(uvDst + uvHeight * uvWidth + row * uvWidth,
+           vSrc + row * actualUVStride, uvWidth);
+  }
+
+  // Convert to RGB
+  cv::Mat rgbFull;
+  cv::cvtColor(i420, rgbFull, cv::COLOR_YUV2RGB_I420);
+
+  // Rotate the image upright for model inference. WebRTC's frame rotation is
+  // the clockwise rotation that must be applied for the frame to display
+  // upright, so we apply exactly that here.
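+  // Segmentation models assume upright subjects, so running inference on a
+  // sideways frame would produce a poor mask; the mask is rotated back to the
+  // sensor orientation after inference (see below).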
+  cv::Mat rgbRotated;
+  int rotateCode = -1; // -1 means no rotation needed
+  if (rotation == 90) {
+    rotateCode = cv::ROTATE_90_CLOCKWISE;
+  } else if (rotation == 180) {
+    rotateCode = cv::ROTATE_180;
+  } else if (rotation == 270) {
+    rotateCode = cv::ROTATE_90_COUNTERCLOCKWISE;
+  }
+
+  if (rotateCode >= 0) {
+    cv::rotate(rgbFull, rgbRotated, rotateCode);
+  } else {
+    rgbRotated = rgbFull;
+  }
+
+  // Resize to model input size (use dynamic dimensions)
+  cv::resize(rgbRotated, g_resizedRgb, cv::Size(g_modelWidth, g_modelHeight));
+
+  // Run segmentation
+  cv::Mat mask;
+
+  if (!g_modelLoaded || !g_model) {
+    // Rate-limited logging for missing model
+    auto now = std::chrono::duration_cast<std::chrono::milliseconds>(
+                   std::chrono::system_clock::now().time_since_epoch())
+                   .count();
+    if (now - g_lastDebugLogTime > 1000) {
+      LOGD("Model not loaded (g_modelLoaded=%d, g_model=%p), using placeholder "
+           "ellipse mask",
+           g_modelLoaded ? 1 : 0, g_model.get());
+      g_lastDebugLogTime = now;
+    }
+
+    // Placeholder ellipse mask
+    mask = cv::Mat(g_modelHeight, g_modelWidth, CV_32FC1);
+    const float centerY = g_modelHeight / 2.0f;
+    const float centerX = g_modelWidth / 2.0f;
+    const float radiusY = g_modelHeight * 0.4f;
+    const float radiusX = g_modelWidth * 0.35f;
+
+    for (int y = 0; y < g_modelHeight; y++) {
+      float *row = mask.ptr<float>(y);
+      for (int x = 0; x < g_modelWidth; x++) {
+        float dy = (y - centerY) / radiusY;
+        float dx = (x - centerX) / radiusX;
+        float dist = dx * dx + dy * dy;
+        row[x] = (dist < 1.0f)
+                     ? 1.0f
+                     : ((dist < 1.3f) ? (1.0f - ((dist - 1.0f) / 0.3f)) : 0.0f);
+      }
+    }
+  } else {
+    // Run ExecuTorch model
+    g_resizedRgb.convertTo(g_floatMat, CV_32FC3, 1.0 / 255.0);
+
+    // Convert HWC to NCHW
+    float *inputPtr = g_inputData.data();
+    for (int c = 0; c < 3; c++) {
+      for (int y = 0; y < g_modelHeight; y++) {
+        const cv::Vec3f *row = g_floatMat.ptr<cv::Vec3f>(y);
+        for (int x = 0; x < g_modelWidth; x++) {
+          *inputPtr++ = row[x][c];
+        }
+      }
+    }
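+
+    // Layout note: ExecuTorch (like PyTorch) expects NCHW float input while
+    // OpenCV stores pixels as HWC, hence the channel-major copy above.
+    // cv::split into per-channel planes would likely be faster, but the
+    // small default input (256x256) keeps this loop cheap.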
+    std::vector<executorch::aten::SizesType> shape = {1, 3, g_modelHeight,
+                                                      g_modelWidth};
+    auto inputTensor = executorch::extension::from_blob(
+        g_inputData.data(), shape, executorch::aten::ScalarType::Float);
+
+    std::vector<executorch::runtime::EValue> inputs = {inputTensor};
+    auto result = g_model->forward(inputs);
+
+    if (result.ok() && !result.get().empty()) {
+      auto outputTensor = result.get()[0].toTensor();
+      const float *outputData = outputTensor.const_data_ptr<float>();
+
+      // Get output tensor dimensions
+      int outputH = g_modelHeight;
+      int outputW = g_modelWidth;
+      if (outputTensor.dim() >= 2) {
+        outputH = static_cast<int>(outputTensor.size(outputTensor.dim() - 2));
+        outputW = static_cast<int>(outputTensor.size(outputTensor.dim() - 1));
+      }
+
+      // Rate-limited debug logging (once per second)
+      auto now = std::chrono::duration_cast<std::chrono::milliseconds>(
+                     std::chrono::system_clock::now().time_since_epoch())
+                     .count();
+      if (now - g_lastDebugLogTime > 1000) {
+        // Log tensor info
+        LOGD("Model output: dim=%zd, outputH=%d, outputW=%d, numel=%zd",
+             (ssize_t)outputTensor.dim(), outputH, outputW,
+             (ssize_t)outputTensor.numel());
+
+        // Sample output values to understand the range
+        ssize_t totalElements = outputTensor.numel();
+        float minVal = outputData[0], maxVal = outputData[0], sum = 0;
+        for (ssize_t i = 0; i < totalElements; i++) {
+          float v = outputData[i];
+          if (v < minVal)
+            minVal = v;
+          if (v > maxVal)
+            maxVal = v;
+          sum += v;
+        }
+        float mean = sum / totalElements;
+        LOGD("Output stats: min=%.4f, max=%.4f, mean=%.4f", minVal, maxVal,
+             mean);
+
+        // Log first few values
+        int numSamples = (totalElements < 10) ? (int)totalElements : 10;
+        std::string samples = "First values: ";
+        for (int i = 0; i < numSamples; i++) {
+          samples += std::to_string(outputData[i]) + " ";
+        }
+        LOGD("%s", samples.c_str());
+
+        g_lastDebugLogTime = now;
+      }
+
+      mask =
+          cv::Mat(outputH, outputW, CV_32FC1, const_cast<float *>(outputData))
+              .clone();
+    } else {
+      // Fallback - keep everything
+      LOGE("Model forward FAILED! result.ok()=%d, result.get().empty()=%d",
+           result.ok() ? 1 : 0,
+           result.ok() ? (result.get().empty() ? 1 : 0) : -1);
+      mask = cv::Mat::ones(g_modelHeight, g_modelWidth, CV_32FC1);
+    }
+  }
+
+  // Resize mask to rotated frame size, then rotate back to original
+  // orientation
+  cv::Mat fullMask;
+  if (rotation == 90 || rotation == 270) {
+    // For 90/270 rotation, the rotated image had swapped dimensions
+    cv::Mat rotatedMask;
+    cv::resize(mask, rotatedMask, cv::Size(height, width), 0, 0,
+               cv::INTER_LINEAR); // Note: swapped w/h
+
+    // Rotate mask back to original frame orientation (inverse of what we did
+    // to the image)
+    int inverseRotateCode = (rotation == 90) ? cv::ROTATE_90_COUNTERCLOCKWISE
+                                             : cv::ROTATE_90_CLOCKWISE;
+    cv::rotate(rotatedMask, fullMask, inverseRotateCode);
+  } else if (rotation == 180) {
+    cv::resize(mask, fullMask, cv::Size(width, height), 0, 0, cv::INTER_LINEAR);
+    cv::rotate(fullMask, fullMask, cv::ROTATE_180);
+  } else {
+    // No rotation
+    cv::resize(mask, fullMask, cv::Size(width, height), 0, 0, cv::INTER_LINEAR);
+  }
+
+  // Debug: log mask statistics after resize (rate-limited)
+  {
+    auto now = std::chrono::duration_cast<std::chrono::milliseconds>(
+                   std::chrono::system_clock::now().time_since_epoch())
+                   .count();
+    static long long lastMaskLogTime = 0;
+    if (now - lastMaskLogTime > 1000) {
+      double minVal, maxVal;
+      cv::minMaxLoc(fullMask, &minVal, &maxVal);
+      cv::Scalar meanVal = cv::mean(fullMask);
+      LOGD("Resized mask stats: size=%dx%d, min=%.4f, max=%.4f, mean=%.4f",
+           fullMask.cols, fullMask.rows, minVal, maxVal, meanVal[0]);
+      lastMaskLogTime = now;
+    }
+  }
+
+  // Apply smoothstep to mask using OpenCV (vectorized/SIMD optimized)
+  // smoothstep: values < 0.3 → 0, values > 0.7 → 1, smooth transition in
+  // between
+  const float lowThresh = 0.3f;
+  const float highThresh = 0.7f;
+  cv::Mat t;
+  cv::subtract(fullMask, lowThresh, t);
+  cv::multiply(t, 1.0f / (highThresh - lowThresh), t);
+  cv::min(t, 1.0f, t);
+  cv::max(t, 0.0f, t);
+  // smoothstep: t*t*(3 - 2*t)
+  cv::Mat t2, smoothMask;
+  cv::multiply(t, t, t2);
+  cv::multiply(t, -2.0f, smoothMask);
+  cv::add(smoothMask, 3.0f, smoothMask);
+  cv::multiply(t2, smoothMask, fullMask);
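+
+  // Note on the blend below: only luma is masked. U/V chroma passes through
+  // unchanged, so "black" here is Y=16 with the original chroma; forcing
+  // U=V=128 as well would yield a neutral black at the cost of writing two
+  // more planes.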
+
+  // Apply mask directly to Y plane (black = Y value of 16)
+  std::vector<uint8_t> outY(actualYStride * height);
+  const uint8_t BLACK_Y = 16; // Black in YUV
+
+  for (int row = 0; row < height; row++) {
+    const uint8_t *srcY = ySrc + row * actualYStride;
+    const float *maskRow = fullMask.ptr<float>(row);
+    uint8_t *dstY = outY.data() + row * actualYStride;
+
+    for (int col = 0; col < width; col++) {
+      float prob = maskRow[col];
+      dstY[col] =
+          static_cast<uint8_t>(BLACK_Y * (1.0f - prob) + srcY[col] * prob);
+    }
+    // Copy stride padding if any
+    if (actualYStride > width) {
+      memcpy(dstY + width, srcY + width, actualYStride - width);
+    }
+  }
+
+  env->SetByteArrayRegion(outYData, 0, actualYStride * height,
+                          reinterpret_cast<const jbyte *>(outY.data()));
+
+  env->ReleaseByteArrayElements(yData, yPtr, JNI_ABORT);
+  env->ReleaseByteArrayElements(uData, uPtr, JNI_ABORT);
+  env->ReleaseByteArrayElements(vData, vPtr, JNI_ABORT);
+
+  return outYData;
+}
+
+// Keep old method for compatibility but mark deprecated
+JNIEXPORT jfloatArray JNICALL
+Java_com_executorch_webrtc_ExecutorchFrameProcessor_runSegmentation(
+    JNIEnv *env, jobject thiz, jbyteArray rgbData, jint width, jint height) {
+  LOGD("runSegmentation called (deprecated path): %dx%d", width, height);
+
+  const int maskSize = width * height;
+  jfloatArray result = env->NewFloatArray(maskSize);
+  if (!result)
+    return nullptr;
+
+  std::vector<float> mask(maskSize, 0.5f);
+  env->SetFloatArrayRegion(result, 0, maskSize, mask.data());
+  return result;
+}
+
+} // extern "C"
diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt
new file mode 100644
index 0000000000..df5fe039ad
--- /dev/null
+++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt
@@ -0,0 +1,234 @@
+package com.executorch.webrtc
+
+import android.util.Log
+import com.oney.WebRTCModule.videoEffects.VideoFrameProcessor
+import org.webrtc.SurfaceTextureHelper
+import org.webrtc.VideoFrame
+import org.webrtc.VideoFrame.I420Buffer
+import java.nio.ByteBuffer
+
+/**
+ * WebRTC frame processor that applies background removal using ExecuTorch segmentation.
+ */
+class ExecutorchFrameProcessor : VideoFrameProcessor {
+  private var frameCount = 0
+  private var lastLogTime = System.currentTimeMillis()
+  private var lastProcessTime = System.currentTimeMillis()
+  private val TAG = "ExecutorchFrameProcessor"
+
+  // Model state
+  private var modelLoaded = false
+  private var loadedModelPath: String? = null
+
+  init {
+    Log.d(TAG, "ExecutorchFrameProcessor created - background removal enabled")
+    tryLoadModel()
+  }
+
+  /**
+   * Try to load the model if not already loaded and path is available.
+   * Called from init and on each frame to handle late model configuration.
+   */
+  private fun tryLoadModel() {
+    val configuredPath = ExecutorchWebRTC.modelPath
+
+    // Skip if no path configured or already loaded this path
+    if (configuredPath == null) {
+      return
+    }
+
+    if (modelLoaded && loadedModelPath == configuredPath) {
+      return
+    }
+
+    try {
+      Log.d(TAG, "Loading segmentation model from: $configuredPath")
+      val success = loadModel(configuredPath)
+      if (success) {
+        modelLoaded = true
+        loadedModelPath = configuredPath
+        Log.d(TAG, "✅ Segmentation model loaded successfully!")
+      } else {
+        Log.e(TAG, "❌ loadModel returned false")
+      }
+    } catch (e: Exception) {
+      Log.e(TAG, "❌ Failed to load model: $configuredPath", e)
+    }
+  }
+
+  /**
+   * Load the segmentation model
+   */
+  private external fun loadModel(modelPath: String): Boolean
+
+  /**
+   * Process I420 frame directly in native code - much faster than the old path.
+   * Does segmentation and mask application in one JNI call.
+   * @return Modified Y plane with background blacked out
+   */
+  private external fun processI420Frame(
+    yData: ByteArray,
+    uData: ByteArray,
+    vData: ByteArray,
+    width: Int,
+    height: Int,
+    yStride: Int,
+    uvStride: Int,
+    rotation: Int
+  ): ByteArray?
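+
+  // JNI note: the backing native library is loaded once per process from the
+  // companion object below; the external functions above resolve against it
+  // lazily, on first call.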
+ + companion object { + init { + try { + System.loadLibrary("react-native-executorch-webrtc") + } catch (e: Exception) { + Log.e("ExecutorchFrameProcessor", "Failed to load native library", e) + } + } + } + + override fun process(frame: VideoFrame, helper: SurfaceTextureHelper): VideoFrame { + frameCount++ + + // Try to load model if not loaded yet (handles late configuration) + if (!modelLoaded) { + tryLoadModel() + } + + // Log frame info every second + val now = System.currentTimeMillis() + if (now - lastLogTime >= 1000) { + val buffer = frame.buffer + Log.d(TAG, """ + ========== FRAME INFO ========== + Frame count: $frameCount + Size: ${buffer.width}x${buffer.height} + Rotation: ${frame.rotation} degrees + Buffer type: ${buffer.javaClass.simpleName} + FPS: ${frameCount / ((now - lastLogTime) / 1000.0)} + Background Removal: ACTIVE + ================================ + """.trimIndent()) + + lastLogTime = now + frameCount = 0 + } + + // Apply background blur + val blurredFrame = processWithModel(frame) + + if (blurredFrame != null) { + lastProcessTime = now + if (frameCount % 30 == 0) { + Log.d(TAG, "Returning PROCESSED frame (rotation=${blurredFrame.rotation}, timestamp=${blurredFrame.timestampNs})") + } + // Return the blurred frame + return blurredFrame + } + + // Fallback: return original frame if processing failed + if (frameCount % 30 == 0) { + Log.w(TAG, "Returning ORIGINAL frame (processing returned null)") + } + frame.retain() + return frame + } + + private fun processWithModel(frame: VideoFrame): VideoFrame? { + val i420Buffer = frame.buffer.toI420() + if (i420Buffer == null) { + Log.e(TAG, "Failed to convert frame buffer to I420!") + return null + } + + try { + val width = i420Buffer.width + val height = i420Buffer.height + + // Extract Y, U, V planes as byte arrays + val yPlane = i420Buffer.dataY + val uPlane = i420Buffer.dataU + val vPlane = i420Buffer.dataV + val yStride = i420Buffer.strideY + val uStride = i420Buffer.strideU + val vStride = i420Buffer.strideV + + // Calculate sizes - use minimum of calculated size and available bytes + val uvHeight = height / 2 + val yCalcSize = yStride * height + val uCalcSize = uStride * uvHeight + val vCalcSize = vStride * uvHeight + + val yAvail = yPlane.remaining() + val uAvail = uPlane.remaining() + val vAvail = vPlane.remaining() + + val ySize = minOf(yCalcSize, yAvail) + val uSize = minOf(uCalcSize, uAvail) + val vSize = minOf(vCalcSize, vAvail) + + // Log buffer info occasionally for debugging + if (frameCount % 60 == 0) { + Log.d(TAG, "Buffer info: Y=$ySize/$yAvail (stride=$yStride), U=$uSize/$uAvail (stride=$uStride), V=$vSize/$vAvail (stride=$vStride), ${width}x${height}") + } + + val yData = ByteArray(ySize) + val uData = ByteArray(uSize) + val vData = ByteArray(vSize) + + yPlane.get(yData) + uPlane.get(uData) + vPlane.get(vData) + + // Process in native - returns modified Y plane + // Pass rotation so native code can rotate image before model inference + val processedY = processI420Frame(yData, uData, vData, width, height, yStride, uStride, frame.rotation) + + if (processedY == null) { + Log.e(TAG, "processI420Frame returned null!") + i420Buffer.release() + return null + } + + // Calculate actual Y stride from returned data + val actualYStride = processedY.size / height + + // Log success occasionally + if (frameCount % 30 == 0) { + Log.d(TAG, "Frame processed: ${width}x${height}, processedY=${processedY.size}, actualYStride=$actualYStride") + } + + // Create output buffers + // For Y: use processed data with calculated 
stride + // For U/V: keep original data and strides (we don't modify chroma) + val outYPlane = ByteBuffer.allocateDirect(processedY.size) + val outUPlane = ByteBuffer.allocateDirect(uSize) + val outVPlane = ByteBuffer.allocateDirect(vSize) + + outYPlane.put(processedY) + outUPlane.put(uData) + outVPlane.put(vData) + + outYPlane.rewind() + outUPlane.rewind() + outVPlane.rewind() + + // Use original U/V strides since we're passing through the original chroma data + val resultBuffer = org.webrtc.JavaI420Buffer.wrap( + width, height, + outYPlane, actualYStride, + outUPlane, uStride, + outVPlane, vStride, + null + ) + + i420Buffer.release() + return VideoFrame(resultBuffer, frame.rotation, frame.timestampNs) + } catch (e: Exception) { + Log.e(TAG, "Exception in processWithModel: ${e.message}", e) + i420Buffer.release() + return null + } + } + +} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt new file mode 100644 index 0000000000..ab9954bf56 --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt @@ -0,0 +1,14 @@ +package com.executorch.webrtc + +import com.oney.WebRTCModule.videoEffects.VideoFrameProcessor +import com.oney.WebRTCModule.videoEffects.VideoFrameProcessorFactoryInterface + +/** + * Factory for creating ExecutorchFrameProcessor instances. + * Required by react-native-webrtc's ProcessorProvider system. + */ +class ExecutorchFrameProcessorFactory : VideoFrameProcessorFactoryInterface { + override fun build(): VideoFrameProcessor { + return ExecutorchFrameProcessor() + } +} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt new file mode 100644 index 0000000000..abccc0aed9 --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt @@ -0,0 +1,44 @@ +package com.executorch.webrtc + +import android.util.Log +import com.oney.WebRTCModule.videoEffects.ProcessorProvider + +/** + * Main entry point for ExecuTorch WebRTC integration. + * Call registerProcessors() from your Application.onCreate() + */ +object ExecutorchWebRTC { + private const val TAG = "ExecutorchWebRTC" + private const val PROCESSOR_NAME = "executorch" + + // Configuration for background removal + var modelPath: String? = null + + /** + * Registers the ExecuTorch frame processor with react-native-webrtc. + * Call this in your Application.onCreate() method. + */ + fun registerProcessors() { + try { + ProcessorProvider.addProcessor(PROCESSOR_NAME, ExecutorchFrameProcessorFactory()) + Log.d(TAG, "✅ ExecuTorch frame processor registered successfully") + } catch (e: Exception) { + Log.e(TAG, "❌ Failed to register ExecuTorch processor", e) + } + } + + /** + * Configure the segmentation model for background removal + */ + fun configureModel(path: String) { + Log.d(TAG, "📥 configureModel called with path: $path") + modelPath = path + Log.d(TAG, "✅ Model path configured - processors will load model on next frame") + } + + /** + * Gets the processor name to use in JavaScript. 
+ * Use this when calling videoTrack._setVideoEffects(['...']) + */ + fun getProcessorName(): String = PROCESSOR_NAME +} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt new file mode 100644 index 0000000000..368e1de1d7 --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt @@ -0,0 +1,67 @@ +package com.executorch.webrtc + +import com.facebook.react.bridge.ReactApplicationContext +import com.facebook.react.bridge.ReactContextBaseJavaModule +import com.facebook.react.bridge.ReactMethod +import com.facebook.react.bridge.WritableMap +import com.facebook.react.module.annotations.ReactModule +import com.facebook.react.modules.core.DeviceEventManagerModule + +/** + * Native module that auto-registers the frame processor when loaded. + * This allows the package to work without manual native code setup. + */ +@ReactModule(name = ExecutorchWebRTCModule.NAME) +class ExecutorchWebRTCModule(reactContext: ReactApplicationContext) : + ReactContextBaseJavaModule(reactContext) { + + companion object { + const val NAME = "ExecutorchWebRTC" + private var initialized = false + private var moduleContext: ReactApplicationContext? = null + + /** + * Send event to JavaScript + */ + fun sendEvent(eventName: String, params: WritableMap?) { + moduleContext?.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java) + ?.emit(eventName, params) + } + } + + init { + moduleContext = reactContext + + // Auto-register the processor when the module is loaded + if (!initialized) { + ExecutorchWebRTC.registerProcessors() + initialized = true + } + } + + override fun getName(): String = NAME + + /** + * No-op method just to ensure the module is loaded. + * Called from JS to trigger initialization. 
+   */
+  @ReactMethod
+  fun setup() {
+    // Module init happens in constructor, this is just a trigger
+  }
+
+  /**
+   * Configure the segmentation model for background removal
+   * @param modelPath Path to the .pte model file
+   */
+  @ReactMethod
+  fun configureBackgroundRemoval(modelPath: String) {
+    ExecutorchWebRTC.configureModel(modelPath)
+  }
+
+  // Legacy alias
+  @ReactMethod
+  fun configureBackgroundBlur(modelPath: String, blurIntensity: Int) {
+    ExecutorchWebRTC.configureModel(modelPath)
+  }
+}
diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCPackage.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCPackage.kt
new file mode 100644
index 0000000000..265c11e661
--- /dev/null
+++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCPackage.kt
@@ -0,0 +1,16 @@
+package com.executorch.webrtc
+
+import com.facebook.react.ReactPackage
+import com.facebook.react.bridge.NativeModule
+import com.facebook.react.bridge.ReactApplicationContext
+import com.facebook.react.uimanager.ViewManager
+
+class ExecutorchWebRTCPackage : ReactPackage {
+  override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
+    return listOf(ExecutorchWebRTCModule(reactContext))
+  }
+
+  override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
+    return emptyList()
+  }
+}
diff --git a/packages/react-native-executorch-webrtc/package.json b/packages/react-native-executorch-webrtc/package.json
new file mode 100644
index 0000000000..6abc74ae06
--- /dev/null
+++ b/packages/react-native-executorch-webrtc/package.json
@@ -0,0 +1,45 @@
+{
+  "name": "react-native-executorch-webrtc",
+  "version": "0.1.0",
+  "description": "ExecuTorch WebRTC frame processor integration for react-native-webrtc",
+  "main": "lib/index.js",
+  "types": "lib/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./lib/index.d.ts",
+      "import": "./lib/index.js"
+    }
+  },
+  "files": [
+    "lib",
+    "android",
+    "react-native.config.js",
+    "README.md"
+  ],
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/software-mansion/react-native-executorch.git",
+    "directory": "packages/react-native-executorch-webrtc"
+  },
+  "scripts": {
+    "prepare": "tsc",
+    "typecheck": "tsc --noEmit",
+    "lint": "eslint \"**/*.{js,ts,tsx}\"",
+    "clean": "del-cli lib"
+  },
+  "peerDependencies": {
+    "react": "*",
+    "react-native": "*",
+    "react-native-executorch": "*",
+    "react-native-webrtc": ">=121.0.0"
+  },
+  "devDependencies": {
+    "@types/react": "~19.1.10",
+    "react": "19.1.0",
+    "react-native": "0.81.5",
+    "react-native-executorch": "workspace:*",
+    "react-native-webrtc": "^124.0.7",
+    "typescript": "~5.9.2"
+  }
+}
diff --git a/packages/react-native-executorch-webrtc/react-native.config.js b/packages/react-native-executorch-webrtc/react-native.config.js
new file mode 100644
index 0000000000..a8b172a218
--- /dev/null
+++ b/packages/react-native-executorch-webrtc/react-native.config.js
@@ -0,0 +1,15 @@
+module.exports = {
+  dependency: {
+    platforms: {
+      android: {
+        sourceDir: './android',
+        packageImportPath:
+          'import com.executorch.webrtc.ExecutorchWebRTCPackage;',
+        packageInstance: 'new ExecutorchWebRTCPackage()',
+      },
+      ios: {
+        // iOS support coming soon
+      },
+    },
+  },
+};
diff --git a/packages/react-native-executorch-webrtc/src/index.ts b/packages/react-native-executorch-webrtc/src/index.ts
new file mode 100644
index 0000000000..ec68702877
---
/dev/null +++ b/packages/react-native-executorch-webrtc/src/index.ts @@ -0,0 +1,63 @@ +/** + * ExecuTorch WebRTC integration + * + * This package provides frame processing integration between + * react-native-executorch and react-native-webrtc. + * + * @packageDocumentation + */ + +import { NativeModules, Platform } from 'react-native'; + +// Auto-initialize the native module to register the processor +// This happens when the package is first imported +if (Platform.OS === 'android') { + const { ExecutorchWebRTC } = NativeModules; + if (ExecutorchWebRTC) { + try { + ExecutorchWebRTC.setup(); + } catch (error) { + console.warn('Failed to initialize ExecutorchWebRTC:', error); + } + } else { + console.warn( + 'ExecutorchWebRTC native module not found - is the package properly linked?' + ); + } +} + +/** + * Configure background removal using semantic segmentation + * @param modelPath Path to the selfie segmentation model (.pte file) + */ +export function configureBackgroundRemoval(modelPath: string): void { + if (Platform.OS !== 'android') { + console.warn( + 'configureBackgroundRemoval: Currently only supported on Android' + ); + return; + } + + const { ExecutorchWebRTC } = NativeModules; + if (ExecutorchWebRTC) { + console.log( + '[ExecutorchWebRTC] Calling configureBackgroundRemoval:', + modelPath + ); + ExecutorchWebRTC.configureBackgroundRemoval(modelPath); + console.log('[ExecutorchWebRTC] configureBackgroundRemoval call completed'); + } else { + console.error( + '[ExecutorchWebRTC] Native module not found! Is the package linked?' + ); + } +} + +// Legacy alias +export const configureBackgroundBlur = configureBackgroundRemoval; + +export { + useWebRTCFrameProcessor, + enableFrameProcessor, + disableFrameProcessor, +} from './useWebRTCFrameProcessor'; diff --git a/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts b/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts new file mode 100644 index 0000000000..a303647d6c --- /dev/null +++ b/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts @@ -0,0 +1,173 @@ +import { useEffect } from 'react'; +import { Platform, DeviceEventEmitter } from 'react-native'; +import type { MediaStream, MediaStreamTrack } from 'react-native-webrtc'; + +const PROCESSOR_NAME = 'executorch'; + +/** + * Result from frame processing + */ +export interface FrameProcessingResult { + result: string; // JSON string with detection results + width: number; + height: number; + timestamp: number; +} + +/** + * Options for frame processor + */ +export interface WebRTCFrameProcessorOptions { + enabled?: boolean; + onResults?: (results: FrameProcessingResult) => void; + // Future options: + // modelPath?: string; + // modelType?: 'object_detection' | 'segmentation' | 'classification'; + // threshold?: number; +} + +/** + * Hook to enable ExecuTorch frame processing on a WebRTC video track. 
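+ *
+ * Note: the processor is attached through react-native-webrtc's private
+ * `_setVideoEffects` track API (underscore-prefixed), so this may change
+ * between react-native-webrtc versions.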
+ * + * @param stream - The MediaStream containing the video track to process + * @param options - Configuration options + * + * @example + * ```tsx + * const stream = await mediaDevices.getUserMedia({ video: true }); + * useWebRTCFrameProcessor(stream, { + * onResults: (results) => { + * console.log('Detections:', JSON.parse(results.result)); + * } + * }); + * ``` + */ +export function useWebRTCFrameProcessor( + stream: MediaStream | null | undefined, + options: WebRTCFrameProcessorOptions = {} +): void { + const { enabled = true, onResults } = options; + useEffect(() => { + if (!stream || !enabled) { + return; + } + + // Only supported on Android for now + if (Platform.OS !== 'android') { + console.warn( + 'useWebRTCFrameProcessor: Currently only supported on Android' + ); + return; + } + + const videoTracks = stream.getVideoTracks(); + if (videoTracks.length === 0) { + console.warn('useWebRTCFrameProcessor: No video tracks found in stream'); + return; + } + + const videoTrack = videoTracks[0]; + if (!videoTrack) { + return; + } + + // Set up event listener for results + const subscription = onResults + ? DeviceEventEmitter.addListener( + 'onFrameProcessed', + (event: FrameProcessingResult) => { + onResults(event); + } + ) + : null; + + try { + const track = videoTrack as any; + if (typeof track._setVideoEffects === 'function') { + track._setVideoEffects([PROCESSOR_NAME]); + console.log( + `✅ ExecuTorch frame processor enabled on track ${videoTrack.id}` + ); + } else { + console.warn('useWebRTCFrameProcessor: _setVideoEffects not available'); + } + } catch (error) { + console.error( + 'useWebRTCFrameProcessor: Failed to enable processor:', + error + ); + } + + // Cleanup: disable processor when unmounting + return () => { + subscription?.remove(); + + try { + const track = videoTrack as any; + if (typeof track._setVideoEffects === 'function') { + track._setVideoEffects([]); + console.log( + `ExecuTorch frame processor disabled on track ${videoTrack.id}` + ); + } + } catch (error) { + console.error( + 'useWebRTCFrameProcessor: Failed to disable processor:', + error + ); + } + }; + }, [stream, enabled, onResults]); +} + +/** + * Manually enable ExecuTorch frame processing on a video track. + * + * @param videoTrack - The video track to process + * + * @example + * ```tsx + * const stream = await mediaDevices.getUserMedia({ video: true }); + * const track = stream.getVideoTracks()[0]; + * enableFrameProcessor(track); + * ``` + */ +export function enableFrameProcessor(videoTrack: MediaStreamTrack): void { + if (Platform.OS !== 'android') { + console.warn('enableFrameProcessor: Currently only supported on Android'); + return; + } + + try { + const track = videoTrack as any; + if (typeof track._setVideoEffects === 'function') { + track._setVideoEffects([PROCESSOR_NAME]); + console.log( + `✅ ExecuTorch frame processor enabled on track ${videoTrack.id}` + ); + } + } catch (error) { + console.error('enableFrameProcessor: Failed to enable processor:', error); + throw error; + } +} + +/** + * Manually disable ExecuTorch frame processing on a video track. 
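+ *
+ * Unlike `enableFrameProcessor`, this does not gate on Android: clearing the
+ * effects list is harmless wherever `_setVideoEffects` exists.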
+ * + * @param videoTrack - The video track to stop processing + */ +export function disableFrameProcessor(videoTrack: MediaStreamTrack): void { + try { + const track = videoTrack as any; + if (typeof track._setVideoEffects === 'function') { + track._setVideoEffects([]); + console.log( + `ExecuTorch frame processor disabled on track ${videoTrack.id}` + ); + } + } catch (error) { + console.error('disableFrameProcessor: Failed to disable processor:', error); + throw error; + } +} diff --git a/packages/react-native-executorch-webrtc/tsconfig.json b/packages/react-native-executorch-webrtc/tsconfig.json new file mode 100644 index 0000000000..cadd2509aa --- /dev/null +++ b/packages/react-native-executorch-webrtc/tsconfig.json @@ -0,0 +1,28 @@ +{ + "extends": "../../tsconfig.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "lib", + "declaration": true, + "declarationMap": true, + "tsBuildInfoFile": "./lib/typescript/tsconfig.tsbuildinfo", + "composite": true, + "allowJs": false, + "allowUnreachableCode": false, + "allowUnusedLabels": false, + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "module": "esnext", + "moduleResolution": "node", + "noFallthroughCasesInSwitch": true, + "noImplicitReturns": true, + "noStrictGenericChecks": false, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noUncheckedIndexedAccess": true, + "strict": true, + "types": ["react", "node"] + }, + "include": ["src"], + "exclude": ["node_modules", "lib"] +} diff --git a/yarn.lock b/yarn.lock index a817f061a7..03b6b8ab9a 100644 --- a/yarn.lock +++ b/yarn.lock @@ -6533,7 +6533,7 @@ __metadata: languageName: unknown linkType: soft -"base64-js@npm:^1.2.3, base64-js@npm:^1.3.1, base64-js@npm:^1.5.1": +"base64-js@npm:1.5.1, base64-js@npm:^1.2.3, base64-js@npm:^1.3.1, base64-js@npm:^1.5.1": version: 1.5.1 resolution: "base64-js@npm:1.5.1" checksum: 10/669632eb3745404c2f822a18fc3a0122d2f9a7a13f7fb8b5823ee19d1d2ff9ee5b52c53367176ea4ad093c332fd5ab4bd0ebae5a8e27917a4105a4cfc86b1005 @@ -7268,6 +7268,7 @@ __metadata: react-native-device-info: "npm:^15.0.2" react-native-executorch: "workspace:*" react-native-executorch-expo-resource-fetcher: "workspace:*" + react-native-executorch-webrtc: "workspace:*" react-native-gesture-handler: "npm:~2.28.0" react-native-image-picker: "npm:^7.2.2" react-native-loading-spinner-overlay: "npm:^3.0.1" @@ -7279,6 +7280,7 @@ __metadata: react-native-svg: "npm:15.15.3" react-native-svg-transformer: "npm:^1.5.3" react-native-vision-camera: "npm:5.0.0-beta.7" + react-native-webrtc: "npm:^124.0.7" react-native-worklets: "npm:0.7.4" languageName: unknown linkType: soft @@ -7690,6 +7692,18 @@ __metadata: languageName: node linkType: hard +"debug@npm:4.3.4": + version: 4.3.4 + resolution: "debug@npm:4.3.4" + dependencies: + ms: "npm:2.1.2" + peerDependenciesMeta: + supports-color: + optional: true + checksum: 10/0073c3bcbd9cb7d71dd5f6b55be8701af42df3e56e911186dfa46fac3a5b9eb7ce7f377dd1d3be6db8977221f8eb333d945216f645cf56f6b688cd484837d255 + languageName: node + linkType: hard + "debug@npm:^3.1.0": version: 3.2.7 resolution: "debug@npm:3.2.7" @@ -8655,6 +8669,13 @@ __metadata: languageName: node linkType: hard +"event-target-shim@npm:6.0.2": + version: 6.0.2 + resolution: "event-target-shim@npm:6.0.2" + checksum: 10/aa69fc4193cad3f1e4dc0c2d3f2689ea2d477f5ff2fbee8b65f866035b15658e1985932b06ba2190c3d2cc9cc6802c26facd6c60487590c1a05f44545ec24f42 + languageName: node + linkType: hard + "event-target-shim@npm:^5.0.0": version: 5.0.1 resolution: "event-target-shim@npm:5.0.1" 
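One practical note on the TypeScript surface added above: unlike the `useWebRTCFrameProcessor` hook, the manual `enableFrameProcessor`/`disableFrameProcessor` pair does not register the `onFrameProcessed` listener, so results must be subscribed to explicitly. A minimal sketch of that flow (the event name and payload fields come from the hook implementation above; the wrapper function itself is illustrative):

```tsx
import { DeviceEventEmitter } from 'react-native';
import { mediaDevices } from 'react-native-webrtc';
import {
  enableFrameProcessor,
  disableFrameProcessor,
} from 'react-native-executorch-webrtc';

async function runProcessorManually(): Promise<() => void> {
  const stream = await mediaDevices.getUserMedia({ video: true });
  const track = stream.getVideoTracks()[0];
  if (!track) throw new Error('No video track available');

  // The hook wires this listener up automatically; the manual API does not.
  const subscription = DeviceEventEmitter.addListener(
    'onFrameProcessed',
    (event: { result: string; width: number; height: number; timestamp: number }) => {
      console.log(`Processed ${event.width}x${event.height} frame:`, event.result);
    }
  );

  enableFrameProcessor(track);

  // Caller tears down in reverse order when done.
  return () => {
    disableFrameProcessor(track);
    subscription.remove();
  };
}
```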
@@ -13287,6 +13308,13 @@ __metadata: languageName: node linkType: hard +"ms@npm:2.1.2": + version: 2.1.2 + resolution: "ms@npm:2.1.2" + checksum: 10/673cdb2c3133eb050c745908d8ce632ed2c02d85640e2edb3ace856a2266a813b30c613569bf3354fdf4ea7d1a1494add3bfa95e2713baa27d0c2c71fc44f58f + languageName: node + linkType: hard + "ms@npm:2.1.3, ms@npm:^2.1.1, ms@npm:^2.1.3": version: 2.1.3 resolution: "ms@npm:2.1.3" @@ -14488,6 +14516,24 @@ __metadata: languageName: unknown linkType: soft +"react-native-executorch-webrtc@workspace:*, react-native-executorch-webrtc@workspace:packages/react-native-executorch-webrtc": + version: 0.0.0-use.local + resolution: "react-native-executorch-webrtc@workspace:packages/react-native-executorch-webrtc" + dependencies: + "@types/react": "npm:~19.1.10" + react: "npm:19.1.0" + react-native: "npm:0.81.5" + react-native-executorch: "workspace:*" + react-native-webrtc: "npm:^124.0.7" + typescript: "npm:~5.9.2" + peerDependencies: + react: "*" + react-native: "*" + react-native-executorch: "*" + react-native-webrtc: ">=121.0.0" + languageName: unknown + linkType: soft + "react-native-executorch@workspace:*, react-native-executorch@workspace:packages/react-native-executorch": version: 0.0.0-use.local resolution: "react-native-executorch@workspace:packages/react-native-executorch" @@ -14730,6 +14776,19 @@ __metadata: languageName: node linkType: hard +"react-native-webrtc@npm:^124.0.7": + version: 124.0.7 + resolution: "react-native-webrtc@npm:124.0.7" + dependencies: + base64-js: "npm:1.5.1" + debug: "npm:4.3.4" + event-target-shim: "npm:6.0.2" + peerDependencies: + react-native: ">=0.60.0" + checksum: 10/eeeb390aaa51d42dac36c846aa2aa143e49d8fb3f4d349f776fdc47e9a4f66b310097f9dd84c17f6fc9f7e563fddc95766e676f74e2333a4f127dbfa43b59ee4 + languageName: node + linkType: hard + "react-native-worklets@npm:0.7.4": version: 0.7.4 resolution: "react-native-worklets@npm:0.7.4" From 384672cb24335a4a2b2b3c4e0e77b66db295105c Mon Sep 17 00:00:00 2001 From: chmjkb Date: Thu, 9 Apr 2026 13:43:43 +0200 Subject: [PATCH 02/19] (wip)i think the blur sort of works --- .../computer-vision/app/webrtc_test/index.tsx | 72 +-- .../src/main/cpp/FrameProcessorBridge.cpp | 32 +- .../webrtc/ExecutorchFrameProcessor.kt | 429 +++++++++--------- .../webrtc/ExecutorchFrameProcessorFactory.kt | 4 +- .../com/executorch/webrtc/ExecutorchWebRTC.kt | 56 +-- .../webrtc/ExecutorchWebRTCModule.kt | 95 ++-- .../webrtc/ExecutorchWebRTCPackage.kt | 8 +- .../src/index.ts | 16 + 8 files changed, 386 insertions(+), 326 deletions(-) diff --git a/apps/computer-vision/app/webrtc_test/index.tsx b/apps/computer-vision/app/webrtc_test/index.tsx index a0406d66e3..48f1648f43 100644 --- a/apps/computer-vision/app/webrtc_test/index.tsx +++ b/apps/computer-vision/app/webrtc_test/index.tsx @@ -6,6 +6,7 @@ import { TouchableOpacity, Alert, Platform, + ScrollView, } from 'react-native'; import { RTCView, @@ -143,35 +144,41 @@ export default function WebRTCTest() { } }; - // Switch between front and back camera using applyConstraints (recommended) + // Switch between front and back camera const switchCamera = async () => { if (!streamRef.current) return; - try { - console.log('Switching camera...'); - const videoTrack = streamRef.current.getVideoTracks()[0]; + const newFacingMode = isFrontCamera ? 'environment' : 'user'; + console.log('Switching camera to:', newFacingMode); - if (videoTrack) { - // Use applyConstraints instead of stopping and restarting - await videoTrack.applyConstraints({ - facingMode: isFrontCamera ? 
'environment' : 'user', - }); + // Stop current stream completely + streamRef.current.getTracks().forEach((track) => { + track.stop(); + }); + setStream(null); + streamRef.current = null; - setIsFrontCamera(!isFrontCamera); - console.log('Camera switched successfully'); - } - } catch (error) { - console.error( - 'Error switching camera with applyConstraints, falling back to restart:', - error - ); + // Wait for camera to fully release, then start new stream + await new Promise((resolve) => setTimeout(resolve, 300)); - // Fallback: stop and restart - stopCamera(); + try { + const mediaStream = await mediaDevices.getUserMedia({ + video: { + facingMode: newFacingMode, + frameRate: 30, + width: { ideal: 640 }, + height: { ideal: 480 }, + }, + audio: false, + }); + + setStream(mediaStream); + streamRef.current = mediaStream; setIsFrontCamera(!isFrontCamera); - setTimeout(() => { - startCamera(); - }, 100); + console.log('Camera switched successfully'); + } catch (error) { + console.error('Error switching camera:', error); + setCameraStarted(false); } }; @@ -184,15 +191,11 @@ export default function WebRTCTest() { }; }, []); - // Auto-start camera when facingMode changes - useEffect(() => { - if (cameraStarted && !stream) { - startCamera(); - } - }, [isFrontCamera]); - return ( - + WebRTC Camera Test @@ -348,16 +351,19 @@ export default function WebRTCTest() { ✓ Results sent back to JS via event emitter - + ); } const styles = StyleSheet.create({ container: { flex: 1, - padding: 20, backgroundColor: '#fff', }, + contentContainer: { + padding: 20, + paddingBottom: 40, + }, title: { fontSize: 24, fontWeight: 'bold', @@ -372,7 +378,7 @@ const styles = StyleSheet.create({ }, videoContainer: { width: '100%', - height: 300, + height: 500, backgroundColor: '#000', borderRadius: 12, overflow: 'hidden', diff --git a/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp b/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp index c8bd503879..6f42157084 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp +++ b/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp @@ -412,23 +412,45 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( cv::add(smoothMask, 3.0f, smoothMask); cv::multiply(t2, smoothMask, fullMask); - // Apply mask directly to Y plane (black = Y value of 16) + // Blur the mask edges for smoother blending + cv::GaussianBlur(fullMask, fullMask, cv::Size(15, 15), 0); + + // Create Y plane Mat (packed, no stride padding) + cv::Mat yMat(height, width, CV_8UC1); + for (int row = 0; row < height; row++) { + memcpy(yMat.ptr(row), ySrc + row * actualYStride, width); + } + + // Create blurred Y using downscale-blur-upscale for performance + // Downscale 3x, stack blur (O(1) fast blur), upscale back + cv::Mat ySmall, yBlurredSmall, yBlurred; + int smallW = width / 3; + int smallH = height / 3; + cv::resize(yMat, ySmall, cv::Size(smallW, smallH), 0, 0, cv::INTER_AREA); + cv::stackBlur(ySmall, yBlurredSmall, cv::Size(25, 25)); // O(1) fast blur + cv::resize(yBlurredSmall, yBlurred, cv::Size(width, height), 0, 0, + cv::INTER_LINEAR); + + // Blend: foreground (mask=1) uses original, background (mask=0) uses blurred std::vector outY(actualYStride * height); - const uint8_t BLACK_Y = 16; // Black in YUV for (int row = 0; row < height; row++) { - const uint8_t *srcY = ySrc + row * actualYStride; + const uint8_t *srcY = yMat.ptr(row); + const uint8_t 
*blurY = yBlurred.ptr(row); const float *maskRow = fullMask.ptr(row); uint8_t *dstY = outY.data() + row * actualYStride; for (int col = 0; col < width; col++) { float prob = maskRow[col]; + // prob=1: foreground (person) = original + // prob=0: background = blurred dstY[col] = - static_cast(BLACK_Y * (1.0f - prob) + srcY[col] * prob); + static_cast(blurY[col] * (1.0f - prob) + srcY[col] * prob); } // Copy stride padding if any if (actualYStride > width) { - memcpy(dstY + width, srcY + width, actualYStride - width); + memcpy(dstY + width, ySrc + row * actualYStride + width, + actualYStride - width); } } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt index df5fe039ad..1e3aec00d2 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt @@ -4,231 +4,244 @@ import android.util.Log import com.oney.WebRTCModule.videoEffects.VideoFrameProcessor import org.webrtc.SurfaceTextureHelper import org.webrtc.VideoFrame -import org.webrtc.VideoFrame.I420Buffer import java.nio.ByteBuffer /** * WebRTC frame processor that applies background blur using ExecuTorch segmentation. */ class ExecutorchFrameProcessor : VideoFrameProcessor { - private var frameCount = 0 - private var lastLogTime = System.currentTimeMillis() - private var lastProcessTime = System.currentTimeMillis() - private val TAG = "ExecutorchFrameProcessor" + private var frameCount = 0 + private var lastLogTime = System.currentTimeMillis() + private var lastProcessTime = System.currentTimeMillis() + private val TAG = "ExecutorchFrameProcessor" - // Model state - private var modelLoaded = false - private var loadedModelPath: String? = null + // Model state + private var modelLoaded = false + private var loadedModelPath: String? = null + companion object { init { - Log.d(TAG, "ExecutorchFrameProcessor created - background removal enabled") - tryLoadModel() + try { + System.loadLibrary("react-native-executorch-webrtc") + } catch (e: Exception) { + Log.e("ExecutorchFrameProcessor", "Failed to load native library", e) + } + } + } + + init { + Log.d(TAG, "ExecutorchFrameProcessor created - background removal enabled") + tryLoadModel() + } + + /** + * Try to load the model if not already loaded and path is available. + * Called from init and on each frame to handle late model configuration. + */ + private fun tryLoadModel() { + val configuredPath = ExecutorchWebRTC.modelPath + + // Skip if no path configured or already loaded this path + if (configuredPath == null) { + return } - /** - * Try to load the model if not already loaded and path is available. - * Called from init and on each frame to handle late model configuration. 
- */ - private fun tryLoadModel() { - val configuredPath = ExecutorchWebRTC.modelPath - - // Skip if no path configured or already loaded this path - if (configuredPath == null) { - return - } - - if (modelLoaded && loadedModelPath == configuredPath) { - return - } - - try { - Log.d(TAG, "Loading segmentation model from: $configuredPath") - val success = loadModel(configuredPath) - if (success) { - modelLoaded = true - loadedModelPath = configuredPath - Log.d(TAG, "✅ Segmentation model loaded successfully!") - } else { - Log.e(TAG, "❌ loadModel returned false") - } - } catch (e: Exception) { - Log.e(TAG, "❌ Failed to load model: $configuredPath", e) - } + if (modelLoaded && loadedModelPath == configuredPath) { + return } - /** - * Load the segmentation model - */ - private external fun loadModel(modelPath: String): Boolean - - /** - * Process I420 frame directly in native code - much faster than the old path. - * Does segmentation and mask application in one JNI call. - * @return Modified Y plane with background blacked out - */ - private external fun processI420Frame( - yData: ByteArray, - uData: ByteArray, - vData: ByteArray, - width: Int, - height: Int, - yStride: Int, - uvStride: Int, - rotation: Int - ): ByteArray? - - companion object { - init { - try { - System.loadLibrary("react-native-executorch-webrtc") - } catch (e: Exception) { - Log.e("ExecutorchFrameProcessor", "Failed to load native library", e) - } - } + try { + Log.d(TAG, "Loading segmentation model from: $configuredPath") + val success = loadModel(configuredPath) + if (success) { + modelLoaded = true + loadedModelPath = configuredPath + Log.d(TAG, "✅ Segmentation model loaded successfully!") + } else { + Log.e(TAG, "❌ loadModel returned false") + } + } catch (e: Exception) { + Log.e(TAG, "❌ Failed to load model: $configuredPath", e) + } + } + + /** + * Load the segmentation model + */ + private external fun loadModel(modelPath: String): Boolean + + /** + * Process I420 frame directly in native code - much faster than the old path. + * Does segmentation and mask application in one JNI call. + * @return Modified Y plane with background blacked out + */ + private external fun processI420Frame( + yData: ByteArray, + uData: ByteArray, + vData: ByteArray, + width: Int, + height: Int, + yStride: Int, + uvStride: Int, + rotation: Int, + ): ByteArray? 
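The native pipeline in `FrameProcessorBridge.cpp` above keeps the blur cheap by computing it at one-third resolution with `cv::stackBlur` and upscaling, then mixes the blurred background into the original luma plane per pixel: `dst = blur * (1 - p) + src * p`, where `p` is the segmentation's foreground probability. A plain TypeScript sketch of that blend, with the stride-padding bookkeeping left out for clarity (the function and array names are illustrative):

```tsx
// Blend an original Y (luma) plane with its blurred copy using a per-pixel
// foreground-probability mask: p = 1 keeps the person, p = 0 shows the
// blurred background. Mirrors the native loop, minus stride padding.
function blendYPlane(
  src: Uint8Array, // original luma, width * height bytes
  blurred: Uint8Array, // stack-blurred luma, same layout
  mask: Float32Array, // foreground probability in [0, 1], same layout
  width: number,
  height: number
): Uint8Array {
  const out = new Uint8Array(width * height);
  for (let row = 0; row < height; row++) {
    for (let col = 0; col < width; col++) {
      const i = row * width + col;
      const p = mask[i];
      out[i] = (blurred[i] * (1 - p) + src[i] * p) | 0; // truncate to byte
    }
  }
  return out;
}
```

Note also that `tryLoadModel` runs both at construction and on every frame, so `configureBackgroundRemoval` may be called before or after the processor is attached to a track.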
+ + + override fun process( + frame: VideoFrame, + helper: SurfaceTextureHelper, + ): VideoFrame { + frameCount++ + + // Try to load model if not loaded yet (handles late configuration) + if (!modelLoaded) { + tryLoadModel() } - override fun process(frame: VideoFrame, helper: SurfaceTextureHelper): VideoFrame { - frameCount++ - - // Try to load model if not loaded yet (handles late configuration) - if (!modelLoaded) { - tryLoadModel() - } - - // Log frame info every second - val now = System.currentTimeMillis() - if (now - lastLogTime >= 1000) { - val buffer = frame.buffer - Log.d(TAG, """ - ========== FRAME INFO ========== - Frame count: $frameCount - Size: ${buffer.width}x${buffer.height} - Rotation: ${frame.rotation} degrees - Buffer type: ${buffer.javaClass.simpleName} - FPS: ${frameCount / ((now - lastLogTime) / 1000.0)} - Background Removal: ACTIVE - ================================ - """.trimIndent()) - - lastLogTime = now - frameCount = 0 - } - - // Apply background blur - val blurredFrame = processWithModel(frame) - - if (blurredFrame != null) { - lastProcessTime = now - if (frameCount % 30 == 0) { - Log.d(TAG, "Returning PROCESSED frame (rotation=${blurredFrame.rotation}, timestamp=${blurredFrame.timestampNs})") - } - // Return the blurred frame - return blurredFrame - } - - // Fallback: return original frame if processing failed - if (frameCount % 30 == 0) { - Log.w(TAG, "Returning ORIGINAL frame (processing returned null)") - } - frame.retain() - return frame + // Log frame info every second + val now = System.currentTimeMillis() + if (now - lastLogTime >= 1000) { + val buffer = frame.buffer + Log.d( + TAG, + """ + ========== FRAME INFO ========== + Frame count: $frameCount + Size: ${buffer.width}x${buffer.height} + Rotation: ${frame.rotation} degrees + Buffer type: ${buffer.javaClass.simpleName} + FPS: ${frameCount / ((now - lastLogTime) / 1000.0)} + Background Blur: ACTIVE + ================================ + """.trimIndent(), + ) + + lastLogTime = now + frameCount = 0 } - private fun processWithModel(frame: VideoFrame): VideoFrame? 
{ - val i420Buffer = frame.buffer.toI420() - if (i420Buffer == null) { - Log.e(TAG, "Failed to convert frame buffer to I420!") - return null - } - - try { - val width = i420Buffer.width - val height = i420Buffer.height - - // Extract Y, U, V planes as byte arrays - val yPlane = i420Buffer.dataY - val uPlane = i420Buffer.dataU - val vPlane = i420Buffer.dataV - val yStride = i420Buffer.strideY - val uStride = i420Buffer.strideU - val vStride = i420Buffer.strideV - - // Calculate sizes - use minimum of calculated size and available bytes - val uvHeight = height / 2 - val yCalcSize = yStride * height - val uCalcSize = uStride * uvHeight - val vCalcSize = vStride * uvHeight - - val yAvail = yPlane.remaining() - val uAvail = uPlane.remaining() - val vAvail = vPlane.remaining() - - val ySize = minOf(yCalcSize, yAvail) - val uSize = minOf(uCalcSize, uAvail) - val vSize = minOf(vCalcSize, vAvail) - - // Log buffer info occasionally for debugging - if (frameCount % 60 == 0) { - Log.d(TAG, "Buffer info: Y=$ySize/$yAvail (stride=$yStride), U=$uSize/$uAvail (stride=$uStride), V=$vSize/$vAvail (stride=$vStride), ${width}x${height}") - } - - val yData = ByteArray(ySize) - val uData = ByteArray(uSize) - val vData = ByteArray(vSize) - - yPlane.get(yData) - uPlane.get(uData) - vPlane.get(vData) - - // Process in native - returns modified Y plane - // Pass rotation so native code can rotate image before model inference - val processedY = processI420Frame(yData, uData, vData, width, height, yStride, uStride, frame.rotation) - - if (processedY == null) { - Log.e(TAG, "processI420Frame returned null!") - i420Buffer.release() - return null - } - - // Calculate actual Y stride from returned data - val actualYStride = processedY.size / height - - // Log success occasionally - if (frameCount % 30 == 0) { - Log.d(TAG, "Frame processed: ${width}x${height}, processedY=${processedY.size}, actualYStride=$actualYStride") - } - - // Create output buffers - // For Y: use processed data with calculated stride - // For U/V: keep original data and strides (we don't modify chroma) - val outYPlane = ByteBuffer.allocateDirect(processedY.size) - val outUPlane = ByteBuffer.allocateDirect(uSize) - val outVPlane = ByteBuffer.allocateDirect(vSize) - - outYPlane.put(processedY) - outUPlane.put(uData) - outVPlane.put(vData) - - outYPlane.rewind() - outUPlane.rewind() - outVPlane.rewind() - - // Use original U/V strides since we're passing through the original chroma data - val resultBuffer = org.webrtc.JavaI420Buffer.wrap( - width, height, - outYPlane, actualYStride, - outUPlane, uStride, - outVPlane, vStride, - null - ) - - i420Buffer.release() - return VideoFrame(resultBuffer, frame.rotation, frame.timestampNs) - } catch (e: Exception) { - Log.e(TAG, "Exception in processWithModel: ${e.message}", e) - i420Buffer.release() - return null - } + // Apply background blur + val blurredFrame = processWithModel(frame) + + if (blurredFrame != null) { + lastProcessTime = now + if (frameCount % 30 == 0) { + Log.d(TAG, "Returning PROCESSED frame (rotation=${blurredFrame.rotation}, timestamp=${blurredFrame.timestampNs})") + } + // Return the blurred frame + return blurredFrame } + // Fallback: return original frame if processing failed + if (frameCount % 30 == 0) { + Log.w(TAG, "Returning ORIGINAL frame (processing returned null)") + } + frame.retain() + return frame + } + + private fun processWithModel(frame: VideoFrame): VideoFrame? 
{ + val i420Buffer = frame.buffer.toI420() + if (i420Buffer == null) { + Log.e(TAG, "Failed to convert frame buffer to I420!") + return null + } + + try { + val width = i420Buffer.width + val height = i420Buffer.height + + // Extract Y, U, V planes as byte arrays + val yPlane = i420Buffer.dataY + val uPlane = i420Buffer.dataU + val vPlane = i420Buffer.dataV + val yStride = i420Buffer.strideY + val uStride = i420Buffer.strideU + val vStride = i420Buffer.strideV + + // Calculate sizes - use minimum of calculated size and available bytes + val uvHeight = height / 2 + val yCalcSize = yStride * height + val uCalcSize = uStride * uvHeight + val vCalcSize = vStride * uvHeight + + val yAvail = yPlane.remaining() + val uAvail = uPlane.remaining() + val vAvail = vPlane.remaining() + + val ySize = minOf(yCalcSize, yAvail) + val uSize = minOf(uCalcSize, uAvail) + val vSize = minOf(vCalcSize, vAvail) + + // Log buffer info occasionally for debugging + if (frameCount % 60 == 0) { + Log.d( + TAG, + "Buffer info: Y=$ySize/$yAvail (stride=$yStride), U=$uSize/$uAvail (stride=$uStride), V=$vSize/$vAvail (stride=$vStride), ${width}x$height", + ) + } + + val yData = ByteArray(ySize) + val uData = ByteArray(uSize) + val vData = ByteArray(vSize) + + yPlane.get(yData) + uPlane.get(uData) + vPlane.get(vData) + + // Process in native - returns modified Y plane + // Pass rotation so native code can rotate image before model inference + val processedY = processI420Frame(yData, uData, vData, width, height, yStride, uStride, frame.rotation) + + if (processedY == null) { + Log.e(TAG, "processI420Frame returned null!") + i420Buffer.release() + return null + } + + // Calculate actual Y stride from returned data + val actualYStride = processedY.size / height + + // Log success occasionally + if (frameCount % 30 == 0) { + Log.d(TAG, "Frame processed: ${width}x$height, processedY=${processedY.size}, actualYStride=$actualYStride") + } + + // Create output buffers + // For Y: use processed data with calculated stride + // For U/V: keep original data and strides (we don't modify chroma) + val outYPlane = ByteBuffer.allocateDirect(processedY.size) + val outUPlane = ByteBuffer.allocateDirect(uSize) + val outVPlane = ByteBuffer.allocateDirect(vSize) + + outYPlane.put(processedY) + outUPlane.put(uData) + outVPlane.put(vData) + + outYPlane.rewind() + outUPlane.rewind() + outVPlane.rewind() + + // Use original U/V strides since we're passing through the original chroma data + val resultBuffer = + org.webrtc.JavaI420Buffer.wrap( + width, + height, + outYPlane, + actualYStride, + outUPlane, + uStride, + outVPlane, + vStride, + null, + ) + + i420Buffer.release() + return VideoFrame(resultBuffer, frame.rotation, frame.timestampNs) + } catch (e: Exception) { + Log.e(TAG, "Exception in processWithModel: ${e.message}", e) + i420Buffer.release() + return null + } + } } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt index ab9954bf56..30f42a65a8 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt @@ -8,7 +8,5 @@ import com.oney.WebRTCModule.videoEffects.VideoFrameProcessorFactoryInterface * Required by react-native-webrtc's 
ProcessorProvider system. */ class ExecutorchFrameProcessorFactory : VideoFrameProcessorFactoryInterface { - override fun build(): VideoFrameProcessor { - return ExecutorchFrameProcessor() - } + override fun build(): VideoFrameProcessor = ExecutorchFrameProcessor() } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt index abccc0aed9..d9fd2d11db 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt @@ -8,37 +8,37 @@ import com.oney.WebRTCModule.videoEffects.ProcessorProvider * Call registerProcessors() from your Application.onCreate() */ object ExecutorchWebRTC { - private const val TAG = "ExecutorchWebRTC" - private const val PROCESSOR_NAME = "executorch" + private const val TAG = "ExecutorchWebRTC" + private const val PROCESSOR_NAME = "executorch" - // Configuration for background removal - var modelPath: String? = null + // Configuration for background removal + var modelPath: String? = null - /** - * Registers the ExecuTorch frame processor with react-native-webrtc. - * Call this in your Application.onCreate() method. - */ - fun registerProcessors() { - try { - ProcessorProvider.addProcessor(PROCESSOR_NAME, ExecutorchFrameProcessorFactory()) - Log.d(TAG, "✅ ExecuTorch frame processor registered successfully") - } catch (e: Exception) { - Log.e(TAG, "❌ Failed to register ExecuTorch processor", e) - } + /** + * Registers the ExecuTorch frame processor with react-native-webrtc. + * Call this in your Application.onCreate() method. + */ + fun registerProcessors() { + try { + ProcessorProvider.addProcessor(PROCESSOR_NAME, ExecutorchFrameProcessorFactory()) + Log.d(TAG, "✅ ExecuTorch frame processor registered successfully") + } catch (e: Exception) { + Log.e(TAG, "❌ Failed to register ExecuTorch processor", e) } + } - /** - * Configure the segmentation model for background removal - */ - fun configureModel(path: String) { - Log.d(TAG, "📥 configureModel called with path: $path") - modelPath = path - Log.d(TAG, "✅ Model path configured - processors will load model on next frame") - } + /** + * Configure the segmentation model for background removal + */ + fun configureModel(path: String) { + Log.d(TAG, "📥 configureModel called with path: $path") + modelPath = path + Log.d(TAG, "✅ Model path configured - processors will load model on next frame") + } - /** - * Gets the processor name to use in JavaScript. - * Use this when calling videoTrack._setVideoEffects(['...']) - */ - fun getProcessorName(): String = PROCESSOR_NAME + /** + * Gets the processor name to use in JavaScript. 
+ * Use this when calling videoTrack._setVideoEffects(['...']) + */ + fun getProcessorName(): String = PROCESSOR_NAME } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt index 368e1de1d7..51a502bea8 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt @@ -1,5 +1,6 @@ package com.executorch.webrtc +import com.facebook.react.bridge.Promise import com.facebook.react.bridge.ReactApplicationContext import com.facebook.react.bridge.ReactContextBaseJavaModule import com.facebook.react.bridge.ReactMethod @@ -12,56 +13,64 @@ import com.facebook.react.modules.core.DeviceEventManagerModule * This allows the package to work without manual native code setup. */ @ReactModule(name = ExecutorchWebRTCModule.NAME) -class ExecutorchWebRTCModule(reactContext: ReactApplicationContext) : - ReactContextBaseJavaModule(reactContext) { +class ExecutorchWebRTCModule( + reactContext: ReactApplicationContext, +) : ReactContextBaseJavaModule(reactContext) { + companion object { + const val NAME = "ExecutorchWebRTC" + private var initialized = false + private var moduleContext: ReactApplicationContext? = null - companion object { - const val NAME = "ExecutorchWebRTC" - private var initialized = false - private var moduleContext: ReactApplicationContext? = null - - /** - * Send event to JavaScript - */ - fun sendEvent(eventName: String, params: WritableMap?) { - moduleContext?.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java) - ?.emit(eventName, params) - } + /** + * Send event to JavaScript + */ + fun sendEvent( + eventName: String, + params: WritableMap?, + ) { + moduleContext + ?.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java) + ?.emit(eventName, params) } + } - init { - moduleContext = reactContext + init { + moduleContext = reactContext - // Auto-register the processor when the module is loaded - if (!initialized) { - ExecutorchWebRTC.registerProcessors() - initialized = true - } + // Auto-register the processor when the module is loaded + if (!initialized) { + ExecutorchWebRTC.registerProcessors() + initialized = true } + } - override fun getName(): String = NAME + override fun getName(): String = NAME - /** - * No-op method just to ensure the module is loaded. - * Called from JS to trigger initialization. - */ - @ReactMethod - fun setup() { - // Module init happens in constructor, this is just a trigger - } + /** + * No-op method just to ensure the module is loaded. + * Called from JS to trigger initialization. 
+ */ + @ReactMethod + fun setup() { + // Module init happens in constructor, this is just a trigger + } - /** - * Configure the segmentation model for background removal - * @param modelPath Path to the .pte model file - */ - @ReactMethod - fun configureBackgroundRemoval(modelPath: String) { - ExecutorchWebRTC.configureModel(modelPath) - } + /** + * Configure the segmentation model for background removal + * @param modelPath Path to the .pte model file + */ + @ReactMethod + fun configureBackgroundRemoval(modelPath: String) { + ExecutorchWebRTC.configureModel(modelPath) + } + + // Legacy alias + @ReactMethod + fun configureBackgroundBlur( + modelPath: String, + blurIntensity: Int, + ) { + ExecutorchWebRTC.configureModel(modelPath) + } - // Legacy alias - @ReactMethod - fun configureBackgroundBlur(modelPath: String, blurIntensity: Int) { - ExecutorchWebRTC.configureModel(modelPath) - } } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCPackage.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCPackage.kt index 265c11e661..2b63e88243 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCPackage.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCPackage.kt @@ -6,11 +6,7 @@ import com.facebook.react.bridge.ReactApplicationContext import com.facebook.react.uimanager.ViewManager class ExecutorchWebRTCPackage : ReactPackage { - override fun createNativeModules(reactContext: ReactApplicationContext): List { - return listOf(ExecutorchWebRTCModule(reactContext)) - } + override fun createNativeModules(reactContext: ReactApplicationContext): List = listOf(ExecutorchWebRTCModule(reactContext)) - override fun createViewManagers(reactContext: ReactApplicationContext): List> { - return emptyList() - } + override fun createViewManagers(reactContext: ReactApplicationContext): List> = emptyList() } diff --git a/packages/react-native-executorch-webrtc/src/index.ts b/packages/react-native-executorch-webrtc/src/index.ts index ec68702877..0ec095c457 100644 --- a/packages/react-native-executorch-webrtc/src/index.ts +++ b/packages/react-native-executorch-webrtc/src/index.ts @@ -53,6 +53,22 @@ export function configureBackgroundRemoval(modelPath: string): void { } } +/** + * Get the current frame processing FPS + * @returns Promise resolving to current FPS (0 if not processing) + */ +export async function getFps(): Promise { + if (Platform.OS !== 'android') { + return 0; + } + + const { ExecutorchWebRTC } = NativeModules; + if (ExecutorchWebRTC) { + return ExecutorchWebRTC.getFps(); + } + return 0; +} + // Legacy alias export const configureBackgroundBlur = configureBackgroundRemoval; From c1890c39d409abfb1148f9342b20746850d01fd9 Mon Sep 17 00:00:00 2001 From: chmjkb Date: Fri, 10 Apr 2026 10:25:30 +0200 Subject: [PATCH 03/19] (mvp)ios works --- .../android/CMakeLists.txt | 27 +- .../android/build.gradle | 2 + .../android/gradle/wrapper/gradle-wrapper.jar | Bin 0 -> 43739 bytes .../gradle/wrapper/gradle-wrapper.properties | 7 + .../android/gradlew | 251 ++++++++++++++ .../android/gradlew.bat | 94 ++++++ .../src/main/cpp/FrameProcessorBridge.cpp | 295 ++++++----------- .../webrtc/ExecutorchFrameProcessor.kt | 3 +- .../com/executorch/webrtc/ExecutorchWebRTC.kt | 2 +- .../webrtc/ExecutorchWebRTCModule.kt | 2 - .../ios/ExecutorchFrameProcessor.h | 17 + .../ios/ExecutorchFrameProcessor.mm | 
305 ++++++++++++++++++ .../ios/ExecutorchWebRTC.h | 5 + .../ios/ExecutorchWebRTC.mm | 44 +++ .../package.json | 2 + .../react-native-executorch-webrtc.podspec | 33 ++ .../react-native.config.js | 4 +- .../src/index.ts | 6 +- .../src/useWebRTCFrameProcessor.ts | 10 +- 19 files changed, 867 insertions(+), 242 deletions(-) create mode 100644 packages/react-native-executorch-webrtc/android/gradle/wrapper/gradle-wrapper.jar create mode 100644 packages/react-native-executorch-webrtc/android/gradle/wrapper/gradle-wrapper.properties create mode 100755 packages/react-native-executorch-webrtc/android/gradlew create mode 100644 packages/react-native-executorch-webrtc/android/gradlew.bat create mode 100644 packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.h create mode 100644 packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm create mode 100644 packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.h create mode 100644 packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.mm create mode 100644 packages/react-native-executorch-webrtc/react-native-executorch-webrtc.podspec diff --git a/packages/react-native-executorch-webrtc/android/CMakeLists.txt b/packages/react-native-executorch-webrtc/android/CMakeLists.txt index 81b0205e13..71447a0b45 100644 --- a/packages/react-native-executorch-webrtc/android/CMakeLists.txt +++ b/packages/react-native-executorch-webrtc/android/CMakeLists.txt @@ -4,18 +4,6 @@ project(react-native-executorch-webrtc) set(CMAKE_VERBOSE_MAKEFILE ON) set(CMAKE_CXX_STANDARD 20) -# Resolve React Native directory -if(NOT DEFINED REACT_NATIVE_DIR) - # Try to find it via node - execute_process( - COMMAND node --print "require.resolve('react-native/package.json')" - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE REACT_NATIVE_PACKAGE - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - get_filename_component(REACT_NATIVE_DIR "${REACT_NATIVE_PACKAGE}" DIRECTORY) -endif() - # Paths to react-native-executorch set(RN_EXECUTORCH_DIR "${CMAKE_SOURCE_DIR}/../../react-native-executorch") set(RN_EXECUTORCH_THIRD_PARTY "${RN_EXECUTORCH_DIR}/third-party/include") @@ -29,25 +17,23 @@ add_library( ${SOURCES} ) +# Find packages - ReactAndroid provides JSI headers +find_package(ReactAndroid REQUIRED CONFIG) +find_package(fbjni REQUIRED CONFIG) +find_package(react-native-executorch REQUIRED CONFIG) + # Include headers target_include_directories( ${CMAKE_PROJECT_NAME} PRIVATE "${RN_EXECUTORCH_DIR}/common" "${RN_EXECUTORCH_THIRD_PARTY}" - "${REACT_NATIVE_DIR}/ReactCommon" - "${REACT_NATIVE_DIR}/ReactCommon/jsi" - "${REACT_NATIVE_DIR}/ReactCommon/callinvoker" - "${REACT_NATIVE_DIR}/ReactAndroid/src/main/jni/react/turbomodule" ) # Find prebuilt libraries find_library(LOG_LIB log) find_library(ANDROID_LIB android) -# Find packages -find_package(react-native-executorch REQUIRED CONFIG) - # Import ExecuTorch library set(LIBS_DIR "${RN_EXECUTORCH_DIR}/third-party/android/libs") add_library(executorch SHARED IMPORTED) @@ -76,6 +62,9 @@ target_link_libraries( ${CMAKE_PROJECT_NAME} ${LOG_LIB} ${ANDROID_LIB} + ReactAndroid::jsi + ReactAndroid::reactnative + fbjni::fbjni react-native-executorch::react-native-executorch ${OPENCV_LIBS} ${OPENCV_THIRD_PARTY_LIBS} diff --git a/packages/react-native-executorch-webrtc/android/build.gradle b/packages/react-native-executorch-webrtc/android/build.gradle index a6f178a3ac..88a9cebe65 100644 --- a/packages/react-native-executorch-webrtc/android/build.gradle +++ b/packages/react-native-executorch-webrtc/android/build.gradle @@ -7,6 
+7,7 @@ buildscript { } dependencies { + classpath 'com.android.tools.build:gradle:8.2.2' classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" } } @@ -15,6 +16,7 @@ apply plugin: 'com.android.library' apply plugin: 'kotlin-android' android { + namespace 'com.executorch.webrtc' compileSdkVersion 34 buildFeatures { diff --git a/packages/react-native-executorch-webrtc/android/gradle/wrapper/gradle-wrapper.jar b/packages/react-native-executorch-webrtc/android/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..980502d167d3610f88fa03b2f717935189d9fbcf GIT binary patch literal 43739 [43739 bytes of base85-encoded binary data for the Gradle wrapper jar omitted]
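Patch 02 above also exposed a `getFps()` helper on the JS side, but no matching `getFps` `@ReactMethod` appears in the Kotlin hunks shown here, so the native half has to be assumed. A small polling sketch under that assumption (the helper name is real; the interval wrapper is illustrative):

```tsx
import { getFps } from 'react-native-executorch-webrtc';

// Log the native processor's throughput once a second while debugging.
// Assumes the Android module implements a matching getFps() ReactMethod,
// which is not visible in the hunks above; the JS helper resolves to 0
// on other platforms or when nothing is being processed.
function startFpsLogging(): () => void {
  const id = setInterval(async () => {
    const fps = await getFps();
    console.log(`[ExecutorchWebRTC] processing at ${fps.toFixed(1)} fps`);
  }, 1000);
  return () => clearInterval(id);
}
```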
zz>l_oI|J;iQh*`(F;uo2n-w&>CX#?KAJg%C)y(fMDOcV8wF@Jr(U_!M`oULpRPd}5 zb}#AR*yObx9^y^yU|PsGh`@ri>#^saV@^s!j$~*$YZlu-gGX>L9ae zpgPr8cD(Jrp}`pkZr~NW+jOeUaOgz*UqpuC=Up4?hsrSJRDP}bs!kW+|obs&#Ucu zTEMG8-Bn}4iT;xgEq7F4-{2zahKs`4+>HSss`?QvkYSK~_q{mDP7x))L{bq0!jCMP zH>nCcmvM)4YlO|ULAJ=sLfr$LVeho}SZ6ggo+AFtVjy|4pz)+>Ts{^!2|zt$mJ(Jv z@VxHfeP=>~e;ke>!4_lk5ie>ihFd^~_q(|qx1v0)3*zy#TR|EUs;0Ss-~=8BsEZs3 zNa5f5MYR9hFUktaNs5UotI)}c{U6VGD?2_WBTY*;120WWH90<2uf#CVynS#pPCG0) zAv-}WNdpXX8fucdU#Ladg8998zmO^z^E(DwA;z^6IAn{+@rwyr$su~lsaG*jig~eV zF@`232L>sbdEqHeK=keb$k)R`LVdp0?izhLRFkjs?;n=&>tXGk%<0XY3{7lI>5XkH z>4oiWZ4K>AWGwAW1)a=YZB6Z5L_Lg69b7E!?dXhc44s|-&nJJpbqoZWS5+~Cn&7>L1h_Ju;iqzu@*oVRq11&os;L`kquX~dp z$N&lwPOoWA^QIqDsA{IjXZ#@Xv1Qz*Du%-4kT|nQqC-50_*(=uGI6U=D?$;xPX`*A zLEI5l9dVost1%-E{Qgy72jBC#e(E3+vKlcCl1NE|@ZBn<5&JRdIWgZ!?qd?g0Q?U- zVB_f=^P)755_qO#GrfV)sCfgLm{{`k#@-_343|As%Ng|MIF#Gd3$l4^JpS;Q@E8ZG zoRq3*jL#Ezh#2Z~7srY1W0M#2Y|I7Cw30`Biyl4PjA=84+-X7vf89V;1}UXqZczBW z2;T+vDqbO8tNCMb7Vt}bLH~*bNH5qH@mIIN;p_bSNRa()B;@~JU%#o6wmhmL(gy-s zY7@0W9+aMAXJe5mEtDEV7ZN?GYV>!cX!?@&u=9XUmUiuY#vA@S#HTVbQVS!W2lprL z`IV4W6urr;^xKKYiS%^+A6@T23{l@hAHBV&sO=lL*xiDyt)en&i_ls7oHJ$*0e9<( zd+C9@T{Yl{V7zP|3QRb?%g|bKd9-$p+(@F8mMM6fG$Y{!T{Q}9W=GIx>Z{M%v}?rz z)7wQ%t-Xzf)WP(+QTgq?h!Rn|De0~0QX^>Xt82gvp(+#B&!HX^wml37YL>kT1x z%S!qWcwy~kDL>UR6>DKo;QH2l($3i2X?+{JXrmPb`Ga-`!u<^!a6q*H4fjJl7V{z! zF$%5rjd(ku*43G$DZkt_Qf&#qz$7z?8GNvB8KPZ?tPJmv14(JOtXbJjmJN=($#tD**>}mSyjKMN6Fk%wdDylGpO2bAX13-zAcRMtF2I4LoNyUMZq5Z3e z^^jDPB-#C(ItmGqcD^lzMpqP$k)!9NRl&VSuG$pSSP&-rAvseFIb-hVZBBUlN{;YL zb1jj$wyCI>Fi!KjT9GBYs#rhHLLxzm=Y|U;23j3GDke5qF^zHp2zKnUf37OBqmQHD zvdf0rT)5a3I^o_b24Psp(jZqgL1yt@$4!Q#jx|J5DD$IfeS$G5>YohehYqL>VArLW z+M0M%Fa;ZOVUCmo2s=#(Wii?G2@LMI>oOs+wubu6b=HRtK0i`~*G)?02=n`|5R$;# zQn3AuE=DkA(7Jag2gAC%`GIRyz}@4(nM|-(x<3{{>|mlPXy|5Fns> z(7%H+^WQ>Q!O+Rs)QMEk%*E8{pRjiR7g|YCK9@rkMB^0>C|a8hgnFW_`u47c!;lCw z2qq~bfy0oG^<-S!K4)s^-ju$P-#;Am1otrw_I;)w@(K{`zJ`L7!E!<7Y<`&Ie3{Pe z?)Uk84f`7e1}--?R!@x&i`DKN))H4bRFz#S6#WT)X)gg+Vh+(p@KwqqFf1^uork4T z*YG?{mY*f{bRAZ7#Db%E3bz<{sFap&QX4i7s+_9i&1>$~f@J;RkcT$JMTaujsYtjT zQYa)j>VeuB@rbIJ79l!L*Z}UuY+5DNT52{&iU z=y8<88bjAtC-GMz~ z?WYme8OXE&18Iw`zJ-6xYEBKYl|M@{W5FIDfr4=5EVc3QAn{_RpKPgm$078%va;pf zBDUBbL4hX!glnP2_>2__^j(51dJ`)7Fj}|aKJ~7B@&`4(LZ}VgskG0<)1X7+US<&( zblpns&t*D1f!=ib;m$vMsH0c5K!ykMV_B=B}P!02Q6&`eve^ z(3D5l987Q&bE)Fod-EvkHfzlLW$&nj9!ShFXlLQ}$h}T}fp{q`SXYT$#aB`GSDUda ze9~*Ev30643aNVtWed4QeJ`(UHI(m2xn>Sm?XawT;k=b*y@x6@2=0K4nFt}Tv28K0Olb68 z>YQm>noPo?ED7(q21c_qv&jjYJMYeee1z!G_lC3QXRX>=dO*mIPT@tR71HHCj5}^( z$CNJ-AO)~cjivW#3E@gMDib?>1iyAg&!j^b9r_tl8YO+^&>K2rqNz7Q$r0Rl&Jj)mr<>XI9naW; z4mQ`&fOkfFKwRjNQZ`crM(!Kg9>+`50rsI_ucXm{?3`w+IsM9HB>pHbL{r{28U%=$ zW9ap8zuolKbhZ+i!wfK2>%t zv6WkTr_MDcQy}1l?{Rq(?oLd7gx}unZ2w*Uj)oe7b@3(vzgU!1LC84X>y;)+FSx{uwqLOSWw+G2-k$VI-k|HEF zS_$Uuug>gvR3vrWWt6%$v2YJysF>`8p6LEZ=?X4^&`he9TwWi355sW8-0UX zWSHgW0!C!A(i>F3AAPq5Hq^n~XPTo%9iR6hyS?rRr}qfAIdk^NulblIaV2Wz>C@9+ z*S&MM-ZyBsKA#VkfY|mv;pho@+p5nWrt>oR`ekY738WB1hye{L6DOgkr>WQzS~%pb z6Yy1BSqNdOE7VQPK~`h9u9}vD8!22xHo(?04^uerSS1ks_{qyvmLjLUH+ zO*ze(9$VdDv#2nmQY%dJEV)qBJCXq+P8Dg_SJ_hw3YXG!)@@5;=ZHw6Q?*z~dTR_0 zYqn-tp({m{^sO%Cu+w0Qu-F)Bcre?7X_LK!yyo#AN>^$3m~3E;sOd`VRp&}gq)0CC zd4}ig#Bcqu)$?=}R(Fs^koV!}tZrwEIcJIXww94e%c@N*IJ-?5!MPDjJOc_Q>xpjw zlE-Gto0e2Ona)GW#37@lrxcuPI5VtOl)|Z%Xl?XX@%97uI;MMG=Em0WZYbo1iK>)a z>MFyJ@aRyKONq6xOE6^axqXJURvmTgs3MrVaN2ECLE+xo-r0B!`s+o<>s^w7O~mIQhax~z>d z;=zD#FW35k89Nt^Wz2Ij*92#TCKA{S8-}8La;uBHmhI$KJ@9a2lSQr6)wnp#-`BDF zWiN|cluh^aXT;1DVqz67H=Fi}O{jm9uI 
zGAePT1~BVDfjPnvUHeVU5jTB2w4MXxGB0vuWgW14B`U|cOo}-fw<`zyKu7f<Wru*%T{S?GMci2jVV(i^o6AGM?+jR%fulh>32< zxRE1Jbs*tP^JVQ&G88|eW9O6wrsQl@QLJ@>-s6Es7QLW-T}^k)Ohc;z>oRo%oKy6T zCI?j^rv!$MKkbT`QSfuKUnbrW?)CHvMX#7c1|>^7Y>v_ky&Af63EPUfDP;;?A#=oI z?&!L*PUxz|hdX=^L=;{%s5zNJ)ZCNat_+m}yMMtWt)5r>Ft3lnyqgYO%eZ^ELs z%%-@X5I@FIg8|6K-BO~Jx{A0wANTM%pX>DYvWT{DK7Y$XDdt+%=IH?4V@|_JC;7SK zrKI=P9O+<9xF7%h2bMG`Fk7%PB!Y^p;fW=U{CRrs=oOe+vy6eP35az8s>Tx5&);5# zL_D^?2Ls;~>vF`W-gg`;@Wt<#ZTuj$waI4O;O=+kIXKh%9|9zGTu)irlnNWodzGiJAA_=PNBKZIgni z63TJIBr44)c50k-F}?uwG;iy6XPSk7LJ^P>EXh27^~+CkMqZ^ub{-S_M^Z z@x(h(@xFI2wT5F+lP6}L{u<3GQ{#XpaU|1Rp~)d{ob8(MAy5l0^3v`Ni<3FNHghCO z1)rQ<9CKW}g2avY`MbaFtqom+@v3d2aKtWxbcB}38GJ}q{EAO9kL0}W=|-}D2B~JS zsvd;u?CiOsZF-0wD;<6wVeWn+_+*c8FJ|4_$v0y*)LB9$+IWPT@GVRZf08oyix!K# zSOo>s?tk8?W#&hHlkb0}boxN$)aOqUMWr5#6lM|UR_u-6 zIsXkY{?K^-+mw=bhk&Jb=I9@=#-SmNX#HBSZbjd>Skouau@xAWx`hTo@6PtJ<3-QX z7udgC+ok_S);a_bkP*V)>FIw|@XA^`J6qbB|5H)F+C%?OIaRimpHo2dqXUJ}P6&|e zXKx5}qu*GcZ}p#{nCUkOL=H-@ci+(c)zB=xM$0JX7v9~2m~kxgwvBitjx8^3K20NN zk>q{B>zi|wmE(LdrN8w9sI)vajVuPq+3+slPcZf*5W-NvZ zKcvBiGT2@^s>62&5-sX2=BCoA&myYp!4D>ysPo|7N13MMaY~LnY|k8hCV#s|HLEbEz))ZWGdK71H;wS(fxUKMo?lD)e)?Rdy+^J( z9$n?A8rFgvZFgVZF=AkcBAwh0%k@VH#V~UJ0nt5+LaqNZ%j7OzUo`n=;uba$Kk@cR zq$)R`bdP8TqC85&S$O>6UhxOv4C0WgBet}qPA@w8ks}dR-C+FnekgfZ_q&CPY8)n+ zTvBoE5e#-&OKb|oA-t7X)_eT3MiaJ@*ZPxd8!KFWf_K4DQ}AbEGhP4{t3IH8i~(~3 z zN(@AgbO3E7y%*C2vPFvq-r-z1zco4qELEDNghay;$QNp);0hAOk{n6N%QYT5>R>4T z1^zGm_WSp6%YGTQw7)fM|4}{ozk%y+=w$lu>%kC}FUO{U<%fWq9OH=14y*_Ww6ihQ z0UHz@Cbf`opb;Q_3dwQ}Q?lT8S|#cqM!aT!5`<3^LH*&+Kl8UAHP`R*fU{1d2#@ z=-ZuwM4AzD4gmq7oHe*(sb3kSF;q3Cv=U~utTclRdQk!sDZK`9k+zvtt;O0pWktMF zthD-Yzyc`$(KsX>eS&M8w~$~wk>Ue!7Z#T@LCi5d23hwdCcR%@6q zdDc`8GyYtrxd&>wTr~mT2VEcoj!>y6sxU*-A5i3mV8AyVL0+M*Uon7z!y!+>5UJ`} zhS1pMQ3C#b$|!CztBu2Ae3bscJf^{f@eCB9^JEgNo zxu#>Ci0(G|4y%Essi2Gn^-4c*UpU#_;UpfCm_%B*(am1)dAQ1>Otn zYZ9TmGc6gWl1p?5hO84u2-^uWia1)1jYF(~R?$>JisG1cKovcPH#f0~9#J1;TqwWi zsYF6?@9&U>lq&y1j+v*dC1FYZAnt0vHZ8y<O&1l zP+;O3u8%$|6qhSm*I>z&gW0+@o_1e1#4m%xpnnP%k33`HilA3$;hM*q?~S~_CVL0EK&`FAO9C{i%PfzYSQfq%S!u@2e5oX@ckQ#$)G_Kh? 
z<#nHP_XFmxcC#ryj&1RED?*+eB2+X5OwZJnK{j|mz;5ohj2p4=dsfuEBtVo0@v|EE zS+(&i+@;1N%T;gm{)t(_?BrojP=_udyXi>7rjh44mX)^a1&L0Rl_S{dnZ?@j&Z(nv zkrzAw{tFPq9LzrXUy}*nFHrpZ>BN83()}kOlwF*@DujQr*se;t^8aY*T5F$LSpt{m zSrmXAMZS*!`<0xNQ3F*I<^o z!fk%R>3q5Jx_7j63AAX);KRecX5TTVz0QN4Q)GW?rd@qndGmiwd(1-Dj6&o-){bdf@keI zSRvLNbrqGWuo=p}f>+lXEA{x~lGy-m1+=?d=6WYTo8O5KsEDz`&!} zBN2KmHK{$%J&EUd6X^qC_$5b@C>A@bVFNFmC58eb?r0de^kG6KtV5{-Npu-(F^J|5 zs`oO$nu!FKBYKE^c4;32KcL|zArzd(%n{NZJMwe0`w!PF3RTSOCjgPlBYpsdAQ74e z%7SkAMe>*Nw1kgz9|_G6N*wFBAz!Q-7S_G0nS|Y(2*dvF)szD-!c;bP?ceBHot!3sPpYNVv{_sz|+ZPWH4X%6QIy!*ZcUygz_hNdP z)wIZ?+2e1lj7sbILODbUygA_cVY^hgh3VZJ2ULB1wGZR(k~e)7IBYm z8DI&iwgp23e|?lY>IGn%FDd!q6G^VTwOn%b7_@GFR4&coC7wb-5&Alr;>An1JE!Hi`InU_2>pwVX&;uR|VHx}oEe<2SRmw@w zq6%@yC4R)@nG1!zGyVwqv)$ciV&>NPh1$?iMjd~`z_db;4gQpb=C_4G;l2YhUtC?P z9<_=%-+2O8x+oW{f)B`FZ1j@;6Q=TujVAw=jrji)RH)in|0m7Ae+-)xk$BUZ&_-cW z?a|TH=bK#G{gJ7$P)QkaaKDC4;SsGHoiwnoGwU1qgZ~^h6&ma!68;WjnxqxQCAEC2 zXLdK6OlNj}{CIiaBlq_lXY%3W@KF3HRc~!12hrA_ue9wf)duK0^AfZh8ax4LDdtcvYO8;o0viukgt=Nq3{{b1U4+dt;*Z$M|4co~paxvt z{;oorbEYF9D$xh`7JQ=9D5pFbs=fa?MEpfkKH^W__3qvHl=GHRzJog{5V?qL_gju9?fFS=yMblbzw6kkJ|>x(v?uCowNn4IIH;@CDRT(6*4I9v)UlU#&z6(_5Zxjf zfZ97qV z&E-FX8}L$T6y=s&Ry-jW^$)L6}`>+)Ss_T^!`j{c^-{(UUJ@E z5PrVh;0PdE!A%kHW*q-O%H3J*sJVL*(8-K(A7pJ;VwJhTZb~UzZu{0wBGaQQ7-f1< z+)y`txS=%=gE;Oqhn{_HMX9>8kWAz|es}L`&07L}c2|9#TbWLVz0M@>I;W!Xy$_|A zu>wUCGk9-S)8z8<^!!x*#E9sF0c;S7ZkbgaRUJGnWA{*J#7Zx-B&8Y0-Evf%e+nIH$B9FzTfc zv{{^HP;Ar4R2h&@V>;VlfI zvJ2+V5Hi@9CNj_}0yh+98-#8U zMG`82E6zUtl{Dr+#@LwDyy2ZU$#C5zUr!r{o61d$fsJ5`P_~I2lo)~5M%rpwW=R!% z@yR0h&H~;!A5b-fKh-3+pA2{L`}V`NPS;5FlFMA0F_ zkZO!}>_MgK%qrWa@w{-Y*h&3hF+(&-cuYq{y;Y@Elh*kZrma4sOp!~cfU3=fe4$At z^E2J<3}%NZ#bMEnf-kh5dz!UTn6;^ZxFt~jdy%?3(MC6CZb-raRNwEEjdB2o(Yirqkb=o}+p9DJ70IZ#h(j62M za@6*tOZ!BI(GM=WV8)0{QsRip^858DLmB^miQ1)L({d>E;5!#wcO2;F0XAQNDY$-O z9+ryu#mamgZNvlsu6lK31g?RBL*g@kZ3%r`3F}UKIO0_g%(US`7#c!&ni#aNN96S( zi{xh*r6amhu~m%0JNNor^W$h6HLj{>%H?c==F3Jr(`ebgRSNcwjMR=8dqk8Ff9E$N zC2Rk8MoNDZ%hXjV{ZRLJPk*t(kvlg_xA9!i852kB8FdZ~Jk3GCzC6bp=ss$%_u2B^ z@}4oG2yUta&C?V3yoC>V1g6AdkNkANr0SJEngXbA8n<39q7KLE3x1cB8{RoKR9F9w zh66Mqe_x{p!)kr-hq=UOcp*!Uw$LFZPB(PTk^P5Hhz;Wtb@HynHw+q)utaJsy}@I9 z?LV!$fA(-6B8(g>jKOp1jZeQ9r+pLS_8Y|OQ^u2=4R3gI&l*)U_%->RgFUzVx&78V zmoaYBaP&nRhyS`BG2RprkWau_0;L5rJ&8xHq6@FuJt(d=(ga(CgqcsDP1dP}+sY?WuT_Zs(^i%3cd~98*r|r~|PoXew`?%(}`(c7v9d0TvDC`ax>5W|(^~(>f|c z80NWeRd*o#O;}C=9e0RRLjDNjI!f~&j?;qVoyg%X(LSaQYq*lZc9Wczp>5pUmXd9( zP!kC#COFqS%ALY!Lojr(>8&`utP24}CTMe$EOZKNP>}m#kRrQIQD;|6c_E2GQRHg~ z2=EKrQ2!lA@p~I_7oM5fo6681f;|*;QBuZmJ(HIxWLTt16A-qFwr#I4`Qh+iHG9xj zS*tvob2E~7I;w~Gb}`q8J)l!AFc>>@?DKm9nVZ^R*0N6P@2`Suoy{N#68Xv!|9%`FFa1Z`bZEZC>e0KA-uGH$X*W^Z^hOl(7= z^xbE{_dHDD3FIjlXKGDO@w-@+o8?)Dzjp4e^_{rOOe?Dvhz3_JF z$Hpi#afmQMamSPkE18)K>ATPEq4*ZPVH^}p30qYhYF$KDu#IvKxhY<+*VKG3%Xc!I z5wm7$4#fcnHr1YA$B0=XeM3}8Jq?Qx%+Px=L-0{!=CP8iCDSDluG!t<`SqD>(0>E zujspe(}ucKDB7LsRHW11gE^75_=QipLi|K+*vEbfcqm>J~M)^?pqH}!)5?6CcF z+0o@Qm2~Lpu#UN9ya_U+{Ybn^zma~Mz`Z2AP69{wLek(&4u5oz2m&bY$T0yS`+N_w zx9#nvZ#6?G^irB&5K&RBTttPOv6;9D{*+Ny>yMT#IAWaVDO0F`QCl?)(X`e0-)d%i z3{6Z-XjBD#)|U99n3{>hELP5X<{X+UeHFh?gYZVChglG~Hz1W-!#o3?6Ij4G17aRj z=$&kPovjaywcFo<;Y+v{p3}dESw~foFc{QV3g{ZILzjlFf#@pb6vl?Y&ZW@fdRLtm z3|CKa;8v)%s}YhoORQp<6poPd?w4D&cM&PCw8)Y#h8>0g59s;uoi zCx-UH#+G0-UX)*mX&0#_L2UF(Qi?&cC1YBM7mZ;$;HEBhsZIdX-90l#u`@C94YYPY50Yll>6k0Z7da7afrFK#pN2oT(Jit;irjxZd0l69Fd1ko(;NTK%u9*uE%>9lv710+Bd z8GK@dIr|J}ufN6dO4ZKK6WgOdXn#=Fc@daCg<|+!xOE6foUjz>u`x=cn~qAKSjc3( z)f~$fi5le%)^MY}gpR)mola2s$f#`Zw7YMD;t%*S!+Oxb(J0WmYr(1A8m~#AZ|%*4 
zVbqF5b6K0=3b9#?H+kQ5JtFY_%8zCC)Ez7IB9Y==31zAs-L`4hi!nyNdCG@_jF7lr z-4nRGondo`?aAb;Dn6H3u`IT=I|?#%#=gsUHB72sZ(z23v{d=?CUD$B#}Mt#obE8; zU>2tLvHIq*@3!a*9i#JTny(!*E0Oz}JJX^)fc~b?ug8|hK^EXv4nuCWVeXBcVkhtXhTQ45tJ34Q2h1u+eS?{%?&JBYtJHcPAwWx!uOysW?V zv4a1dApd)++1MLff3=M1zpfeBxH?<9eJxblmsto8rm6}|Bu8o{hBPS2_X4u zqsRy;NdSEOy#t_(L&_*Xf`b*jsmfNR?m9MQvc8|WHdu>)`x-0oPcxH)LB`@em6jPt zl|eo*6nI`vWUhF=IDY~eVB&&wS56+UJ=v@@cvCGslXtAo$P;M6&oXC0%ly#7626-_9bJjfWlCNIR@)xyz_rJF<{m zxf_Q$nyfc<7HV8H{HkneDn%zCY;dnLX=+1C-A#H_7zq>w5*4<3x+`H|=zLZY^u1vf zY8vC%^o9w5n&kbWWkoe!^n38|XjSCMQFge0x@QqPO8+;o9xRKhs#{IuB9q!Qs>W+K zShaNh=_v3b7!J7)72Ndx$$~eyB5mI_tvc^ypmY6?sDn(cX(7lB^Op_g&eWf*3OHFoh}1Nm|Ab0Jj$ONR$yLP zVE85i8CAF~4Z%7SBeAr8MHDPwb>)}VD2Xa*Gi8(?2kmHz9TtsOCUoZG`An6btF%wX z?gDxlx~If>lyrt>);9T|&kREiHDx3)3*qEXxl2IO#frSWkD5D-$6D9QQ6-XM9^CYs zvETTu5miIekCdce0|9EEVIQ4{gz9BK`&2{67*}Gwm-@kV%c?(Q9~t995`Es+Pi$Ba zr6;%3I+7CNK`A#KjeTk4S=g}%-ub6b1e(iAci8$GWOd;l?e1X(eb)y%6J2rvb?05p zb%B;16lr4tn$6e*ZDkQK#p~Ev232dmw@+A0gBow41E}+D)-jBB1bdTMh_N_dKU0wC z3$4umkEzq3s^Veq7iR3a6VyMA%YY^+CuLLgz?c-xS6T0jLC|SZ5)~)@_XsBJN^!GzbY7gEjOWMqG z`G1vR<+DtUv>1{Su54YBYp5^h%mg|~R}Je&n^hPG%7z4?Tb22ooRsQ#MUe>#8{Q^3 zlJTi&Si9>rPWQ)qq-iH56 z>U3}hbNvmhB2Rnyg>@74C1bv@z5NzDj8Gk{G0@}+^)ID3ZMox%{fark-^To3l;Xc) zF7man4Pb5l$OG! zP>EkCpn;YZh7GO=fL9AmMp^tND4IZ4i~Bn0c(%N=DwlxNaW&YN<6(%nwwIHzsAM}k zh?>;&$#Sj(T964ek?QkB9qmWlM?PIY-tKR!fl{wH``i%;D%3C2ZS8EKx7gBT%Z(>9 z)uzwd3BK;q(%*w>}ExCl9P3iAg1WPg?WHT3#j{lG$6L>^6J(mIBGm(=s8o*Ih>+>f{AM6jCU(U-bI4cO}GX1OxPc@%2v8okh>Ka5}ba z8y!38*tUMLZQHhO+qP||W82Q{bH00D&Uf$sVUHSPkG-DOs=caa&6@S_4T%}J18ZO& zZut-GxA1qGh&gPbnAl8s`4CL01>fw!u=_Yvncu6&=mz+bfrv%?w)%Hsyogv~8I9Q9 z{8D+ZxsRHkOX`T>24M#QyBvZ{Q!G1zbK-24uq15JeS1h~4rkVVKUz@5TnniNyWsY( z2%_?dK0|%y%CjP?u7z?~vWI__x{)|U> z`ePflYZ*wf`*y`rj)-hRT)iEdYnVM-g7?3pda)Ma@SRz4DJt-LEAyT_@@)u15uCI7 zsvtoFBE$x)0(Ve7wStMr0s2{%=tR-)d{S~-L8vSc{Db+6 z+-U~f99bLvkBh?lP%6_jx4D=l7;b4e-k-cZRNp~^y@kp1%+HsFTZMYi%vyg-Y;O9u z2DL?BY+SXHA5oN{*t2DR{2UlF$$hCacLvXNXibi(&rT?f7dtszBO!f<)}D0b&CzoA z68}0h#^<_aiJr{?pZHuIcSFV`T4)l#(l-#mW+d$)!R<_WZSq+!^sS#CBFmJBUW@ zk!9kL^~rhen&|aTkZ4}?ukZYyhsLt-IZ^+MVE*H{^BGBRDI#5?IGUlylOe}q?_P46aXe+x(v~Ep$ zyzzVOp-0au1h^-xufU+>yT$kDE@Oqh>05CEy5MX!Yoa3aQmPKRA}MXuSTTr#J8?-X zOWQy_*olV3)Q~hrx_(4wBd(Mb9^>$}n8Wt0xO#Kb(H7jCb3oF}#rAktDpHFn_Lid& zkP57IrdQ^u)!y)g11qe|bS3u|d#8i*b1{L^1z)Dd>nz>Jr!r7b&#&a+3UOaP+;&8%&B4v>>Q zk*O*5gSC}I^;~JK25O!*+m3`%-C`YRQ_)6z7?lYfdTZyJNMpytVFv|&E*>*uQ zKR+t3YY+(pK8D_efo;fmGw@#Wy9;?IF)l&?c6kr~dU0w(1?>0KoE`1K5|uz4_m~9& zCfAQ)ArSt85idpjo`a%f&!l{D=n675Ib*RNC{)HF|43sB$MX{j7)qMLd=&c6ZjQWr z0H4`o{5Z%XihdH&?4lM}H%(dL2eW4Ld`CczB!|0SNY1R;JJ65CGk3P$vwX0m$AsN4 zYXbWxUbca8U>npi-`qJTNZ$2_d!T$^VmihSw7DO!J7}*_affe+kinz8dZxKcxuvG4 zS$qRx3Dm{zAkQuWzP`mT61vNv z!TRYnV&~qrAzE+oFK&t<%5QgB++^_=Oau@?5M>t8)2)nOPnu+GryHBFK_q(+0{kMx z=u{NCs3SHYw^@YqEX>gmgztn!hCle^?+ScuP1v_dEidIBCT)A)dR}@CyjMSV3}^XY zfBQLeOxhjp_UDX%pn*s_rCHWTlti5BiFk`B%atqz8I*UROP@2!It5a+88~R-Do*J+ zg^7huJ1tb1VJjn(*Vc*;2TG8kDF;XS%Ve&Iu38s$iyFrG{>~Oh?8j9Mu!K6&)L!Ob zSEiF)Lb6FKiad?Zf65=xi;7j=qV;EV8}!%+yT`K#V6K{rb`#o?H-OstZ9!R%%8uP~ zR;VecW`N&@DfvP}A}J&|zYn(xe|e%X8HgN`5QHD=82HK!4S8CJkr~cud}<$k?b@uvA6BG7A)?V)sxsH7?m<169U6+g|nEFVRovl*mMRQtyS2; zxK&Bsp3{PtTnix;_e29-F{JHem<=7mKoj3A3NVMPPV*z5YUgF50?qD21+%KPdYR_F!_- zk}hn8VsHXukB=omnixKLB_1n5Z)^vdASKgauBb4?wS(>Ns4gJk-U^^{`sQ zDcF=NbpCyvqe_^MriX4ThrQ<$YG&OiEmAZ;y)d7AqX}hty|+j3z(iM&9#|?(KurQ5 zmH&PhCslAgD>)jp34JyQU8%;A1;iJx8pf)xXX+?a5fK3PFLzFc-aA;>DK$_-Ir{3- z!nk3{a*6_95gkvL>J^tCP)@ukt$%Pb4?f!D`G)Mq+n;h+I61~!V@2F0-PQxYl;OBh zEh-t;7mkt)(zMA}CB%O((OWs+#O23QgFxjhm%9Js6l_@fzz!T_6h|FVb;WaS#~Bw3 
zQXxI^CE_4YX>1SJ$eKq8YO|zI!l26bGhq#{8AR!2c~r*P9@B92A!<@?o{;CAval;b zROBocX9xCdMFzToJ=LWL&ggri>8O*pHn0lgk4tT8Xj%K$j#LA-0#@VxkfB>DNV9Tv zqGMVdAA0kx%vV@qxBIara4>Cf8c}+RkN&bvbEe{|f82FZgl`Y(94GfN%rHR;ISOV9y^eg@$7i_i`@po>?rtuvthOxOGEGo) zSF^QkwffM_!2{ALZV@`dbe0st6OZI2YTwMs z0L?64=s$dn8n6=|fWQR)sZaE{C8_|+&^gYctX<)c^6ch_h?U2J8gv%w9&gba@IFL1 z5k}^tcX$EpjTtJFoPaiyQ=Z~`B|s;3SsLOwVKTmgnryqdOQEJi*lk6A#E+Y(ila(| zIo-+NkY*7Y!Z3N<5lcO6b*3*)v6q?(JqrHp zC>;)^xO0yG{;L-^98y20&V+<5->hzyX+X8&7SYPZT*`6MYBnK-RC~mcC$fxcs6F6% zpZT{6{jlG)98BTiBA@B7Bsm+uoTiqqmRr8);pMIgrp=hMnF1Xc^kkBlilv$V zfQpR9BHg-lsiITkBAHl|y@%x3>Z7aT-WMf1%m(@4wjzItkZ`gvi%cy6J@(8BheYwW zVd)U$6Hj*6L*r#1(gdG2=_j$4Mt=Mu=YFkOz}$3P32A%KUwL2@rp8xpQw7EXA>?mD z6S$AGLga5@IX_n9Z|N7YAXOxKW#49QY2T2ZIpq@N^Md3g*MQvu&(;d-k?~r1w_baSp!@lux!pU`pkn^ z;#CinDNCD}@!XbyPgvURAE)a|&7*|hW^AJ=RSm2~+LV=5(U;rY;g#nxhL&IUO|pib zMcp>YoffeN8Ofvb@%_yxY%j!42OYF8bV1=H0M=+bVezrN-t74uaZ_-1tMT1pnHtyv znQK_^Oi7E2b2V`7U#@vZ$mjLX=CER);~Nr1`1QfGC9VDf_*9Mg1e4b4vNt5Z)bx_! zjI~V$sXZcKD7(Z5ZCCQL54lC_YMy^Jy;oChDGUGmx;}M%%{)owKu=hn{B8op?JDRh z{^V>ml301+#UjAi4G(YwX$&zgMh41IH@syM$r;W~ti$$Qg1d9b6iSN-RVFqYNJV%CGZ@cs;v2Pu}_y)yIEB%%gbuBbXNUi@nfT4qOg#83hX6 z*5RW!rJp^u^JV@X#fYd}x0ieO-dF3EFL`#dfmX{ZCYdv&(3GIIwiEc%{|2e{iVp-#pn!m=k^fT_iSGX%y@~y|qK?WxsS9yiYh!6!TT5rV{}IVW z{~uB`&9+aTx`Mp2kqRZ$CLE?j;1FnWpAcM0F)1JG;`Zj%!q>#54IJaW&?m+SXf*jZ zkZ;mmi&@luAOo}G%$DO#yX*1h%dGCNDnp6g?KAVnXVDu;%Rm0rw&$vHy35sbvD$Lv zHkg<`W+)F4JPC|<#=0XR%M_M~r9M@*&qWxE75JPX3?zeiN2fMcRT>wuoT|${XP)IJ zj7TrV^&@e>qi|tKI2=>(62sb&Z<7OJyim3#1oq&x^{Y2}}dufW&QNs(k`V<+}dFJKL`Z}p9l zunwR7pr$9CcM?LsM0N<6G)F*l{oWYTJf4sikM1d^viDrx=ow7s_;>p~^=Kz=x%{UP z{wtTRmVZxL|A%%p{71Bl`ad0>{|$`)7s~lRPEN4~j2EtPGr#FGV`JhKO=c2(v9W|! zr+EVuU0jQnVuspgQ)UwjobgAVv{<^G!uXXZX<}Z# z5T7LncOVN8)m3I(8#AoF$xCqo#Z!-o>_I zk8Q)=e?-_OY1xvuRrAp1k)jlQv#oj8Y8Z z9qr_Cl8mLMocg0k$Xx}x61=GC@~BHh8U@Ci;>v-INJOWY$WfI@GghWYc65*sHHCeW zX9^$EyOdmwI{e0svAa9DO!J(9t*La%7LcKn3Y;gD2M_zxd2i!Uhsvl6WS3h~Qx>6< zQ|2%9l6^4M^7X=(tvJ$E8QY&O9B&y*&h?BsG}xD}pA>n6l`9It*Cvih^;w`ZqPbZe z5kT>agWhBJ2yADL7|%bK>R-K$kt!M?|ST+Zj=z3`2r+h zKaoO~BXM|SmXju7W0%_Nr2-^hEQD=XJe9C)P139V=$a;ksR7U~2nhYT=)xt%gmKb} z6GEYoR(DU(a72`~vt&+6`LL=RXaP_(4TBm=a=}6QXk1e)tHg=SDB~mI#F3Y-sDe+Y zCVk+rs5}N7dXErO0=#us6*Yh4Z86OtgEo`~4O%%_z0{?SA2+~*ZTg_>d2uF5q2J4C zPhf5H*rEAsX-DdvJ*3DB;Yh%Lz&4f}0x(L@$TsM}9g32S5xc@|RL`cJCZ%;WWf5}= z6w-Hqk7N|rDvTkBYNT6k<8(F)uH{R z=%8hfTBgOxuZ-Y{T^G2OI|9G6#|dZ=3_mVAl{>O`=qwbBz*oWIUdo(kx>L)~ngjIR zD|c*-F72+zpU|F-J2K6sZfRvL4Qq_{3jS=O9jnM5xI=n6_iBLY@n>_$@T|qUZ4PW0mJZ&kD;>%q`oS za!|WQr3gwPK({E;&UKY^p9@Eqr0u>5KUs_6Ue4TR)3~AW0c==-*>dxP)V#vECJBN> z!Lm%jfQPmeA4v5vF{bDUAP%)`{(=QeUg+u|!H*hqKKar))7yjvY1GxKRD6uBrFp8P z5UUynExVg@J=1loFAWdHKfdtktKzBRoiDgjeU=dxC|L!%xF29#bjr`Diy6N7x+M(6 znP_WhKJy9wK^PFT{;7)eJ<_vfk7V!AWni^qE9j5wTXmB8ruhrPTr~vS^9T%q_gvkN z&K|O-=QsnY+@?w=t)OGR`4X%Pbiph$OPVu|-x@Z(LbEV7y^+BB_B3hDZr;DvEjiBv zC{OH4pM_P2bho7V>!h|2;wxY<^Fe_3Mu)%W_DhSy*5k*+{65Mh`B91+iA{=RSPIs! 
z-s@6*=&>ufPkqy7GU+8P>Eoj!-#?^K2|)X5l|2-i$Zh6*OBIBKjE8fBVM)K}zGUNG zbq^61>)5*=A?C-r#C?P6=ST?Y(3dZY`4`24OaKi{@T~A;&M4i3y4DZha9N~`T4{l* z-+n~8`MoeSUm#4_XEud%7;a}#bGJf>0moWS5yz+EqusROIr~H|Ni9XHXG!ZSr`qHh z6=z`j4yJ*r#;VFLjbfCj5j~&UU527;X;uttkrjQ8X#iIn4yIWl!7yo>v0z|N3WnQT zuNqpmnPMO&wQ_Ab2Ud0(uN_)Wg&}A|zcNEu#G>CJ;G^<39$FgWi;XE?3EDGKScSuQ zQS${AS*pe(VS6J)bmOGZ<4e%g)SOHd3@2#R42IGej-gB*=sC>u2(k>{1-l-cW6k)K z=m#N+uwV>Yfx}n0$ZtfO@z_kE-CK}92mi3vy(w~=x$Z7-_>f>Jlf&)sg{BY&6vQ+f z`Yc7Rc|=$<37TE*n32bT<`qV|77e&OL~zCSqIADVh|85ifxWtk3z0q@b-xH454Jcq zX+MG$5KWIYa4;k0j(ZJ=W5A(*^{FRGh&?4cDKmJy$Q53umIp~Eg3bE!0{$7t+U8>Y z#qJ884j#Zpz;ZOR&AGtd1~J+(aGArglh+Xak{NcDrxX>a*!Gx?n5`w?@{t*B2OKA` zDu?g#C=4!a-B@7nlZpiHMo$x+7H`l5i}zI$7PBjoN?nvO5uRM!N(7TE{?x728%$5^ zXDP4OZX>;U@pEoc?G8WL^UZ=K(0C>i69i=6ue;#%viWVZ_MVTA&}64D>`&XSK>T$E zR-%*)7ILs*t$s9QUFI3Q74? zEkm{*Ij$@-gouFblSa(*NIE_1bbl4f-=h2IDEsq14J^J$#tEC0TSbmD^5nmL?1f7XMyN+#O?EZeT!h38>dfjg6#@U{8^eF&ujJ!@7O)Ot^JbCGXl*ouEnS?WN>2*C zG;y6g9nyly$gGDTPf~wFUdq8t{$u zqrP;U+a~^*zS=P^@HtHpp#-ti4U@bizMj%#BZw6koWxmh%b@ZdTsBf6znqsMdqIhU z0amI#IdZvNW!wl1sr+L39sTEvZZV-TU?Qqrkd1i|Nt|k7+Nx-n@ATXMr|kabO->r; zo4|*fxg-&6C0kHyVeDKj;-cg5*2tyrY$XJ9yUOLM_LDvNXMiDf3a1l7pDk&omqf)V zv4fAaz&8`fH+i<~69K9*k{KM~NfMa|kB5#k0-&JEr!jCyjL~!8Mux>4bC`m<#+ZJp zM2Z&N8&%qh9THxY2N!Njvw|9<@7zr5u3{cDMxR6K5W)fM1JJ^@H>V>@GK>glu-#y7 zXOYW5XiY#zHJutzgP`FcE>n~*JVaUT_a9IT3scji>`|so9HL~R%ZkACA)OC5v7U%& zi^)U^d`5TEpw-EfJdu*>TGIdNkRNNPeH0p}T~nQt!YEfv4k|Z-KM#Rq+H_fDsJv9c zZPV5yf(^SX;-cYu@9U4rAiK;V$ok@;qFjb{f~AqtuMZKF%35+bTO-_X=p4j&RxJ7_ zMx%$$G#{~QHVxA*VG)NNjQ{)Sbz&=MaseBxFz1Q4K#Cm#PQ0TW_Q;!=QjV9V@FkLa zUNjRJzFXQtv0PA7%fV0qnoQ~IG0T%kJuB}zAsCu77KYg;Cs+DGe&FR;IOUVe37lPL)h*(n) z6d}!r+|s$9iE*8ux%x06?0H(tR>5xMU(9-H;BYHKQ(tX`A{zV0qLkki@PZha;~>vu zq(L|On$FLbB0+)nMQI>N!rhdN3o6o2!ROd%xc8UPWJ_F}x0)o#@B?nm^>FR*Qub4k z>B_{_5C#L=A@Y5+j44GT#!_rN8D)L+G-gWHE_#yik7p>scrg_95k_a`1-z3;@Rbp zPCsETixkF8sK+)c-i#|1G+dKn4KM#|OGK;mjG*DH!bPH)pU?+V?fE9{Hjj#lUaX$4 zbIh`d4UwJOe0ZEu=&($RiCz3C&{rF~!7YB_7kpt1-u8JRd1Nd(@Y=XR?&TkU1QicG z;>qTidHFb+=t4|Hk6OgE13P~mOE<0iR?o@dkoCzMm!CQLy6@-V%`UAC(IGL#@B3b@ z`U0=#@8y*HYlZB$4D{6kr+MW7u>5-W%ITqqivc?OKv(=j$Vnma)!>P2bu#!^*lCXB z=WFn*Q>@G&%Lg5=_=gV-7@Keqy6Bn4{e9TJ0=a4EhS>H&2y8nxZTd6!aF@WN+hlq;<>(`Y+Iv~1 z8pP7@un|yReI@siJs-#6U~DXX8h&!k+3#$SzsK?Py5~2ouVxH;F<-a2uiyMYzK8NW zSv-R=yKdp!1_`8%)-i^w}`Rm{6fKZ$|g_-BjKhi}T-(`$=yB%9-cMI~}BK%9Q} zbvrIskZ*@?WV`WlrcqTpk64fy6|L5pZ-;Y&*AO=t)}a4}T4T?9-eyeQowuNz63>Sv z>YmkoC5Af(`$U~ZlYeHipb(ef$REY4%rZ*}2w^#QmzI`sL<~OW9`4{X#0-PjHp>=N zLT#I}my>0p6^L)cJHQs@(HsTy`A+NM^VuxXE7tC^-N&`L+EJgI;IG1#5>XqmOCM4Kxj(&kbfTKlbzM-2;ew55p`0)=6xCY?h2<4!=gVU#Z-*ZqA9E z)-~C|6KIxHmTWJ-S*M0N`m>VkEhVHtvnsy)S52KMs7*k!*igjdbw?e=q1q?D02L`@ z_U^3o9CAESm_TVj#tCR{M94BRYwTY%B*h951DcDkoDQm>99lBv*Rz~F_W-%rhsF6Y zf`s?%C;79~htI0y;6fc@*&6zwShBxtkwdE#b@g~xGY{-HCCfzmav6c6t<_1$m?tNN zz^&hTvAq<6vA*&{AA3~S$G-n>As3w8$11y?(-^Q#X&Bg@cq z8CC)Vb&uX$Q%JrzH@;BmM2Z4OXkI)0a`@QD?;y)JzZ_m58H6LwL%e5sn!JN}eco8S zOX(Rn_xR=l4u6F>f6~b5ZMBM2KS-bn5)uhpcasUre6_JqY_L*KSI@XtyiqtRgbbyxho`J}wI5I76bDVPD@ZdZ6ta0n8 z+=|>53*V~Ts;oZIbY1hXak6d6$=uGq7U21(Gq~c|`hk71euCeWW9veYsCCD4+_Mf6=aVU7yo^@AuRt+atj$oRLLDXXNdi3lj^_edQge#kA91HmlhN(SR3vi=a~~;HBudLPJrdBlQs&%a zfan~m2zv`tW?Wh34a9JCcGo1Na08o`;!9xgTIKufadZw}!IwDz1;(-vbgmOk1Dvsd zld7WR8A7n^C*_N-BvsB;#q7r5^K3PxF?T=gEJh0_{KClWmwAg5$ZC5&@l=jpsZ=iA zg2}8}c@)v>=9;6X8MK0@nnIrwYD9pOl<_k$j%7OZOig$ zZ;a;7n-ch!hbE03L9N5qMb$UUC92*(n}_@YYMAT!4B@yx5dVe3j;a6&k)Z(rO;G(O z2%q@>j>1aVI6CQDS^fX0rO|3|URo+>KYxp+J-xj@%sO49UY4D4oU9jZ0#lGi^qi!r zi=&(igWx7D?=yPn#%abUJNyZ$iHRYJ%QoW};v))xUCi^N?ubA_0+jv;I6=ZI{1HJ# 
zjd!1sX(-WQTm8$xd$Se$;5_@4>-ggJxjP7i9+?chN#AMplps~n8NDmYj9&4q2H{z8 zQ5opcG#h~#V?64mz-_ePwiT5oI#4tYAlZX?&ghR0H)2t^x?v=SYV7G?xQxX1=8H6T zVrOT7rnf0*z9U=z;vE+c0!Qu+u|=vkp|u*8X0{m~VCfi-q7cW3W-#Zd(GO=ZvZ?4% z4n~~gx-{Z3t7#%G>4W9Qw}BmvmLIaZjK%TxHtDKoO|gp-H`*Zv0|QQE$IOfx2}6Qm z&)M$ohvkCi0cJijTc{_F7T`vg9yu_XGPlaN7Ihs`&RZCf5j6q~!DGiiRQEKsgj+jg z8nZL`Cj4Qnh0=gB4MxMDoOH0Sz;H}lN7^XE%|I=kswf)vsTz47PgUJjzL z3@i%nnt{}aI}vRecs#D!-G8IwDr&EeO~gjk>Z&kT_ql*a({Nv@kMH~7P)Eb?;4_QN zJbJj7$SEW?qua{EfPb9tJ-|v4id9r7VbD~ld&Yk+4A(Wv^m(dFK--jz#Yy6|-rn2p zsYbYQ5gF6bVxC_Dmn3lA*I|Sx@RtXXf~9N zMrGzNmk?bn4?4-Nx38${GJ#O6pNTaz(;M2&AcMsoaZ+lC`xNZi^AO3mxlBv`MMjf@ zUW%cginm`=d~6C4s}Hq5-EC=-NPo+NDS8)In0 zS_{@qh@^92LavBdmsLRslt$YQ-=PM^n^?!7JbN&*U}Hr6jM&fl?J?DCKhU7Tda!He zHKjW+iR}~pH;U$DUCMXJ;ajVRvlj&sjv7FAGIkV%_mCK0Yiuuit-XlT`mtDjOwdN2 zE?eSs>K0g7N8n4Ec_l0q#f6CGTaYl5j@CNP7}|H0R%pR8R@8diBFcK6Z51qafI`9c zPfy3s(t5PtBAHo?`|t4I$XoU<_7*nDcIa;ja88%ZZMo%u9iT$+qlq!gIf&QPke54K zhuXgp!I$4b5kK-pWkfEpG@^REysND#0FoVjKoW~#KTFsr2EoHO2x_wpO$w)@%G{vd z#wk5CYAfk_j%*uevB%kVeuUsaHA+T?a(a~Kw&(yB$mpEl*NJro||6@rO}9=m!FZp1p58{aZJO%*nPolTi?;A zN^e1%?l{4UF&2#$E0H4UuTwReJ?$^IW9MXi?7uRlViGIB3YFp53S!1VT!iPH3EnaE zUZ6lM8-H$@p@E$Cxb$GpF{XYdroIQfVKqs0b~J2X|;Kn2?O)Re8*66jV`W zIMfcL{?x!@(qowyXBfl9lN$XaFf-JUHByIuok=<$;{|%(!n`;1 zv}&X8{b?rvudHm--~5SN{%q49Yi9s2jWvk##s5cAKDzzPJ9txqLcjXU7Yzhy%} zT1vkk@*KEXN2o0IasV}k!MenkDvKkiIIv85Z>id>LMq>w2HQ*y?28(NstQ+BYqv@u z3&rd&+^nCc!S>fFDba=EZ$(Jw6>#7SbJps#6}~X6Z{Uq%2Hc@4zrRYkg0?4w1wO;u zRUR3UUWy%>H8#PjHxKAVcJZyh!Ax-?Lhb8y@&3>}q=J2(Cl+1waRZz|Qz1S#5cxjr z9P2wZ7*;1EZ~CliHES5)Un#^3BfB$FdwS;FgXzKHyUv$CE7ZJ!bkW6qwHfNrM(k8=7EO zOPa4Qf!2lx3$?S0g#&g3S*TvcxvCV5c6wMAjBVo2qw|(n?nbIVy;zECyqw{LEIIJ6 zFt2`6KRi*rTd}@HxB6#dRPZgPKn&ukFy6)kB0k~IND7FzmqQ=^eyw#hyYwhIF#$~Z zE~sptiUn<3i_46pU8Nz<@U(D%o%UR*AU@zv*16&N zX7s6b7H8#K3qXpD;0Uvb>ahs*g>}QW(R2=cX#qPsqEVD-?KtG6pD%NkBK3P z^u}U$+LF>inSCC6rsiu`DGvnJ9%+a>zoJ-$#Zf1Ooa9I2%fv_4%hX313xYHs@#a*@ zWp$YaDCM3s)dEDLifIV zJpm#^L@H^oZrAXVbM%Gi3uwLe0`dI*#&w6vy>xo-v~%fUIurjcb`p=$ai}(eWDeB> zw@$41KNH)Y6Zh|Bu0uDmd&$&|V>i)1(|hHi_HUi=rFdKc@ z0|fN*FN64hw~_xp2tfZQd-&h>zgx)v2aUrxe)GTPXNCNzH#gU;cy!#^pz%2CR8Wcx z;|QRl3JBaXZOgXKU-}zqF%0pA+3p2H0{*-GUkeQhYi#QC?O0BHhad14$jSh)TmQG$ zY^zfSYfEvx0+pJEaD-&_yXN$V2leE2bN`)WPs_XuJ`xCl>Key+} zfAo!?Cj0}(nIg(KDD|yiNy@k=4S8x!KtmATlI2fI#`u|oEM)Cfrm|)YR(!ZvKus+h8`y_4}oXsgBC`-`gpi!wqctkCJop@A-dkC*glg2LHzn7OO+K z;fy-_2myh%1;8g17@gLwCYkwjh~ptq3ANzjH^?9rpel;#ji;pc1!zO98M|O4n5{}~ zjG4!@m?M{lCyh94uQ+K@L~BA76$+oDbviE=nJ*TZZj9J$l&EFwgcg3<0>u$RO_&$T z5tx`BPfumOe82RvdmVA-_Q(ovipb6lJD!BN^6qd|6w95Nl(5cc%(RSXE~@$rjG5Qy zr{8rY&okOwaOyZZNyk{q^6=J_%5esFEoO{aaEiq?%SH`9YzS}d@``qLv=q1A1XXoc zHt=bU9sS;ovb?iCJwHy&o^ ztzC#{e)Rb11+!%D8tKvQ75#ljaZoc6g4MNa)c*0)41RJm z7VDd+7%gu zE5w;gM__dxG_)$gMX4%>Fux^74GG+{d>ornreNkNy#SqJ(=K-V3?EG@$c~@>sGPn4 zoQKaqwzNTZUdcqbV#27MUQhvavl46pXVk4eM?T}0kJWc~;F3VL7yjikPped{wWWh6 zzqOhfM5k@~-XjiMQr+b^;T5giOm@S9r z8?nHKkoDv#phIr15J5b#5({#D#LS!JQ{yPa28I@?cJtw)e)#i>stLNz@$Td30pluM08sQ zoa$QFwA?5v*G!sat}TkvBr|K9^lw(XPsm&+!MPrGQAC2;b+2U&+HzLay`GFbR&fGR zmj*#!@1reJGgG*vu6mSTAMp^L`LS9`r_SmRz2kTwKN+c z>l=T4*m%SW@=M&i#tnBN#>$8Xo#$3;IB>rKbsy*^%R)>G1j^?`4ia zSAQ?mbx&14Nr^nwa?-eE;(42k zrka@Zwz3Xg>>PM(7UR}k3|r$YQ)5WB+P3}laBPiDIe>?$h6qs<`i<^9$XY+2yPO!C z;_aX4-_WGdyh09yM!7a?SxDe+b9#ClBvY60vQ?QXyb`7~z8av$#F{3S()B7}koIJ( zqc)IXMb|rh(1kv~g%gwRrjWckAzX5>kwt?_0VP3mV=AW!CR7=&@+h1n#IWtW?Lw9# ze1-|cNU8gf^RI zGihfh9WD`8?a*rP4Im%^d;aJMM|jIYutnB;E(=EQfYrA%Q;eb)_cJre{F77nBrLMq zeBm+yQUszh#|Cxt^y)vyARl1wpo8~`0`KncU|_>rrGJpX9vDHU9L1)mI7Ss8sL85M zy~ffgRn*i_J0`Y{R%KEfv-0M;49sga#x7bO>7CL_uu^>OcYIWceL6zI1|E!hjW0YM 
zJ@7k;f%qqjCrwYePDhqzJC4GGiFh&ze1m_X$N10Inf zE>BL45hoMCSVEtPv<^&TRH>=bfP7(=OA3vRcOL)&*Y7!HP?Fzl-hh0yQ`c9cCAX(H zrLtdktAFVjCsY4~qES4eH)zDXfPl49Gko^sZB}fv_&y_`zC(YIv7D@1_%k(q%jYvJ z668B1LD{*e%Ky^@+H^{uoSIc^o6VieZLs`-1(ZAI<$mMZaR(kk1^$qol=bG-KFp$Y zLAOi9%Nvo8ft{tErefv$MpHM;QWKuG6~qBs_sl(6xg+Yd__Z`wn^|E?9 z)6%BZFcf%a8(rjI>?Op24a7-v0)DhjKBVz>GTUmIF)M_<`SCNTli}QkIP1lv{*n06 z$*w|B2D|?e#7Cpg>FmhF*QKtL-oE1D;UNUE>fN@u8^A^|+{($-;xWq^g zq^LUzI??9$Z;a`eiZU*u&k|Rwo65Tgh~BIluN6*!4FbcQi4QwvN_ZYIYQ2}hfKCHb ztz?@4Sr)0P3cleH?^glTdFRUBp3$<1RH6iKn`RSbfkst&FYOsd>#qWFnXI#^1roTR z4!y4}To+A>6&hkvHb3B7xASFOl5HiRy+Ajqa+8|->MhsuX+)}h7kdaT;Mhs>U1puJ z{8O&WQe{^SU3bNj_&tiD7C#jNw1>Ln2}zPUpfkeVj*yret0E^ytHE_1Q0L{=7v@ znkBSIsN7OYc_qzzWQKrCgY?e5DMt5@NKXnWu$;AMV1&e%DNiWkCMWQHd?12imcbsD zrno7o!rQ}4kyzqulx}rB4sG`h7k%Jm0r|&3D#pfDu}@nxBw4{3yhfy|&+p}F5#K2d zOr*?@rz+L4_T=w=uu1!GzqL^p+^nC~G*m>fexnwzHoS%a;^B7h#6O&K>eFxy zEHGgQe{|Y+rORx_mn}F5@%Q#SMb7|u^RW)X2i3>p+*iNi(NWu3l}$C8k}QMZyG78G zIB<|j&>PIa5*2RHF84tr(yD&v3H$TkV- z#4aFURp%t_jwK#j{_I4$c#}GNWn|RMpD*pQGz-@(Y@Dh<57#WbPa^ZdHTkoppoc@$ z72#hsm=%e;;?pgpL>5iSd)Y-?N1~f+S=<^|0*4*4VxodUdsbcBx5kd$Dq1y2tO(o&uNe%qH-&o`y%WJc*i|_V-&hF(jkL zn{V)o5cs1a?shBFu{DTW5%0Ul$E@V9tOwo6lq)kWBxRI@=3Rn8i~XzciPp~BtKRBA z63CG079`JK?3k7jov-tW0PY-~kP0JX)akjA{AC}Ne#ax0-s1RoU+5nMDu35R>ejhB2@y5)hcbr^mG#<)0|l&CYJeX>R#}X`10(4NbEV z@;YwD{LOBB*8BxUaQXCW?E=k<-R@0C)Re)5$E=fhLD!z8cg$wvvfWh(wROEOCt8@@ zD%8;Q^j_rN9l)Urv8@|;Sd1~zCsME6Vz0)@mD4S%rd0kV->6M&Nkz_Oj{eb`L!F|g2f)!%0p>`M;ImV-2Blc$SR5u z=CP>p8d;S$iwt{@aGJ@b+kTF`InxFZQrHQ(F&HcSM}#4PbXupb^DoSm)&M`obY>)1 zN`XgYE6uL4DOFQOqZS(=Af_08RNowm(EN|^yuRD2)ox}wkyFLfdvutf$&W!zho}`z zfCf=&^1K8SHPUJyl$O3>?JwVw4=k6QtH^vDc(acmJW|+h_F_Pcg?1by}~YwLF! zHHP><2&^}Q%u4uwc$2^+#^PR%(!`tv8_md!Hc}(Lai4>-q}HA@Px!%P&^`b zAo$tJN`LCk-F5l?-mEuoe37*Rn4G-bHVgM<9A#~@4wGAQbQ|}>9Z(IroXuBIS|F&b zbDsCvK7@K!v(mKNB-B#T8RR>z$nxeAH=EwbTp^9_9@I0e(6Uw3x}ouLcdEnvE`Du+ zGK6}sZk{kG+R=40wk@=o&p_X&Nrzjitc2&g%G6& z{6uM6t#wSFK(wF+2E2kL*%=-~?w$AH+>z+=S<+dYkb1LWI-ESD_G@9{RZqqUNFRRR zRXCJ4Dt@Ip4!BP&H$i6RbPosT3-%BxRXfN}5NU}3Gujz%pmg>aeJ29qY_nt-edTzU@J9H$oM82r9fW=vom0&)d z3E7OBEnnml?0M5G(?>(lJ-bwXKPszDK(MY)4+GK}+5)&C-3{-jm_^%Fx|j)2KQe9JB+hDnJf^9>;MWwfi{^D_WVWxtjai z*-DR}gi4ZHj`t4M_c_rFDukE)@t)z<7>9_?hqoZmSjCq`3~m^w@3S=^tHjhiJVcus zoe{yQYd!eWL1r1Gdq@i_ui0E$Vq?4bmH;!3f%(9>S)04U4FK;OJDC0RL`13SYTi&M zI#Q6~biS@5o0G`QgA-0oD^c8uZvR<@c?^NL^68_u2_IxQK&fOO<+Fjcr9M#U8od3n zQK6u^7v@ynKc{8}9#gLovSA>mSd@L}{gUEn*k8Ys_4v&pMJuCVFasVX(9AdTYeNh_7)B)yf@6fGA zx>r?2V7iI0C6hKrWsH7&jd|=^i9M6NlUEt?Jorkv5T^NREU}QR@Ayh}9uw=;xlg z@h-VPwxwK~;pC>bvb86NUZmynNMV8oh%lhB>$#)OlJ=ScNFr^E+$}hJX|?b(fU1c^ z6lImajjwZH3i`|q7;#(GJ~J3MIJWrJ5j{0WMp!*sST>=hdxj{9xTg5$_p0r8F}*(5 zfR7*DTbV-d&(oB~V*_n+S@M}Ssy`k~@z>;PD#)!R((>@pm8N3jKmmr3koNONrX4QV zrQoX??~^HzC(nnWPpkVKfJDZk^oklZ3stx}+_9hM``enhD({wkjSuan5L59hBpt^q z;mR*jFJ!0Pq4?mm7Q5b?nSme$B7b-As(0X_S_K3%K+5hJ;m$8~Aqg*1)AvfXJQ&5D z2gP6_>RxAg#>Ub_zjdW+p*dxBjVjg6(J}5;yMfdz6izmLfj2_N>TSy*^n+i1Hf%PV z53>_0&Pm@PPH=#5Qm8()@N6$#Xl^EYJ)J0E5#c~rd!g%Y4nAi&~Yaa}a{W&D1O17)|rEK|;lTk>1d-ZD1{OJu6mlKX?d ze=_it>%wHNX~tb}Z7J~$;kzeeoBTSXpp0L$?2jNll{G&(J{c5sVG}~JP0*o8(I|R6 z+Agbd`x3)IX5PUUo<}YXD}d=qDMN<`jHz;V)H>|~+)edSZzoq-V>h5!-mRc?Z@(B) zO4AIX@;iSl*zDH0RtKkc2~?rM(x#Gv3zmAfuILpZu z)LTz$WT7SByXA{!Xk}tLhb8=ks+(`3ad?Qb?nB>7|1oc)z{`W>otP|ySU)gQ790D| z20W9v5RfRs)Z$qYg{oia9-Pl?>z+QZ%0-`op9HXv+`k~pBc_EN9aCv4{iG2(!} zFn1>Hysi9RSE83++?ZYl>fHMVO$yGP4OA}pz=fO2{DIl@^$S@(uYNb=(YC$G!!~a= zZHsSn(p30JNB$!Naw`olKuF5q6qR^|9!xp?#To<4N8g{B^K#eTmcwbTwPg21M^Rk5 z3uaPFQt|3k)!=9zP!wV82`=gBL>nbQU#U}L=2Sh{dq$i;K7Yg02njOvI2DtA#<^$> 
zhr1L-YN7~JJx#kCK+cUcz2h>~-EmLuMk_Hp*RPc_JyV@_T9F6nkY|X=^fx}aJx@+xApA#6 z#`a#+wjXg-auvlM493l!wTh3>N92<&JN+th>am_KbvR zhjCl5S=e@aEd`n|6<ICvtGs}-`gUq{Veqhy|8P@3F&kM1 zAcT(QCe^|Q0Zlp&lD&+~9Z$wsnV?}6dLY&faaPvl`5QcO<T9TS33R&$cn^Bp{(QGE;^ZHLrts=5sh~1zY1bZ`HUt|HtSG7SI#J2fOg@E! ztXz6qz~viXj3pTCkrQt`NMNlN&KRE-zKKf#aUKq`;Yb?f9^x{000Eb(-Kz^_}RR)GT-$y`v3@W1=AaiBcA9cbq7F)cw4|Vkm%p z#HbA66XC7SJ|UTJ&zGQ$jCk)k=Ok(T+nfo-ZI%0Zq^oUQ^pg_37wRk@GO}D}IF>+J zrBiXHmY0kcXt$QdUhW3u@XhJTGxO4T9SATLqK`Zrh}^=)!Z9$3%PGuJ)BR*mLkUnH zlfa-4Pn=l7ZrDH78EZL7^J9MG4gaN z-q@LJ__kkHm44um5UZcihno*fyLwSN{7D)!P%@t`^=YrWy%3W0jVO}}DZokB$^BAu zLm#`RFf~(WE4Qm^rl)c*laVYVDB+%eg0G8t?{}M440rZtiKZ^}vVB1UpC=TX18?6< zw!aP=0BD&bN9H-SzODwZ=!zK>nH+@zBz=C6cIj zS<4PbVl3M*kA>9=L(@;Zjn38i?E;eS5XtX`GGIkHi@j#&Y}@@ZrK6^%NKHXcj0z5v z9#u4BS&G2`l+H^?Yc5;zoh&1>zN!vegU z-vH$8NLGC1G~-}v3#WFY-!n<(v^o(kxi1x@D?*J z0C8X?O5($nw~D(#n0Ix2mm8hcTYGjgB6h)nP%jzu)ikM(%;S{2#d2fRe^^*x1mHqY z=Fri`4!H_peQTile?U|lZ%!N@8hfc2{NG*&=J$7OtqzId8jh-`FV%~gzB8u$UnGx+;^n3 z9!27*+HCzmeV`n5msZz*v})30x=qsqg0tWMQnq{hd1wYeW3Bu;r0X@a_qV*Hx5ew} z^xeitBy0Hb5XJqS*d+3O13S6b?_9TkHN2nfcx~^3sSF-s63|D!yJ|567~J(|@tIlA z?D#rsjF{F@p(!ia_p&~4o=F$YE2?tz=UDo1m3m-VU6q&1STk4o?FMk=fCO#FIP-e$ zqlaHV59q4}m6r>JF!MZ%a(G?{ma^g}yIuR42=}FGRzUbMtlM`D9U+8fK^^wI>cob3 z0}+vk5Qq@a(P7vi1ZsrJ`aH@d90Y_G1_T6r*i-9lfF;#LS!EQY*yUx!B^6X9)zwfD zV8hgZra3;SnlpiIH-mkM&ZGgss$Kt00~S?~k&;wZV*^Y5YbvBRl$qNw5hvK^&Y4tG zuvCsNmh6^)`UJ6?+F7%oq8P!@>hAV1|5TVIzPZx{oz?R#w=*jTJHVdgFJT>IczNR_ zEb%YXxe!WnKDq6SYpJ@hu_OR)a^aQ&?$W^Uo=0;31GxVovjoBVw^eU z$-lKO=cb=;=4rGroG5(~oAg)K{Oiv!l9=pk-&Ndp(`R#&o}QhjP2~r#0T_y)!$r*r-4dmlSgp>Bl(|4>Jqx! z`8_|){ed%?PJ)*I_sOqxGd#^ZIeV3j3!IVoFW^{H0RA`Q4uOvx67ePU4ov zg?X=FfOvl2Prt1Rcg8yjeXUD0{;ot^;FEV=;PirS_)DKBF>imNz<(BTpQnqQPkef5 z@!6w8{pixfm#hvyuW@?16~0LMB%ofGY5eBIo}M#<&()rUNW_I{FPynOzq6+&g3jLN zhoUabdfDvT`Q)cd!SK1HlTeMhIQbQ3md=ZuE{>f&rR511id><_d|u=9U '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! 
"$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. 
+# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/packages/react-native-executorch-webrtc/android/gradlew.bat b/packages/react-native-executorch-webrtc/android/gradlew.bat new file mode 100644 index 0000000000..9d21a21834 --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/gradlew.bat @@ -0,0 +1,94 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem +@rem SPDX-License-Identifier: Apache-2.0 +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp b/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp index 6f42157084..7c6cb85a23 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp +++ b/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp @@ -4,31 +4,33 @@ #include #include #include +#include #include #include -#include -#include -#include +// Use BaseSemanticSegmentation from react-native-executorch +#include +#include #define LOG_TAG "ExecutorchWebRTC-JNI" #define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__) #define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__) -// Global model instance -static std::unique_ptr g_model = nullptr; +using namespace rnexecutorch; +using namespace rnexecutorch::models::semantic_segmentation; + +// Global segmentation model instance (reuses react-native-executorch's +// implementation) +static std::unique_ptr g_segmentation = nullptr; static bool g_modelLoaded = false; static std::string g_modelPath; // Model input dimensions (dynamically read from model) static int g_modelHeight = 256; static int g_modelWidth = 256; -static bool g_buffersInitialized = false; -// Pre-allocated buffers (resized dynamically based on model input) -static std::vector g_inputData; +// Pre-allocated buffers static cv::Mat g_resizedRgb; -static cv::Mat g_floatMat; // Debug logging rate limiter static long long g_lastDebugLogTime = 0; @@ -36,29 +38,7 @@ static long long g_lastDebugLogTime = 0; extern "C" { /** - * Reallocate buffers based on model input dimensions - */ -static void reallocateBuffers(int height, int width) { - g_modelHeight = height; - g_modelWidth = width; - g_inputData.resize(1 * 3 * height * width); - g_resizedRgb = cv::Mat(height, width, CV_8UC3); - g_floatMat = cv::Mat(height, width, CV_32FC3); - g_buffersInitialized = true; - LOGD("Buffers reallocated for model size: %dx%d", width, height); -} - -/** - * Ensure buffers are initialized (called before first frame if needed) - */ -static void ensureBuffersInitialized() { - if (!g_buffersInitialized) { - reallocateBuffers(256, 256); - } -} - -/** - * Load the segmentation model + * Load the segmentation model using BaseSemanticSegmentation */ JNIEXPORT jboolean JNICALL Java_com_executorch_webrtc_ExecutorchFrameProcessor_loadModel( @@ -72,57 +52,49 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_loadModel( g_modelPath = std::string(pathChars); env->ReleaseStringUTFChars(modelPath, pathChars); - LOGD("Loading ExecuTorch model from: %s", g_modelPath.c_str()); + LOGD("Loading segmentation model via BaseSemanticSegmentation: %s", + g_modelPath.c_str()); try { - g_model = std::make_unique( - g_modelPath, - executorch::extension::Module::LoadMode::MmapUseMlockIgnoreErrors); - - // Get model input shape to determine expected dimensions - auto methodMeta = g_model->method_meta("forward"); - if (methodMeta.ok()) { - auto inputMeta = methodMeta->input_tensor_meta(0); - if (inputMeta.ok()) { - auto sizes = inputMeta->sizes(); - // Expected shape: [1, 3, H, W] (NCHW format) - if (sizes.size() >= 4) { - int modelH = static_cast(sizes[sizes.size() - 2]); - int modelW = static_cast(sizes[sizes.size() - 1]); - 
LOGD("Model input shape detected: [1, 3, %d, %d]", modelH, modelW); - reallocateBuffers(modelH, modelW); - } else if (sizes.size() >= 2) { - int modelH = static_cast(sizes[sizes.size() - 2]); - int modelW = static_cast(sizes[sizes.size() - 1]); - LOGD("Model input shape (2D): [%d, %d]", modelH, modelW); - reallocateBuffers(modelH, modelW); - } else { - LOGD("Could not determine model input shape, using default 256x256"); - reallocateBuffers(256, 256); - } - } else { - LOGD("Could not get input tensor meta, using default 256x256"); - reallocateBuffers(256, 256); - } - } else { - LOGD("Could not get method meta, using default 256x256"); - reallocateBuffers(256, 256); + // Create BaseSemanticSegmentation with: + // - modelSource: path to .pte file + // - normMean: empty (uses default 0, which means /255.0 normalization) + // - normStd: empty (uses default 1) + // - allClasses: ["foreground", "background"] for binary segmentation + // - callInvoker: nullptr (we don't need JSI operations) + std::vector normMean = {}; // Default normalization + std::vector normStd = {}; + std::vector allClasses = {"foreground", "background"}; + + g_segmentation = std::make_unique( + g_modelPath, normMean, normStd, allClasses, nullptr); + + // Get model input size from shape [N, C, H, W] + auto inputShapes = g_segmentation->getAllInputShapes(); + if (!inputShapes.empty() && inputShapes[0].size() >= 4) { + g_modelHeight = inputShapes[0][inputShapes[0].size() - 2]; + g_modelWidth = inputShapes[0][inputShapes[0].size() - 1]; } + LOGD("Model input size: %dx%d", g_modelWidth, g_modelHeight); + + // Pre-allocate buffers + g_resizedRgb = cv::Mat(g_modelHeight, g_modelWidth, CV_8UC3); + g_modelLoaded = true; - LOGD("✅ Model loaded successfully!"); + LOGD("✅ Segmentation model loaded successfully via " + "BaseSemanticSegmentation!"); return JNI_TRUE; } catch (const std::exception &e) { LOGE("❌ Failed to load model: %s", e.what()); g_modelLoaded = false; - reallocateBuffers(256, 256); // Use default size + g_segmentation.reset(); return JNI_FALSE; } } /** - * Process I420 frame directly - does segmentation and applies mask in one call. - * This avoids multiple JNI crossings and RGB conversions in Kotlin. + * Process I420 frame - does segmentation and applies blur in one call. * * @param yData Y plane data * @param uData U plane data @@ -132,15 +104,13 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_loadModel( * @param yStride Y plane stride * @param uvStride U/V plane stride * @param rotation Frame rotation in degrees (0, 90, 180, 270) - * @return Modified Y plane with background blacked out (or null on error) + * @return Modified Y plane with background blurred (or null on error) */ JNIEXPORT jbyteArray JNICALL Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( JNIEnv *env, jobject thiz, jbyteArray yData, jbyteArray uData, jbyteArray vData, jint width, jint height, jint yStride, jint uvStride, jint rotation) { - // Ensure buffers are initialized - ensureBuffersInitialized(); // Get input buffers and their actual sizes jsize yDataSize = env->GetArrayLength(yData); @@ -163,8 +133,6 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( } // Determine actual stride based on buffer sizes - // If buffer is smaller than stride * height, the actual stride is width (no - // padding) int actualYStride = (yDataSize >= yStride * height) ? yStride : width; int actualUVStride = (uDataSize >= uvStride * (height / 2)) ? 
uvStride : (width / 2); @@ -175,10 +143,8 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( std::chrono::system_clock::now().time_since_epoch()) .count(); if (now - lastBufferLogTime > 2000) { - LOGD("Buffer sizes: Y=%d (expected %d), U=%d, V=%d, actualYStride=%d, " - "actualUVStride=%d", - yDataSize, yStride * height, uDataSize, vDataSize, actualYStride, - actualUVStride); + LOGD("Buffer sizes: Y=%d, U=%d, V=%d, actualYStride=%d, actualUVStride=%d", + yDataSize, uDataSize, vDataSize, actualYStride, actualUVStride); lastBufferLogTime = now; } @@ -192,7 +158,6 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( } // Merge I420 to single buffer for cvtColor - // Create the combined I420 buffer (Y plane followed by U and V planes) cv::Mat i420(height * 3 / 2, width, CV_8UC1); // Copy Y plane row by row (handle stride correctly) @@ -202,8 +167,8 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( } // Copy U and V planes - uint8_t *uSrc = reinterpret_cast(uPtr); - uint8_t *vSrc = reinterpret_cast(vPtr); + auto *uSrc = reinterpret_cast(uPtr); + auto *vSrc = reinterpret_cast(vPtr); uint8_t *uvDst = i420.ptr(height); int uvWidth = width / 2; int uvHeight = height / 2; @@ -221,10 +186,8 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( cv::cvtColor(i420, rgbFull, cv::COLOR_YUV2RGB_I420); // Rotate image to upright for model inference - // Frame rotation tells us how the sensor is rotated, so we rotate opposite to - // get upright cv::Mat rgbRotated; - int rotateCode = -1; // -1 means no rotation needed + int rotateCode = -1; if (rotation == 90) { rotateCode = cv::ROTATE_90_CLOCKWISE; } else if (rotation == 180) { @@ -239,21 +202,13 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( rgbRotated = rgbFull; } - // Resize to model input size (use dynamic dimensions) - cv::resize(rgbRotated, g_resizedRgb, cv::Size(g_modelWidth, g_modelHeight)); - // Run segmentation cv::Mat mask; - if (!g_modelLoaded || !g_model) { + if (!g_modelLoaded || !g_segmentation) { // Rate-limited logging for missing model - auto now = std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count(); if (now - g_lastDebugLogTime > 1000) { - LOGD("Model not loaded (g_modelLoaded=%d, g_model=%p), using placeholder " - "ellipse mask", - g_modelLoaded ? 
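The plane-merging done above exists because of stride: OpenCV's `COLOR_YUV2RGB_I420` expects one contiguous buffer of `height * 3/2` rows with no row padding, so strided planes must be repacked first. A standalone sketch of the same packing step (a hypothetical helper, not part of the patch):

```cpp
#include <cstdint>
#include <cstring>
#include <opencv2/imgproc.hpp>

// I420 geometry: Y is width x height; U and V are (width/2) x (height/2).
static cv::Mat packI420(const uint8_t *y, const uint8_t *u, const uint8_t *v,
                        int width, int height, int yStride, int uvStride) {
  cv::Mat i420(height * 3 / 2, width, CV_8UC1);
  for (int row = 0; row < height; row++)
    std::memcpy(i420.ptr(row), y + row * yStride, width);
  uint8_t *dst = i420.ptr(height); // chroma starts right after the luma rows
  const int uvW = width / 2, uvH = height / 2;
  for (int row = 0; row < uvH; row++)
    std::memcpy(dst + row * uvW, u + row * uvStride, uvW);
  dst += uvW * uvH;
  for (int row = 0; row < uvH; row++)
    std::memcpy(dst + row * uvW, v + row * uvStride, uvW);
  return i420; // cv::cvtColor(i420, rgb, cv::COLOR_YUV2RGB_I420) from here
}
```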
1 : 0, g_model.get()); + LOGD("Model not loaded, using placeholder ellipse mask"); g_lastDebugLogTime = now; } @@ -276,84 +231,43 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( } } } else { - // Run ExecuTorch model - g_resizedRgb.convertTo(g_floatMat, CV_32FC3, 1.0 / 255.0); - - // Convert HWC to NCHW - float *inputPtr = g_inputData.data(); - for (int c = 0; c < 3; c++) { - for (int y = 0; y < g_modelHeight; y++) { - const cv::Vec3f *row = g_floatMat.ptr(y); - for (int x = 0; x < g_modelWidth; x++) { - *inputPtr++ = row[x][c]; + // Use BaseSemanticSegmentation via generateFromPixels + try { + // Create JSTensorViewIn from the rotated RGB image + // generateFromPixels expects [height, width, 3] RGB uint8 data + JSTensorViewIn pixelData; + pixelData.dataPtr = rgbRotated.data; + pixelData.sizes = {rgbRotated.rows, rgbRotated.cols, 3}; + pixelData.scalarType = executorch::aten::ScalarType::Byte; + + // Run inference - returns foreground probability mask + std::set> classesOfInterest = {"foreground"}; + auto result = g_segmentation->generateFromPixels( + pixelData, classesOfInterest, false); + + // Extract foreground mask from result + if (result.classBuffers && result.classBuffers->count("foreground")) { + auto &fgBuffer = result.classBuffers->at("foreground"); + auto *fgData = reinterpret_cast(fgBuffer->data()); + + // The mask is at model input size, need to get its dimensions + // For now, assume it's the model input size + mask = cv::Mat(g_modelHeight, g_modelWidth, CV_32FC1, fgData).clone(); + + // Rate-limited debug logging + if (now - g_lastDebugLogTime > 1000) { + double minVal, maxVal; + cv::minMaxLoc(mask, &minVal, &maxVal); + LOGD("Segmentation result: size=%dx%d, min=%.4f, max=%.4f", mask.cols, + mask.rows, minVal, maxVal); + g_lastDebugLogTime = now; } + } else { + LOGE("No foreground mask in result, using fallback"); + mask = cv::Mat::ones(g_modelHeight, g_modelWidth, CV_32FC1); } - } - - std::vector shape = {1, 3, g_modelHeight, - g_modelWidth}; - auto inputTensor = executorch::extension::from_blob( - g_inputData.data(), shape, executorch::aten::ScalarType::Float); - - std::vector inputs = {inputTensor}; - auto result = g_model->forward(inputs); - - if (result.ok() && !result.get().empty()) { - auto outputTensor = result.get()[0].toTensor(); - const float *outputData = outputTensor.const_data_ptr(); - - // Get output tensor dimensions - int outputH = g_modelHeight; - int outputW = g_modelWidth; - if (outputTensor.dim() >= 2) { - outputH = static_cast(outputTensor.size(outputTensor.dim() - 2)); - outputW = static_cast(outputTensor.size(outputTensor.dim() - 1)); - } - - // Rate-limited debug logging (once per second) - auto now = std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count(); - if (now - g_lastDebugLogTime > 1000) { - // Log tensor info - LOGD("Model output: dim=%zd, outputH=%d, outputW=%d, numel=%zd", - (ssize_t)outputTensor.dim(), outputH, outputW, - (ssize_t)outputTensor.numel()); - - // Sample output values to understand the range - ssize_t totalElements = outputTensor.numel(); - float minVal = outputData[0], maxVal = outputData[0], sum = 0; - for (ssize_t i = 0; i < totalElements; i++) { - float v = outputData[i]; - if (v < minVal) - minVal = v; - if (v > maxVal) - maxVal = v; - sum += v; - } - float mean = sum / totalElements; - LOGD("Output stats: min=%.4f, max=%.4f, mean=%.4f", minVal, maxVal, - mean); - - // Log first few values - int numSamples = (totalElements < 10) ? 
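One subtlety in the mask extraction above: constructing a `cv::Mat` around a raw pointer produces a zero-copy view, so the `.clone()` is load-bearing; it detaches the mask from the result object's buffer before that buffer can go out of scope. A sketch, with `maskData`, `h`, and `w` standing in for the segmentation output and its dimensions:

```cpp
#include <cstdio>
#include <opencv2/core.hpp>

static cv::Mat takeMask(float *maskData, int h, int w) {
  cv::Mat view(h, w, CV_32FC1, maskData); // zero-copy view into maskData
  cv::Mat owned = view.clone();           // deep copy that outlives the buffer
  double mn = 0, mx = 0;
  cv::minMaxLoc(owned, &mn, &mx); // sanity check: a sigmoid head lands in [0, 1]
  std::printf("mask range: [%.4f, %.4f]\n", mn, mx);
  return owned;
}
```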
(int)totalElements : 10; - std::string samples = "First values: "; - for (int i = 0; i < numSamples; i++) { - samples += std::to_string(outputData[i]) + " "; - } - LOGD("%s", samples.c_str()); - - g_lastDebugLogTime = now; - } - - mask = - cv::Mat(outputH, outputW, CV_32FC1, const_cast(outputData)) - .clone(); - } else { - // Fallback - keep everything - LOGE("Model forward FAILED! result.ok()=%d, result.get().empty()=%d", - result.ok() ? 1 : 0, - result.ok() ? (result.get().empty() ? 1 : 0) : -1); + } catch (const std::exception &e) { + LOGE("Segmentation failed: %s", e.what()); mask = cv::Mat::ones(g_modelHeight, g_modelWidth, CV_32FC1); } } @@ -361,13 +275,9 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( // Resize mask to rotated frame size, then rotate back to original orientation cv::Mat fullMask; if (rotation == 90 || rotation == 270) { - // For 90/270 rotation, the rotated image had swapped dimensions cv::Mat rotatedMask; cv::resize(mask, rotatedMask, cv::Size(height, width), 0, 0, - cv::INTER_LINEAR); // Note: swapped w/h - - // Rotate mask back to original frame orientation (inverse of what we did to - // the image) + cv::INTER_LINEAR); int inverseRotateCode = (rotation == 90) ? cv::ROTATE_90_COUNTERCLOCKWISE : cv::ROTATE_90_CLOCKWISE; cv::rotate(rotatedMask, fullMask, inverseRotateCode); @@ -375,29 +285,10 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( cv::resize(mask, fullMask, cv::Size(width, height), 0, 0, cv::INTER_LINEAR); cv::rotate(fullMask, fullMask, cv::ROTATE_180); } else { - // No rotation cv::resize(mask, fullMask, cv::Size(width, height), 0, 0, cv::INTER_LINEAR); } - // Debug: log mask statistics after resize (rate-limited) - { - auto now = std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count(); - static long long lastMaskLogTime = 0; - if (now - lastMaskLogTime > 1000) { - double minVal, maxVal; - cv::minMaxLoc(fullMask, &minVal, &maxVal); - cv::Scalar meanVal = cv::mean(fullMask); - LOGD("Resized mask stats: size=%dx%d, min=%.4f, max=%.4f, mean=%.4f", - fullMask.cols, fullMask.rows, minVal, maxVal, meanVal[0]); - lastMaskLogTime = now; - } - } - - // Apply smoothstep to mask using OpenCV (vectorized/SIMD optimized) - // smoothstep: values < 0.3 → 0, values > 0.7 → 1, smooth transition in - // between + // Apply smoothstep to mask const float lowThresh = 0.3f; const float highThresh = 0.7f; cv::Mat t; @@ -405,7 +296,6 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( cv::multiply(t, 1.0f / (highThresh - lowThresh), t); cv::min(t, 1.0f, t); cv::max(t, 0.0f, t); - // smoothstep: t*t*(3 - 2*t) cv::Mat t2, smoothMask; cv::multiply(t, t, t2); cv::multiply(t, -2.0f, smoothMask); @@ -422,12 +312,12 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( } // Create blurred Y using downscale-blur-upscale for performance - // Downscale 3x, stack blur (O(1) fast blur), upscale back + // 4x downscale for speed, stackBlur is O(1) cv::Mat ySmall, yBlurredSmall, yBlurred; - int smallW = width / 3; - int smallH = height / 3; + int smallW = width / 4; + int smallH = height / 4; cv::resize(yMat, ySmall, cv::Size(smallW, smallH), 0, 0, cv::INTER_AREA); - cv::stackBlur(ySmall, yBlurredSmall, cv::Size(25, 25)); // O(1) fast blur + cv::stackBlur(ySmall, yBlurredSmall, cv::Size(21, 21)); cv::resize(yBlurredSmall, yBlurred, cv::Size(width, height), 0, 0, cv::INTER_LINEAR); @@ -442,12 +332,9 @@ 
Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame(
     for (int col = 0; col < width; col++) {
       float prob = maskRow[col];
 
-      // prob=1: foreground (person) = original
-      // prob=0: background = blurred
       dstY[col] =
           static_cast<uint8_t>(blurY[col] * (1.0f - prob) + srcY[col] * prob);
     }
-    // Copy stride padding if any
     if (actualYStride > width) {
       memcpy(dstY + width, ySrc + row * actualYStride + width,
              actualYStride - width);
@@ -464,7 +351,7 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame(
   return outYData;
 }
 
-// Keep old method for compatibility but mark deprecated
+// Keep old method for compatibility
 JNIEXPORT jfloatArray JNICALL
 Java_com_executorch_webrtc_ExecutorchFrameProcessor_runSegmentation(
     JNIEnv *env, jobject thiz, jbyteArray rgbData, jint width, jint height) {
diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt
index 1e3aec00d2..558dcc577d 100644
--- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt
+++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt
@@ -32,8 +32,9 @@ class ExecutorchFrameProcessor : VideoFrameProcessor {
   init {
     Log.d(TAG, "ExecutorchFrameProcessor created - background removal enabled")
     tryLoadModel()
   }
+
   /**
    * Try to load the model if not already loaded and path is available.
    * Called from init and on each frame to handle late model configuration.
    */
@@ -86,7 +87,6 @@ class ExecutorchFrameProcessor : VideoFrameProcessor {
     rotation: Int,
   ): ByteArray?
 
-
   override fun process(
     frame: VideoFrame,
     helper: SurfaceTextureHelper,
diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt
index d9fd2d11db..43adf79637 100644
--- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt
+++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt
@@ -9,7 +9,7 @@ import com.oney.WebRTCModule.videoEffects.ProcessorProvider
  */
 object ExecutorchWebRTC {
   private const val TAG = "ExecutorchWebRTC"
-  private const val PROCESSOR_NAME = "executorch"
+  private const val PROCESSOR_NAME = "executorchBackgroundBlur"
 
   // Configuration for background removal
   var modelPath: String?
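The mask shaping a few hunks back (subtract, scale, clamp, then t*t*(3 - 2t)) is the classic smoothstep: probabilities below the low threshold collapse to 0 (fully blurred), above the high threshold to 1 (fully sharp), with a C1-continuous ramp in between. Its scalar equivalent, with the same 0.3/0.7 thresholds, is easier to read (a reference only, not the patch's code):

```cpp
#include <algorithm>

// Scalar reference for the vectorized OpenCV smoothstep pipeline.
static float smoothstepMask(float p, float lo = 0.3f, float hi = 0.7f) {
  float t = std::clamp((p - lo) / (hi - lo), 0.0f, 1.0f);
  return t * t * (3.0f - 2.0f * t);
}
```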
= null diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt index 51a502bea8..9e5e6431bb 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt @@ -1,6 +1,5 @@ package com.executorch.webrtc -import com.facebook.react.bridge.Promise import com.facebook.react.bridge.ReactApplicationContext import com.facebook.react.bridge.ReactContextBaseJavaModule import com.facebook.react.bridge.ReactMethod @@ -72,5 +71,4 @@ class ExecutorchWebRTCModule( ) { ExecutorchWebRTC.configureModel(modelPath) } - } diff --git a/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.h b/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.h new file mode 100644 index 0000000000..d3afecdb8c --- /dev/null +++ b/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.h @@ -0,0 +1,17 @@ +#import +#import +#import + +// Import the VideoFrameProcessor protocol from react-native-webrtc +@protocol VideoFrameProcessorDelegate +- (RTCVideoFrame *)capturer:(RTCVideoCapturer *)capturer + didCaptureVideoFrame:(RTCVideoFrame *)frame; +@end + +@interface ExecutorchFrameProcessor : NSObject + ++ (instancetype)sharedInstance; +- (void)configureWithModelPath:(NSString *)modelPath; +- (void)unloadModel; + +@end diff --git a/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm b/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm new file mode 100644 index 0000000000..6325a8e29c --- /dev/null +++ b/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm @@ -0,0 +1,305 @@ +#import "ExecutorchFrameProcessor.h" +#import +#import +#import +#import +#import + +#import +#import +#import + +using namespace rnexecutorch; +using namespace rnexecutorch::models::semantic_segmentation; + +@implementation ExecutorchFrameProcessor { + std::unique_ptr _segmentation; + BOOL _modelLoaded; + int _modelWidth; + int _modelHeight; + int _frameCount; + NSTimeInterval _lastLogTime; +} + ++ (instancetype)sharedInstance { + static ExecutorchFrameProcessor *instance = nil; + static dispatch_once_t onceToken; + dispatch_once(&onceToken, ^{ + instance = [[ExecutorchFrameProcessor alloc] init]; + }); + return instance; +} + +- (instancetype)init { + self = [super init]; + if (self) { + _modelLoaded = NO; + _modelWidth = 256; + _modelHeight = 256; + _frameCount = 0; + _lastLogTime = 0; + } + return self; +} + +- (void)configureWithModelPath:(NSString *)modelPath { + NSLog(@"[ExecutorchFrameProcessor] Loading model from: %@", modelPath); + + try { + std::vector normMean = {}; + std::vector normStd = {}; + std::vector allClasses = {"foreground", "background"}; + + _segmentation = std::make_unique( + std::string([modelPath UTF8String]), normMean, normStd, allClasses, + nullptr); + + auto inputShapes = _segmentation->getAllInputShapes(); + if (!inputShapes.empty() && inputShapes[0].size() >= 4) { + _modelHeight = inputShapes[0][inputShapes[0].size() - 2]; + _modelWidth = inputShapes[0][inputShapes[0].size() - 1]; + } + + _modelLoaded = YES; + NSLog(@"[ExecutorchFrameProcessor] Model loaded! 
Size: %dx%d", _modelWidth, + _modelHeight); + } catch (const std::exception &e) { + NSLog(@"[ExecutorchFrameProcessor] Failed to load model: %s", e.what()); + _modelLoaded = NO; + } +} + +- (void)unloadModel { + _segmentation.reset(); + _modelLoaded = NO; +} + +- (RTCVideoFrame *)capturer:(RTCVideoCapturer *)capturer + didCaptureVideoFrame:(RTCVideoFrame *)frame { + _frameCount++; + + // TEST: Just return the original frame to check if colors are correct without + // processing return frame; + + // Get I420 buffer + id i420Buffer = [frame.buffer toI420]; + if (!i420Buffer) { + return frame; + } + + int width = i420Buffer.width; + int height = i420Buffer.height; + int rotation = frame.rotation; + int uvHeight = height / 2; + int uvWidth = width / 2; + + // Rate-limited logging + NSTimeInterval now = [[NSDate date] timeIntervalSince1970]; + if (now - _lastLogTime > 1.0) { + NSLog(@"[ExecutorchFrameProcessor] Frame: %dx%d, rotation=%d, fps=%.1f, " + @"model=%d", + width, height, rotation, _frameCount / (now - _lastLogTime), + _modelLoaded); + _lastLogTime = now; + _frameCount = 0; + } + + // Create mutable buffer for output + id outBuffer = + [[RTCMutableI420Buffer alloc] initWithWidth:width height:height]; + + // Copy Y plane row by row (respecting stride) + for (int row = 0; row < height; row++) { + memcpy(outBuffer.mutableDataY + row * outBuffer.strideY, + i420Buffer.dataY + row * i420Buffer.strideY, width); + } + + // Copy U plane row by row + for (int row = 0; row < uvHeight; row++) { + memcpy(outBuffer.mutableDataU + row * outBuffer.strideU, + i420Buffer.dataU + row * i420Buffer.strideU, uvWidth); + } + + // Copy V plane row by row + for (int row = 0; row < uvHeight; row++) { + memcpy(outBuffer.mutableDataV + row * outBuffer.strideV, + i420Buffer.dataV + row * i420Buffer.strideV, uvWidth); + } + + // If no model loaded, just return the copy (no blur) + if (!_modelLoaded || !_segmentation) { + RTCVideoFrame *passthrough = + [[RTCVideoFrame alloc] initWithBuffer:outBuffer + rotation:frame.rotation + timeStampNs:frame.timeStampNs]; + return passthrough; + } + + // Convert I420 to RGB for model inference + cv::Mat i420Mat(height * 3 / 2, width, CV_8UC1); + memcpy(i420Mat.data, i420Buffer.dataY, width * height); + + uint8_t *uvDst = i420Mat.data + (height * width); + for (int row = 0; row < uvHeight; row++) { + memcpy(uvDst + row * uvWidth, i420Buffer.dataU + row * i420Buffer.strideU, + uvWidth); + } + for (int row = 0; row < uvHeight; row++) { + memcpy(uvDst + uvHeight * uvWidth + row * uvWidth, + i420Buffer.dataV + row * i420Buffer.strideV, uvWidth); + } + + cv::Mat rgbFull; + cv::cvtColor(i420Mat, rgbFull, cv::COLOR_YUV2RGB_I420); + + // Rotate for model inference + cv::Mat rgbRotated; + if (rotation == RTCVideoRotation_90) { + cv::rotate(rgbFull, rgbRotated, cv::ROTATE_90_CLOCKWISE); + } else if (rotation == RTCVideoRotation_180) { + cv::rotate(rgbFull, rgbRotated, cv::ROTATE_180); + } else if (rotation == RTCVideoRotation_270) { + cv::rotate(rgbFull, rgbRotated, cv::ROTATE_90_COUNTERCLOCKWISE); + } else { + rgbRotated = rgbFull; + } + + // Run segmentation + cv::Mat mask; + try { + JSTensorViewIn pixelData; + pixelData.dataPtr = rgbRotated.data; + pixelData.sizes = {rgbRotated.rows, rgbRotated.cols, 3}; + pixelData.scalarType = executorch::aten::ScalarType::Byte; + + std::set> classesOfInterest = {"foreground"}; + auto result = + _segmentation->generateFromPixels(pixelData, classesOfInterest, false); + + if (result.classBuffers && result.classBuffers->count("foreground")) { + auto 
&fgBuffer = result.classBuffers->at("foreground"); + auto *fgData = reinterpret_cast(fgBuffer->data()); + mask = cv::Mat(_modelHeight, _modelWidth, CV_32FC1, fgData).clone(); + } else { + mask = cv::Mat::ones(_modelHeight, _modelWidth, CV_32FC1); + } + } catch (const std::exception &e) { + NSLog(@"[ExecutorchFrameProcessor] Segmentation failed: %s", e.what()); + // On error, return unprocessed copy + RTCVideoFrame *passthrough = + [[RTCVideoFrame alloc] initWithBuffer:outBuffer + rotation:frame.rotation + timeStampNs:frame.timeStampNs]; + return passthrough; + } + + // Resize mask and rotate back to match frame orientation + cv::Mat fullMask; + if (rotation == RTCVideoRotation_90 || rotation == RTCVideoRotation_270) { + cv::Mat rotatedMask; + cv::resize(mask, rotatedMask, cv::Size(height, width), 0, 0, + cv::INTER_LINEAR); + int inverseCode = (rotation == RTCVideoRotation_90) + ? cv::ROTATE_90_COUNTERCLOCKWISE + : cv::ROTATE_90_CLOCKWISE; + cv::rotate(rotatedMask, fullMask, inverseCode); + } else if (rotation == RTCVideoRotation_180) { + cv::resize(mask, fullMask, cv::Size(width, height), 0, 0, cv::INTER_LINEAR); + cv::rotate(fullMask, fullMask, cv::ROTATE_180); + } else { + cv::resize(mask, fullMask, cv::Size(width, height), 0, 0, cv::INTER_LINEAR); + } + + // Blur mask edges for smooth transition + cv::GaussianBlur(fullMask, fullMask, cv::Size(15, 15), 0); + + // Create Y mat from original + cv::Mat yMat(height, width, CV_8UC1); + for (int row = 0; row < height; row++) { + memcpy(yMat.ptr(row), i420Buffer.dataY + row * i420Buffer.strideY, width); + } + + // Create U and V mats + cv::Mat uMat(uvHeight, uvWidth, CV_8UC1); + cv::Mat vMat(uvHeight, uvWidth, CV_8UC1); + for (int row = 0; row < uvHeight; row++) { + memcpy(uMat.ptr(row), i420Buffer.dataU + row * i420Buffer.strideU, uvWidth); + memcpy(vMat.ptr(row), i420Buffer.dataV + row * i420Buffer.strideV, uvWidth); + } + + // Blur Y plane (2x downscale for less blocky artifacts) + cv::Mat ySmall, yBlurredSmall, yBlurred; + int smallW = width / 2; + int smallH = height / 2; + cv::resize(yMat, ySmall, cv::Size(smallW, smallH), 0, 0, cv::INTER_AREA); + cv::stackBlur(ySmall, yBlurredSmall, cv::Size(31, 31)); + cv::resize(yBlurredSmall, yBlurred, cv::Size(width, height), 0, 0, + cv::INTER_LINEAR); + + // Blur U and V planes (they're already at half res, just blur directly) + cv::Mat uBlurred, vBlurred; + cv::stackBlur(uMat, uBlurred, cv::Size(15, 15)); + cv::stackBlur(vMat, vBlurred, cv::Size(15, 15)); + + // Downscale mask for UV blending (UV is half resolution) + cv::Mat uvMask; + cv::resize(fullMask, uvMask, cv::Size(uvWidth, uvHeight), 0, 0, + cv::INTER_LINEAR); + + // Blend Y plane: foreground stays sharp, background gets blurred + uint8_t *outY = outBuffer.mutableDataY; + int outYStride = outBuffer.strideY; + + for (int row = 0; row < height; row++) { + const uint8_t *srcY = yMat.ptr(row); + const uint8_t *blurY = yBlurred.ptr(row); + const float *maskRow = fullMask.ptr(row); + uint8_t *dstY = outY + row * outYStride; + + for (int col = 0; col < width; col++) { + float fg = maskRow[col]; + dstY[col] = + static_cast(blurY[col] * (1.0f - fg) + srcY[col] * fg); + } + } + + // Blend U plane + uint8_t *outU = outBuffer.mutableDataU; + int outUStride = outBuffer.strideU; + for (int row = 0; row < uvHeight; row++) { + const uint8_t *srcU = uMat.ptr(row); + const uint8_t *blurU = uBlurred.ptr(row); + const float *maskRow = uvMask.ptr(row); + uint8_t *dstU = outU + row * outUStride; + + for (int col = 0; col < uvWidth; col++) { + float fg = 
maskRow[col]; + dstU[col] = + static_cast(blurU[col] * (1.0f - fg) + srcU[col] * fg); + } + } + + // Blend V plane + uint8_t *outV = outBuffer.mutableDataV; + int outVStride = outBuffer.strideV; + for (int row = 0; row < uvHeight; row++) { + const uint8_t *srcV = vMat.ptr(row); + const uint8_t *blurV = vBlurred.ptr(row); + const float *maskRow = uvMask.ptr(row); + uint8_t *dstV = outV + row * outVStride; + + for (int col = 0; col < uvWidth; col++) { + float fg = maskRow[col]; + dstV[col] = + static_cast(blurV[col] * (1.0f - fg) + srcV[col] * fg); + } + } + + RTCVideoFrame *processedFrame = + [[RTCVideoFrame alloc] initWithBuffer:outBuffer + rotation:frame.rotation + timeStampNs:frame.timeStampNs]; + return processedFrame; +} + +@end diff --git a/packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.h b/packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.h new file mode 100644 index 0000000000..ddc6f1f28e --- /dev/null +++ b/packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.h @@ -0,0 +1,5 @@ +#import + +@interface ExecutorchWebRTC : NSObject + +@end diff --git a/packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.mm b/packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.mm new file mode 100644 index 0000000000..58d471d24f --- /dev/null +++ b/packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.mm @@ -0,0 +1,44 @@ +#import "ExecutorchWebRTC.h" +#import "ExecutorchFrameProcessor.h" +#import + +@implementation ExecutorchWebRTC + +RCT_EXPORT_MODULE() + +static BOOL _processorRegistered = NO; + ++ (void)registerProcessorIfNeeded { + if (!_processorRegistered) { + _processorRegistered = YES; + ExecutorchFrameProcessor *processor = + [ExecutorchFrameProcessor sharedInstance]; + [ProcessorProvider addProcessor:processor + forName:@"executorchBackgroundBlur"]; + NSLog(@"[ExecutorchWebRTC] Registered executorchBackgroundBlur processor"); + } +} + +RCT_EXPORT_METHOD(setup) { [ExecutorchWebRTC registerProcessorIfNeeded]; } + +RCT_EXPORT_METHOD(configureBackgroundRemoval : (NSString *)modelPath) { + NSLog(@"[ExecutorchWebRTC] configureBackgroundRemoval: %@", modelPath); + + [ExecutorchWebRTC registerProcessorIfNeeded]; + + // Remove file:// prefix if present + NSString *cleanPath = modelPath; + if ([modelPath hasPrefix:@"file://"]) { + cleanPath = [modelPath substringFromIndex:7]; + } + + [[ExecutorchFrameProcessor sharedInstance] configureWithModelPath:cleanPath]; +} + +RCT_EXPORT_METHOD(configureBackgroundBlur : (NSString *) + modelPath blurIntensity : (int)intensity) { + // Legacy alias + [self configureBackgroundRemoval:modelPath]; +} + +@end diff --git a/packages/react-native-executorch-webrtc/package.json b/packages/react-native-executorch-webrtc/package.json index 6abc74ae06..9ffe2c60d8 100644 --- a/packages/react-native-executorch-webrtc/package.json +++ b/packages/react-native-executorch-webrtc/package.json @@ -13,7 +13,9 @@ "files": [ "lib", "android", + "ios", "react-native.config.js", + "react-native-executorch-webrtc.podspec", "README.md" ], "license": "MIT", diff --git a/packages/react-native-executorch-webrtc/react-native-executorch-webrtc.podspec b/packages/react-native-executorch-webrtc/react-native-executorch-webrtc.podspec new file mode 100644 index 0000000000..025b520397 --- /dev/null +++ b/packages/react-native-executorch-webrtc/react-native-executorch-webrtc.podspec @@ -0,0 +1,33 @@ +require "json" + +package = JSON.parse(File.read(File.join(__dir__, "package.json"))) + +Pod::Spec.new do |s| + s.name = 
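The `file://` handling in `configureBackgroundRemoval` above is a plain prefix strip: React Native file fetchers often hand back `file://` URIs while native loaders want a bare filesystem path. A C++ sketch of the same normalization (a hypothetical helper; the patch does it with `NSString`):

```cpp
#include <string>
#include <string_view>

// Drop a leading file:// scheme, if present, and return a plain path.
static std::string stripFileScheme(std::string_view uri) {
  constexpr std::string_view kScheme = "file://";
  if (uri.substr(0, kScheme.size()) == kScheme)
    uri.remove_prefix(kScheme.size());
  return std::string(uri);
}
```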
"react-native-executorch-webrtc" + s.version = package["version"] + s.summary = package["description"] + s.homepage = package["repository"]["url"] + s.license = package["license"] + s.authors = "Software Mansion" + + s.platforms = { :ios => "13.0" } + s.source = { :git => package["repository"]["url"], :tag => "#{s.version}" } + + s.source_files = "ios/**/*.{h,m,mm}" + + # react-native-executorch exposes rnexecutorch/* headers via its header_dir. + # However, executorch SDK headers (executorch/*) from third-party/include + # don't propagate to dependent pods, so we need to add them here. + rne_pod_root = '"$(PODS_ROOT)/react-native-executorch"' + + s.pod_target_xcconfig = { + "USE_HEADERMAP" => "YES", + "CLANG_CXX_LANGUAGE_STANDARD" => "c++20", + "HEADER_SEARCH_PATHS" => "#{rne_pod_root}/third-party/include" + } + + s.dependency "React-Core" + s.dependency "react-native-executorch" + s.dependency "react-native-webrtc" + s.dependency "opencv-rne", "~> 4.11.0" +end diff --git a/packages/react-native-executorch-webrtc/react-native.config.js b/packages/react-native-executorch-webrtc/react-native.config.js index a8b172a218..f2400c7368 100644 --- a/packages/react-native-executorch-webrtc/react-native.config.js +++ b/packages/react-native-executorch-webrtc/react-native.config.js @@ -7,9 +7,7 @@ module.exports = { 'import com.executorch.webrtc.ExecutorchWebRTCPackage;', packageInstance: 'new ExecutorchWebRTCPackage()', }, - ios: { - // iOS support coming soon - }, + ios: {}, }, }, }; diff --git a/packages/react-native-executorch-webrtc/src/index.ts b/packages/react-native-executorch-webrtc/src/index.ts index 0ec095c457..74cedab460 100644 --- a/packages/react-native-executorch-webrtc/src/index.ts +++ b/packages/react-native-executorch-webrtc/src/index.ts @@ -11,7 +11,7 @@ import { NativeModules, Platform } from 'react-native'; // Auto-initialize the native module to register the processor // This happens when the package is first imported -if (Platform.OS === 'android') { +if (Platform.OS === 'android' || Platform.OS === 'ios') { const { ExecutorchWebRTC } = NativeModules; if (ExecutorchWebRTC) { try { @@ -31,9 +31,9 @@ if (Platform.OS === 'android') { * @param modelPath Path to the selfie segmentation model (.pte file) */ export function configureBackgroundRemoval(modelPath: string): void { - if (Platform.OS !== 'android') { + if (Platform.OS !== 'android' && Platform.OS !== 'ios') { console.warn( - 'configureBackgroundRemoval: Currently only supported on Android' + 'configureBackgroundRemoval: Only supported on Android and iOS' ); return; } diff --git a/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts b/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts index a303647d6c..a32126322e 100644 --- a/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts +++ b/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts @@ -2,7 +2,7 @@ import { useEffect } from 'react'; import { Platform, DeviceEventEmitter } from 'react-native'; import type { MediaStream, MediaStreamTrack } from 'react-native-webrtc'; -const PROCESSOR_NAME = 'executorch'; +const PROCESSOR_NAME = 'executorchBackgroundBlur'; /** * Result from frame processing @@ -52,14 +52,6 @@ export function useWebRTCFrameProcessor( return; } - // Only supported on Android for now - if (Platform.OS !== 'android') { - console.warn( - 'useWebRTCFrameProcessor: Currently only supported on Android' - ); - return; - } - const videoTracks = stream.getVideoTracks(); if (videoTracks.length === 
0) { console.warn('useWebRTCFrameProcessor: No video tracks found in stream'); From cf78e9111b8446a89d74fdf5c46f74105049a69d Mon Sep 17 00:00:00 2001 From: chmjkb Date: Tue, 14 Apr 2026 08:06:31 +0200 Subject: [PATCH 04/19] feat: android gpu acceleration --- .../src/main/cpp/FrameProcessorBridge.cpp | 206 ++++++++++- .../webrtc/ExecutorchFrameProcessor.kt | 338 ++++++++---------- .../com/executorch/webrtc/ExecutorchWebRTC.kt | 19 +- .../webrtc/ExecutorchWebRTCModule.kt | 16 + .../webrtc/NewExecutorchFrameProcessor.kt | 63 ++++ .../NewExecutorchFrameProcessorFactory.kt | 8 + .../executorch/webrtc/gl/FullscreenQuad.java | 39 ++ .../executorch/webrtc/gl/GlBlurRenderer.java | 320 +++++++++++++++++ .../executorch/webrtc/gl/GlFramebuffer.java | 66 ++++ .../com/executorch/webrtc/gl/GlProgram.java | 83 +++++ .../src/index.ts | 3 + .../src/useWebRTCFrameProcessor.ts | 38 +- 12 files changed, 984 insertions(+), 215 deletions(-) create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessor.kt create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessorFactory.kt create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/FullscreenQuad.java create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlFramebuffer.java create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlProgram.java diff --git a/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp b/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp index 7c6cb85a23..868a5e52f5 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp +++ b/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp @@ -104,9 +104,10 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_loadModel( * @param yStride Y plane stride * @param uvStride U/V plane stride * @param rotation Frame rotation in degrees (0, 90, 180, 270) - * @return Modified Y plane with background blurred (or null on error) + * @return Array of 3 byte arrays [Y, U, V] with background blurred (or null on + * error) */ -JNIEXPORT jbyteArray JNICALL +JNIEXPORT jobjectArray JNICALL Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( JNIEnv *env, jobject thiz, jbyteArray yData, jbyteArray uData, jbyteArray vData, jint width, jint height, jint yStride, jint uvStride, @@ -123,6 +124,8 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( if (!yPtr || !uPtr || !vPtr) { LOGE("Failed to get buffer pointers"); + // I'm not sure why we're releasing this here, I mean this is still used in + // the C++ for regular frames, no? Or we are copying? if (yPtr) env->ReleaseByteArrayElements(yData, yPtr, JNI_ABORT); if (uPtr) @@ -133,6 +136,7 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( } // Determine actual stride based on buffer sizes + // what the fuck is stride? int actualYStride = (yDataSize >= yStride * height) ? yStride : width; int actualUVStride = (uDataSize >= uvStride * (height / 2)) ? 
int actualYStride = (yDataSize >= yStride * height) ? yStride : width; int actualUVStride = (uDataSize >= uvStride * (height / 2)) ? uvStride : (width / 2); @@ -148,9 +152,11 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( lastBufferLogTime = now; } - // Create output Y buffer with actual stride + // Create output buffers for Y, U, V jbyteArray outYData = env->NewByteArray(actualYStride * height); - if (!outYData) { + jbyteArray outUData = env->NewByteArray(actualUVStride * (height / 2)); + jbyteArray outVData = env->NewByteArray(actualUVStride * (height / 2)); + if (!outYData || !outUData || !outVData) { env->ReleaseByteArrayElements(yData, yPtr, JNI_ABORT); env->ReleaseByteArrayElements(uData, uPtr, JNI_ABORT); env->ReleaseByteArrayElements(vData, vPtr, JNI_ABORT); @@ -184,6 +190,8 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( // Convert to RGB cv::Mat rgbFull; cv::cvtColor(i420, rgbFull, cv::COLOR_YUV2RGB_I420); + // TODO: investigate whether the intermediate copies can be avoided by + // converting in a single step, e.g. cv::cvtColor(i420, rgbFull, cv::COLOR_YUV2RGB); // Rotate image to upright for model inference cv::Mat rgbRotated; @@ -321,7 +329,35 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_processI420Frame( cv::resize(yBlurredSmall, yBlurred, cv::Size(width, height), 0, 0, cv::INTER_LINEAR); - // Blend: foreground (mask=1) uses original, background (mask=0) uses blurred + // Create U and V mats from input (packed, no stride padding) + cv::Mat uMat(uvHeight, uvWidth, CV_8UC1); + cv::Mat vMat(uvHeight, uvWidth, CV_8UC1); + for (int row = 0; row < uvHeight; row++) { + memcpy(uMat.ptr(row), uSrc + row * actualUVStride, uvWidth); + memcpy(vMat.ptr(row), vSrc + row * actualUVStride, uvWidth); + } + + // Blur U and V using same downscale-blur-upscale approach for performance + // U/V are already at half res, so 2x downscale = quarter res + cv::Mat uSmall, vSmall, uBlurredSmall, vBlurredSmall, uBlurred, vBlurred; + int uvSmallW = uvWidth / 2; + int uvSmallH = uvHeight / 2; + cv::resize(uMat, uSmall, cv::Size(uvSmallW, uvSmallH), 0, 0, cv::INTER_AREA); + cv::resize(vMat, vSmall, cv::Size(uvSmallW, uvSmallH), 0, 0, cv::INTER_AREA); + cv::stackBlur(uSmall, uBlurredSmall, cv::Size(11, 11)); + cv::stackBlur(vSmall, vBlurredSmall, cv::Size(11, 11)); + cv::resize(uBlurredSmall, uBlurred, cv::Size(uvWidth, uvHeight), 0, 0, + cv::INTER_LINEAR); + cv::resize(vBlurredSmall, vBlurred, cv::Size(uvWidth, uvHeight), 0, 0, + cv::INTER_LINEAR); + + // Downscale mask for UV blending (UV is half resolution) + cv::Mat uvMask; + cv::resize(fullMask, uvMask, cv::Size(uvWidth, uvHeight), 0, 0, + cv::INTER_LINEAR); + + // Blend Y: foreground (mask=1) uses original, background (mask=0) uses + // blurred std::vector<uint8_t> outY(actualYStride * height); for (int row = 0; row < height; row++) { @@ -341,30 +377,166 @@ } }
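+ // Editor's note: every plane is blended per pixel as out = blur * (1 - p) + src * p, where p is the foreground probability; with illustrative values src = 200, blur = 120, p = 0.25 the result is 0.25 * 200 + 0.75 * 120 = 140. Chroma is blended as well so the background loses colour detail, not just luma sharpness.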
+ // Blend U plane + std::vector<uint8_t> outU(actualUVStride * uvHeight); + for (int row = 0; row < uvHeight; row++) { + const uint8_t *srcU = uMat.ptr(row); + const uint8_t *blurU = uBlurred.ptr(row); + const float *maskRow = uvMask.ptr<float>(row); + uint8_t *dstU = outU.data() + row * actualUVStride; + + for (int col = 0; col < uvWidth; col++) { + float prob = maskRow[col]; + dstU[col] = + static_cast<uint8_t>(blurU[col] * (1.0f - prob) + srcU[col] * prob); + } + if (actualUVStride > uvWidth) { + memcpy(dstU + uvWidth, uSrc + row * actualUVStride + uvWidth, + actualUVStride - uvWidth); + } + } + + // Blend V plane + std::vector<uint8_t> outV(actualUVStride * uvHeight); + for (int row = 0; row < uvHeight; row++) { + const uint8_t *srcV = vMat.ptr(row); + const uint8_t *blurV = vBlurred.ptr(row); + const float *maskRow = uvMask.ptr<float>(row); + uint8_t *dstV = outV.data() + row * actualUVStride; + + for (int col = 0; col < uvWidth; col++) { + float prob = maskRow[col]; + dstV[col] = + static_cast<uint8_t>(blurV[col] * (1.0f - prob) + srcV[col] * prob); + } + if (actualUVStride > uvWidth) { + memcpy(dstV + uvWidth, vSrc + row * actualUVStride + uvWidth, + actualUVStride - uvWidth); + } + } + + // Copy data to output arrays env->SetByteArrayRegion(outYData, 0, actualYStride * height, reinterpret_cast<jbyte *>(outY.data())); + env->SetByteArrayRegion(outUData, 0, actualUVStride * uvHeight, + reinterpret_cast<jbyte *>(outU.data())); + env->SetByteArrayRegion(outVData, 0, actualUVStride * uvHeight, + reinterpret_cast<jbyte *>(outV.data())); env->ReleaseByteArrayElements(yData, yPtr, JNI_ABORT); env->ReleaseByteArrayElements(uData, uPtr, JNI_ABORT); env->ReleaseByteArrayElements(vData, vPtr, JNI_ABORT); - return outYData; + // Create result array of 3 byte arrays [Y, U, V] + jclass byteArrayClass = env->FindClass("[B"); + jobjectArray result = env->NewObjectArray(3, byteArrayClass, nullptr); + env->SetObjectArrayElement(result, 0, outYData); + env->SetObjectArrayElement(result, 1, outUData); + env->SetObjectArrayElement(result, 2, outVData); + + return result; } -// Keep old method for compatibility -JNIEXPORT jfloatArray JNICALL +/** + * Run segmentation on RGBA pixels, returns grayscale mask (0-255 bytes). + * Used by GL-based blur pipeline. + */ +JNIEXPORT jbyteArray JNICALL Java_com_executorch_webrtc_ExecutorchFrameProcessor_runSegmentation( - JNIEnv *env, jobject thiz, jbyteArray rgbData, jint width, jint height) { - LOGD("runSegmentation called (deprecated path): %dx%d", width, height); + JNIEnv *env, jobject thiz, jbyteArray rgbaData, jint width, jint height, + jint rotation) { - const int maskSize = width * height; - jfloatArray result = env->NewFloatArray(maskSize); - if (!result) + if (!g_modelLoaded || !g_segmentation) { + LOGE("Model not loaded, cannot run segmentation"); return nullptr; + } - std::vector<float> mask(maskSize, 0.5f); - env->SetFloatArrayRegion(result, 0, maskSize, mask.data()); - return result; -} + jbyte *rgbaPtr = env->GetByteArrayElements(rgbaData, nullptr); + if (!rgbaPtr) { + LOGE("Failed to get RGBA data pointer"); + return nullptr; + } + + try { + // Drop the alpha channel; the segmentation model expects 3-channel RGB + // input + cv::Mat rgba(height, width, CV_8UC4, reinterpret_cast<uint8_t *>(rgbaPtr)); + cv::Mat rgb; + cv::cvtColor(rgba, rgb, cv::COLOR_RGBA2RGB); + + // Apply rotation for model inference + cv::Mat rgbRotated; + if (rotation == 90) { + cv::rotate(rgb, rgbRotated, cv::ROTATE_90_CLOCKWISE); + } else if (rotation == 180) { + cv::rotate(rgb, rgbRotated, cv::ROTATE_180); + } else if (rotation == 270) { + cv::rotate(rgb, rgbRotated, cv::ROTATE_90_COUNTERCLOCKWISE); + } else { + rgbRotated = rgb; + } + + // Run segmentation model + JSTensorViewIn pixelData; + pixelData.dataPtr = rgbRotated.data; + pixelData.sizes = {rgbRotated.rows, rgbRotated.cols, 3}; + pixelData.scalarType = executorch::aten::ScalarType::Byte; + + std::set<std::string> classesOfInterest = {"foreground"}; + auto result = + g_segmentation->generateFromPixels(pixelData, classesOfInterest, false); + + // Extract mask + cv::Mat mask; + if (result.classBuffers && result.classBuffers->count("foreground")) { + auto &fgBuffer = result.classBuffers->at("foreground"); + auto *fgData = reinterpret_cast<float *>(fgBuffer->data()); + mask = cv::Mat(g_modelHeight, g_modelWidth, CV_32FC1, fgData).clone(); + } else { + LOGE("No foreground mask in result"); + env->ReleaseByteArrayElements(rgbaData, rgbaPtr, JNI_ABORT); + return nullptr; + }
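+ // Editor's note: pixelData hands the model an HWC uint8 image, and the float mask comes back at the model's fixed g_modelWidth x g_modelHeight resolution; since the input was rotated upright for inference, the mask must be rotated by the inverse angle below before it lines up with the unrotated frame.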
+ // Rotate mask back to original orientation + cv::Mat maskRotated; + if (rotation == 90) { + cv::rotate(mask, maskRotated, cv::ROTATE_90_COUNTERCLOCKWISE); + } else if (rotation == 180) { + cv::rotate(mask, maskRotated, cv::ROTATE_180); + } else if (rotation == 270) { + cv::rotate(mask, maskRotated, cv::ROTATE_90_CLOCKWISE); + } else { + maskRotated = mask; + } + + // Resize mask to input dimensions + cv::Mat maskResized; + cv::resize(maskRotated, maskResized, cv::Size(width, height), 0, 0, + cv::INTER_LINEAR); + + // Convert float mask (0-1) to bytes (0-255) + cv::Mat maskBytes; + maskResized.convertTo(maskBytes, CV_8UC1, 255.0); + + // Create output array + const int maskSize = width * height; + jbyteArray output = env->NewByteArray(maskSize); + if (!output) { + env->ReleaseByteArrayElements(rgbaData, rgbaPtr, JNI_ABORT); + return nullptr; + } + + env->SetByteArrayRegion(output, 0, maskSize, + reinterpret_cast<jbyte *>(maskBytes.data)); + + env->ReleaseByteArrayElements(rgbaData, rgbaPtr, JNI_ABORT); + return output; + + } catch (const std::exception &e) { + LOGE("Segmentation failed: %s", e.what()); + env->ReleaseByteArrayElements(rgbaData, rgbaPtr, JNI_ABORT); + return nullptr; + } +} } // extern "C" diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt index 558dcc577d..c28f648670 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt @@ -1,248 +1,224 @@ package com.executorch.webrtc +import android.graphics.Matrix import android.util.Log +import com.executorch.webrtc.gl.GlBlurRenderer import com.oney.WebRTCModule.videoEffects.VideoFrameProcessor import org.webrtc.SurfaceTextureHelper +import org.webrtc.TextureBufferImpl import org.webrtc.VideoFrame +import org.webrtc.YuvConverter import java.nio.ByteBuffer +import java.nio.ByteOrder /** - * WebRTC frame processor that applies background blur using ExecuTorch segmentation. + * WebRTC frame processor that applies background blur using GPU shaders + ExecuTorch segmentation. + * Uses OpenGL for blur (fast) and JNI for segmentation. */ class ExecutorchFrameProcessor : VideoFrameProcessor { private var frameCount = 0 private var lastLogTime = System.currentTimeMillis() - private var lastProcessTime = System.currentTimeMillis() private val TAG = "ExecutorchFrameProcessor" - // Model state private var modelLoaded = false private var loadedModelPath: String? = null - companion object { - init { - try { - System.loadLibrary("react-native-executorch-webrtc") - } catch (e: Exception) { - Log.e("ExecutorchFrameProcessor", "Failed to load native library", e) - } - } - } + // GL-based blur renderer + private var renderer: GlBlurRenderer? = null + private var yuvConverter: YuvConverter? = null + + // Reusable buffers + private var maskByteBuffer: ByteBuffer? = null + private var rgbaBuffer: ByteArray? = null init { - Log.d(TAG, "ExecutorchFrameProcessor created - background removal enabled") - tryLoadModel() + Log.d(TAG, "ExecutorchFrameProcessor created - GL blur pipeline") tryLoadModel() } - - /** - * Try to load the model if not already loaded and path is available.
- * Called from init and on each frame to handle late model configuration. - */ private fun tryLoadModel() { - val configuredPath = ExecutorchWebRTC.modelPath - - // Skip if no path configured or already loaded this path - if (configuredPath == null) { - return - } - - if (modelLoaded && loadedModelPath == configuredPath) { - return - } + val configuredPath = ExecutorchWebRTC.modelPath ?: return + if (modelLoaded && loadedModelPath == configuredPath) return try { - Log.d(TAG, "Loading segmentation model from: $configuredPath") + Log.d(TAG, "Loading segmentation model: $configuredPath") val success = loadModel(configuredPath) if (success) { modelLoaded = true loadedModelPath = configuredPath - Log.d(TAG, "✅ Segmentation model loaded successfully!") - } else { - Log.e(TAG, "❌ loadModel returned false") + Log.d(TAG, "Model loaded successfully!") } } catch (e: Exception) { - Log.e(TAG, "❌ Failed to load model: $configuredPath", e) + Log.e(TAG, "Failed to load model", e) } } - /** - * Load the segmentation model - */ + // JNI: Load the segmentation model private external fun loadModel(modelPath: String): Boolean - /** - * Process I420 frame directly in native code - much faster than the old path. - * Does segmentation and mask application in one JNI call. - * @return Modified Y plane with background blacked out - */ - private external fun processI420Frame( - yData: ByteArray, - uData: ByteArray, - vData: ByteArray, + // JNI: Run segmentation on RGBA pixels, returns grayscale mask + private external fun runSegmentation( + rgbaData: ByteArray, width: Int, height: Int, - yStride: Int, - uvStride: Int, - rotation: Int, + rotation: Int ): ByteArray? - override fun process( - frame: VideoFrame, - helper: SurfaceTextureHelper, - ): VideoFrame { + override fun process(frame: VideoFrame, helper: SurfaceTextureHelper): VideoFrame { frameCount++ + if (!modelLoaded) tryLoadModel() - // Try to load model if not loaded yet (handles late configuration) - if (!modelLoaded) { - tryLoadModel() - } - - // Log frame info every second + // Log stats every second val now = System.currentTimeMillis() if (now - lastLogTime >= 1000) { - val buffer = frame.buffer - Log.d( - TAG, - """ - ========== FRAME INFO ========== - Frame count: $frameCount - Size: ${buffer.width}x${buffer.height} - Rotation: ${frame.rotation} degrees - Buffer type: ${buffer.javaClass.simpleName} - FPS: ${frameCount / ((now - lastLogTime) / 1000.0)} - Background Blur: ACTIVE - ================================ - """.trimIndent(), - ) - + Log.d(TAG, "FPS: ${frameCount}, buffer: ${frame.buffer.javaClass.simpleName}") lastLogTime = now frameCount = 0 } - // Apply background blur - val blurredFrame = processWithModel(frame) - - if (blurredFrame != null) { - lastProcessTime = now - if (frameCount % 30 == 0) { - Log.d(TAG, "Returning PROCESSED frame (rotation=${blurredFrame.rotation}, timestamp=${blurredFrame.timestampNs})") - } - // Return the blurred frame - return blurredFrame + val buffer = frame.buffer + if (buffer !is VideoFrame.TextureBuffer) { + // Not a texture buffer, return original + frame.retain() + return frame } - // Fallback: return original frame if processing failed - if (frameCount % 30 == 0) { - Log.w(TAG, "Returning ORIGINAL frame (processing returned null)") + return try { + processWithGl(frame, buffer, helper) + } catch (e: Exception) { + Log.e(TAG, "GL processing failed: ${e.message}") + frame.retain() + frame } - frame.retain() - return frame } - private fun processWithModel(frame: VideoFrame): VideoFrame? 
{ - val i420Buffer = frame.buffer.toI420() - if (i420Buffer == null) { - Log.e(TAG, "Failed to convert frame buffer to I420!") - return null + private fun processWithGl( + frame: VideoFrame, + textureBuffer: VideoFrame.TextureBuffer, + helper: SurfaceTextureHelper + ): VideoFrame { + val width = textureBuffer.width + val height = textureBuffer.height + + // Initialize renderer if needed + if (renderer == null) { + renderer = GlBlurRenderer() } + renderer!!.ensureSetup(width, height) - try { - val width = i420Buffer.width - val height = i420Buffer.height - - // Extract Y, U, V planes as byte arrays - val yPlane = i420Buffer.dataY - val uPlane = i420Buffer.dataU - val vPlane = i420Buffer.dataV - val yStride = i420Buffer.strideY - val uStride = i420Buffer.strideU - val vStride = i420Buffer.strideV - - // Calculate sizes - use minimum of calculated size and available bytes - val uvHeight = height / 2 - val yCalcSize = yStride * height - val uCalcSize = uStride * uvHeight - val vCalcSize = vStride * uvHeight - - val yAvail = yPlane.remaining() - val uAvail = uPlane.remaining() - val vAvail = vPlane.remaining() - - val ySize = minOf(yCalcSize, yAvail) - val uSize = minOf(uCalcSize, uAvail) - val vSize = minOf(vCalcSize, vAvail) - - // Log buffer info occasionally for debugging - if (frameCount % 60 == 0) { - Log.d( - TAG, - "Buffer info: Y=$ySize/$yAvail (stride=$yStride), U=$uSize/$uAvail (stride=$uStride), V=$vSize/$vAvail (stride=$vStride), ${width}x$height", - ) - } + if (yuvConverter == null) { + yuvConverter = YuvConverter() + } - val yData = ByteArray(ySize) - val uData = ByteArray(uSize) - val vData = ByteArray(vSize) + // Convert transform matrix for GL + val transformMatrix = convertToGlMatrix(textureBuffer.transformMatrix) + val isOes = textureBuffer.type == VideoFrame.TextureBuffer.Type.OES - yPlane.get(yData) - uPlane.get(uData) - vPlane.get(vData) + // 1. Render input texture to RGBA FBO + renderer!!.renderToRgbaFbo(textureBuffer.textureId, transformMatrix, isOes) - // Process in native - returns modified Y plane - // Pass rotation so native code can rotate image before model inference - val processedY = processI420Frame(yData, uData, vData, width, height, yStride, uStride, frame.rotation) + // 2. Render downscaled for segmentation + renderer!!.renderDownscaled() - if (processedY == null) { - Log.e(TAG, "processI420Frame returned null!") - i420Buffer.release() - return null - } + // 3. Read pixels for segmentation + val segPixels = renderer!!.readSegmentationPixels() + val segW = renderer!!.segmentationWidth + val segH = renderer!!.segmentationHeight + + // 4. Run segmentation (via JNI) + val mask = runSegmentationOnPixels(segPixels, segW, segH, frame.rotation) + + // 5. Upload mask to GPU + if (mask != null) { + ensureMaskBuffer(mask.size) + maskByteBuffer!!.clear() + maskByteBuffer!!.put(mask) + maskByteBuffer!!.rewind() + renderer!!.uploadMask(maskByteBuffer!!, segW, segH) + } + + // 6. Render blur (two-pass Gaussian) + renderer!!.renderBlur() - // Calculate actual Y stride from returned data - val actualYStride = processedY.size / height + // 7. Render composite (blend original + blurred using mask) + renderer!!.renderComposite() + + // 8. 
Create output TextureBuffer + val outputBuffer = TextureBufferImpl( + width, height, + VideoFrame.TextureBuffer.Type.RGB, + renderer!!.outputTextureId, + Matrix(), + helper.handler, + yuvConverter, + null + ) + + return VideoFrame(outputBuffer, frame.rotation, frame.timestampNs) + } + + private fun runSegmentationOnPixels(pixels: ByteBuffer, width: Int, height: Int, rotation: Int): ByteArray? { + if (!modelLoaded) { + // Return placeholder ellipse mask if model not loaded + return createEllipseMask(width, height) + } - // Log success occasionally - if (frameCount % 30 == 0) { - Log.d(TAG, "Frame processed: ${width}x$height, processedY=${processedY.size}, actualYStride=$actualYStride") + // Convert ByteBuffer to ByteArray for JNI + val size = width * height * 4 + if (rgbaBuffer == null || rgbaBuffer!!.size < size) { + rgbaBuffer = ByteArray(size) + } + pixels.rewind() + pixels.get(rgbaBuffer!!, 0, size) + + return runSegmentation(rgbaBuffer!!, width, height, rotation) + } + + private fun createEllipseMask(width: Int, height: Int): ByteArray { + val mask = ByteArray(width * height) + val centerX = width / 2f + val centerY = height / 2f + val radiusX = width * 0.4f + val radiusY = height * 0.45f + + for (y in 0 until height) { + for (x in 0 until width) { + val dx = (x - centerX) / radiusX + val dy = (y - centerY) / radiusY + val dist = dx * dx + dy * dy + val value = when { + dist < 1.0f -> 255 + dist < 1.3f -> ((1.0f - (dist - 1.0f) / 0.3f) * 255).toInt() + else -> 0 + } + mask[y * width + x] = value.toByte() } + } + return mask + } - // Create output buffers - // For Y: use processed data with calculated stride - // For U/V: keep original data and strides (we don't modify chroma) - val outYPlane = ByteBuffer.allocateDirect(processedY.size) - val outUPlane = ByteBuffer.allocateDirect(uSize) - val outVPlane = ByteBuffer.allocateDirect(vSize) - - outYPlane.put(processedY) - outUPlane.put(uData) - outVPlane.put(vData) - - outYPlane.rewind() - outUPlane.rewind() - outVPlane.rewind() - - // Use original U/V strides since we're passing through the original chroma data - val resultBuffer = - org.webrtc.JavaI420Buffer.wrap( - width, - height, - outYPlane, - actualYStride, - outUPlane, - uStride, - outVPlane, - vStride, - null, - ) - - i420Buffer.release() - return VideoFrame(resultBuffer, frame.rotation, frame.timestampNs) - } catch (e: Exception) { - Log.e(TAG, "Exception in processWithModel: ${e.message}", e) - i420Buffer.release() - return null + private fun ensureMaskBuffer(size: Int) { + if (maskByteBuffer == null || maskByteBuffer!!.capacity() < size) { + maskByteBuffer = ByteBuffer.allocateDirect(size).order(ByteOrder.nativeOrder()) } } + + private fun convertToGlMatrix(androidMatrix: android.graphics.Matrix): FloatArray { + val values = FloatArray(9) + androidMatrix.getValues(values) + // Convert 3x3 to 4x4 matrix for GL + return floatArrayOf( + values[0], values[3], 0f, values[6], + values[1], values[4], 0f, values[7], + 0f, 0f, 1f, 0f, + values[2], values[5], 0f, values[8] + ) + } + + fun release() { + renderer?.release() + renderer = null + yuvConverter?.release() + yuvConverter = null + } } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt index 43adf79637..61084187ac 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt +++ 
b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt @@ -9,21 +9,30 @@ import com.oney.WebRTCModule.videoEffects.ProcessorProvider */ object ExecutorchWebRTC { private const val TAG = "ExecutorchWebRTC" - private const val PROCESSOR_NAME = "executorchBackgroundBlur" + const val PROCESSOR_NAME = "executorchBackgroundBlur" + const val PROCESSOR_NAME_NEW = "executorchBackgroundBlurNew" // Configuration for background removal var modelPath: String? = null /** - * Registers the ExecuTorch frame processor with react-native-webrtc. - * Call this in your Application.onCreate() method. + * Registers both frame processors with react-native-webrtc. + * - "executorchBackgroundBlur" -> existing GL-based processor + * - "executorchBackgroundBlurNew" -> new experimental processor */ fun registerProcessors() { try { ProcessorProvider.addProcessor(PROCESSOR_NAME, ExecutorchFrameProcessorFactory()) - Log.d(TAG, "✅ ExecuTorch frame processor registered successfully") + Log.d(TAG, "✅ Registered processor: $PROCESSOR_NAME") } catch (e: Exception) { - Log.e(TAG, "❌ Failed to register ExecuTorch processor", e) + Log.e(TAG, "❌ Failed to register $PROCESSOR_NAME", e) + } + + try { + ProcessorProvider.addProcessor(PROCESSOR_NAME_NEW, NewExecutorchFrameProcessorFactory()) + Log.d(TAG, "✅ Registered processor: $PROCESSOR_NAME_NEW") + } catch (e: Exception) { + Log.e(TAG, "❌ Failed to register $PROCESSOR_NAME_NEW", e) } } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt index 9e5e6431bb..eead8f9be5 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt @@ -16,10 +16,16 @@ class ExecutorchWebRTCModule( reactContext: ReactApplicationContext, ) : ReactContextBaseJavaModule(reactContext) { companion object { + init { + System.loadLibrary("executorch") + System.loadLibrary("react-native-executorch-webrtc") + + } const val NAME = "ExecutorchWebRTC" private var initialized = false private var moduleContext: ReactApplicationContext? 
= null + + /** * Send event to JavaScript */ @@ -71,4 +77,14 @@ ) { ExecutorchWebRTC.configureModel(modelPath) } + + /** + * Get available processor names for use with videoTrack._setVideoEffects() + */ + override fun getConstants(): MutableMap<String, Any> { + return mutableMapOf( + "PROCESSOR_NAME" to ExecutorchWebRTC.PROCESSOR_NAME, + "PROCESSOR_NAME_NEW" to ExecutorchWebRTC.PROCESSOR_NAME_NEW + ) + } } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessor.kt new file mode 100644 index 0000000000..e841f5cbf6 --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessor.kt @@ -0,0 +1,63 @@ +package com.executorch.webrtc + +import android.util.Log +import com.oney.WebRTCModule.videoEffects.VideoFrameProcessor +import org.webrtc.SurfaceTextureHelper +import org.webrtc.VideoFrame +import org.webrtc.YuvConverter + +class NewExecutorchFrameProcessor : VideoFrameProcessor { + private val TAG = "ExecuTorchBlurFrameProcessor" + private val lastFrame: VideoFrame? = null + private val yuvConverter = YuvConverter() + private var isModelLoaded = false + private lateinit var blurRadius: Number + + // JNI: Load the segmentation model + // private external fun loadModel(modelPath: String): Boolean + + fun setBlurRadius(blurRadius: Number) { this.blurRadius = blurRadius } + + fun tryLoadModel(modelPath: String) { + // TODO: consider throwing here instead of silently returning + if (isModelLoaded) return + try { + Log.d(TAG, "Loading selfie segmentation model with model path: $modelPath") + // val success = loadModel(modelPath) + val success = true // stub until the JNI loadModel binding is enabled + if (success) { + this.isModelLoaded = true + Log.d(TAG, "Successfully loaded selfie segmentation model") + } else { + this.isModelLoaded = false + Log.e(TAG, "Failed to load selfie segmentation model with model path: $modelPath") + } + } catch (e: Exception) { + Log.e(TAG, "Failed to load model", e) + } + } + + private fun internalProcessFrame( + frame: VideoFrame.Buffer + ) { + // TODO: implement the new processing path + } + + override fun process( + frame: VideoFrame, + textureHelper: SurfaceTextureHelper + ): VideoFrame { + if (!this.isModelLoaded) { + Log.d(TAG, "The model was not initialized properly," + + " make sure to run tryLoadModel() before using the frame processor.") + frame.retain() + return frame + } + + val buf = frame.buffer + // TODO: return the processed buffer once internalProcessFrame produces one + this.internalProcessFrame(buf) + + frame.retain() + return frame + } +} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessorFactory.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessorFactory.kt new file mode 100644 index 0000000000..0a708b5601 --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessorFactory.kt @@ -0,0 +1,8 @@ +package com.executorch.webrtc + +import com.oney.WebRTCModule.videoEffects.VideoFrameProcessor +import com.oney.WebRTCModule.videoEffects.VideoFrameProcessorFactoryInterface + +class NewExecutorchFrameProcessorFactory : VideoFrameProcessorFactoryInterface { + override fun build(): VideoFrameProcessor = NewExecutorchFrameProcessor() +} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/FullscreenQuad.java
b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/FullscreenQuad.java new file mode 100644 index 0000000000..9f9f8f86dc --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/FullscreenQuad.java @@ -0,0 +1,39 @@ +package com.executorch.webrtc.gl; + +import android.opengl.GLES20; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.FloatBuffer; + +public class FullscreenQuad { + private static final float[] QUAD_COORDS = {-1f, -1f, 1f, -1f, -1f, 1f, 1f, 1f}; + private static final float[] TEX_COORDS = {0f, 0f, 1f, 0f, 0f, 1f, 1f, 1f}; + + private final FloatBuffer quadBuffer = createFloatBuffer(QUAD_COORDS); + private final FloatBuffer texBuffer = createFloatBuffer(TEX_COORDS); + + public void draw(GlProgram program) { + int posLoc = program.getAttributeLocation("aPosition"); + int texLoc = program.getAttributeLocation("aTexCoord"); + + quadBuffer.position(0); + texBuffer.position(0); + + GLES20.glEnableVertexAttribArray(posLoc); + GLES20.glVertexAttribPointer(posLoc, 2, GLES20.GL_FLOAT, false, 0, quadBuffer); + GLES20.glEnableVertexAttribArray(texLoc); + GLES20.glVertexAttribPointer(texLoc, 2, GLES20.GL_FLOAT, false, 0, texBuffer); + GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4); + GLES20.glDisableVertexAttribArray(posLoc); + GLES20.glDisableVertexAttribArray(texLoc); + } + + private static FloatBuffer createFloatBuffer(float[] data) { + ByteBuffer bb = ByteBuffer.allocateDirect(data.length * 4); + bb.order(ByteOrder.nativeOrder()); + FloatBuffer fb = bb.asFloatBuffer(); + fb.put(data); + fb.position(0); + return fb; + } +} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java new file mode 100644 index 0000000000..f186310a62 --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java @@ -0,0 +1,320 @@ +package com.executorch.webrtc.gl; + +import android.opengl.GLES11Ext; +import android.opengl.GLES20; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +public class GlBlurRenderer { + + private static final String VERTEX_SHADER = + "attribute vec4 aPosition;\n" + + "attribute vec2 aTexCoord;\n" + + "varying vec2 vTexCoord;\n" + + "uniform mat4 uTexMatrix;\n" + + "void main() {\n" + + " gl_Position = aPosition;\n" + + " vTexCoord = (uTexMatrix * vec4(aTexCoord, 0.0, 1.0)).xy;\n" + + "}\n"; + + private static final String VERTEX_SHADER_SIMPLE = + "attribute vec4 aPosition;\n" + + "attribute vec2 aTexCoord;\n" + + "varying vec2 vTexCoord;\n" + + "void main() {\n" + + " gl_Position = aPosition;\n" + + " vTexCoord = aTexCoord;\n" + + "}\n"; + + private static final String FRAGMENT_OES_TO_RGBA = + "#extension GL_OES_EGL_image_external : require\n" + + "precision mediump float;\n" + + "varying vec2 vTexCoord;\n" + + "uniform samplerExternalOES uTexture;\n" + + "void main() { gl_FragColor = texture2D(uTexture, vTexCoord); }\n"; + + private static final String FRAGMENT_PASSTHROUGH = + "precision mediump float;\n" + + "varying vec2 vTexCoord;\n" + + "uniform sampler2D uTexture;\n" + + "void main() { gl_FragColor = texture2D(uTexture, vTexCoord); }\n"; + + private static final String FRAGMENT_BLUR = + "precision mediump float;\n" + + "varying vec2 vTexCoord;\n" + + "uniform sampler2D uTexture;\n" + + "uniform vec2 uDirection;\n" + + 
"uniform float uWeights[9];\n" + + "uniform float uOffsets[9];\n" + + "void main() {\n" + + " vec4 color = texture2D(uTexture, vTexCoord) * uWeights[0];\n" + + " for (int i = 1; i < 9; i++) {\n" + + " vec2 off = uDirection * uOffsets[i];\n" + + " color += texture2D(uTexture, vTexCoord + off) * uWeights[i];\n" + + " color += texture2D(uTexture, vTexCoord - off) * uWeights[i];\n" + + " }\n" + + " gl_FragColor = color;\n" + + "}\n"; + + // Temporal smoothing shader - blends current mask with previous frame + private static final String FRAGMENT_TEMPORAL_BLEND = + "precision mediump float;\n" + + "varying vec2 vTexCoord;\n" + + "uniform sampler2D uCurrentMask;\n" + + "uniform sampler2D uPreviousMask;\n" + + "uniform float uBlendFactor;\n" + + "void main() {\n" + + " float current = texture2D(uCurrentMask, vTexCoord).r;\n" + + " float previous = texture2D(uPreviousMask, vTexCoord).r;\n" + + " float blended = mix(previous, current, uBlendFactor);\n" + + " gl_FragColor = vec4(blended, blended, blended, 1.0);\n" + + "}\n"; + + private static final String FRAGMENT_COMPOSITE = + "precision mediump float;\n" + + "varying vec2 vTexCoord;\n" + + "uniform sampler2D uOriginal;\n" + + "uniform sampler2D uBlurred;\n" + + "uniform sampler2D uMask;\n" + + "uniform vec2 uMaskTexelSize;\n" + + "void main() {\n" + + " vec4 original = texture2D(uOriginal, vTexCoord);\n" + + " vec4 blurred = texture2D(uBlurred, vTexCoord);\n" + + " // Sample mask with 3x3 blur for edge smoothing\n" + + " float mask = 0.0;\n" + + " mask += texture2D(uMask, vTexCoord + vec2(-uMaskTexelSize.x, -uMaskTexelSize.y)).r * 0.0625;\n" + + " mask += texture2D(uMask, vTexCoord + vec2(0.0, -uMaskTexelSize.y)).r * 0.125;\n" + + " mask += texture2D(uMask, vTexCoord + vec2(uMaskTexelSize.x, -uMaskTexelSize.y)).r * 0.0625;\n" + + " mask += texture2D(uMask, vTexCoord + vec2(-uMaskTexelSize.x, 0.0)).r * 0.125;\n" + + " mask += texture2D(uMask, vTexCoord).r * 0.25;\n" + + " mask += texture2D(uMask, vTexCoord + vec2(uMaskTexelSize.x, 0.0)).r * 0.125;\n" + + " mask += texture2D(uMask, vTexCoord + vec2(-uMaskTexelSize.x, uMaskTexelSize.y)).r * 0.0625;\n" + + " mask += texture2D(uMask, vTexCoord + vec2(0.0, uMaskTexelSize.y)).r * 0.125;\n" + + " mask += texture2D(uMask, vTexCoord + vec2(uMaskTexelSize.x, uMaskTexelSize.y)).r * 0.0625;\n" + + " // Normalize from 0-255 byte range to 0-1\n" + + " mask = mask;\n" + + " // Apply smoothstep for better edge transition (0.2-0.8 range)\n" + + " mask = clamp((mask - 0.2) / 0.6, 0.0, 1.0);\n" + + " mask = mask * mask * (3.0 - 2.0 * mask);\n" + + " gl_FragColor = vec4(mix(blurred.rgb, original.rgb, mask), 1.0);\n" + + "}\n"; + + private static final int SEGMENTATION_WIDTH = 256; + private static final int SEGMENTATION_HEIGHT = 256; + private static final int BLUR_DOWNSCALE = 1; // Full resolution for better quality + private static final float TEMPORAL_BLEND_FACTOR = 0.6f; // 0.6 = 60% new, 40% previous + + private final FullscreenQuad quad = new FullscreenQuad(); + + private GlProgram oesProgram, rgbProgram, passthroughProgram, blurProgram, compositeProgram; + private GlProgram temporalBlendProgram; + private GlFramebuffer rgbaFbo, segmentationFbo, blurFboA, blurFboB, outputFbo; + // Temporal smoothing: ping-pong between two mask FBOs + private GlFramebuffer maskFboA, maskFboB; + private int rawMaskTexture; // Incoming mask before temporal smoothing + private boolean useMaskFboA = true; // Track which FBO has the "previous" mask + private int currentWidth, currentHeight; + private int currentMaskWidth, 
currentMaskHeight; + private boolean initialized; + + private final float[] blurWeights = new float[9]; + private final float[] blurOffsets = new float[9]; + private ByteBuffer segPixelBuffer; + + public GlBlurRenderer() { + computeGaussianKernel(16.0f); // Increased sigma for stronger blur + } + + public void ensureSetup(int width, int height) { + if (initialized && width == currentWidth && height == currentHeight) return; + if (initialized) releaseGlResources(); + + currentWidth = width; + currentHeight = height; + + oesProgram = new GlProgram(VERTEX_SHADER, FRAGMENT_OES_TO_RGBA); + rgbProgram = new GlProgram(VERTEX_SHADER, FRAGMENT_PASSTHROUGH); + passthroughProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_PASSTHROUGH); + blurProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_BLUR); + compositeProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_COMPOSITE); + temporalBlendProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_TEMPORAL_BLEND); + + rgbaFbo = new GlFramebuffer(width, height); + segmentationFbo = new GlFramebuffer(SEGMENTATION_WIDTH, SEGMENTATION_HEIGHT); + + int blurW = Math.max(1, width / BLUR_DOWNSCALE); + int blurH = Math.max(1, height / BLUR_DOWNSCALE); + blurFboA = new GlFramebuffer(blurW, blurH); + blurFboB = new GlFramebuffer(blurW, blurH); + outputFbo = new GlFramebuffer(width, height); + + // Temporal smoothing mask FBOs (will be sized on first mask upload) + rawMaskTexture = GlFramebuffer.createTexture2D(); + maskFboA = null; + maskFboB = null; + currentMaskWidth = 0; + currentMaskHeight = 0; + useMaskFboA = true; + initialized = true; + } + + public void renderToRgbaFbo(int textureId, float[] transformMatrix, boolean isOes) { + rgbaFbo.bind(); + GlProgram prog = isOes ? oesProgram : rgbProgram; + prog.use(); + prog.setUniformMatrix4("uTexMatrix", transformMatrix); + int target = isOes ? 
GLES11Ext.GL_TEXTURE_EXTERNAL_OES : GLES20.GL_TEXTURE_2D; + prog.bindTexture("uTexture", 0, textureId, target); + quad.draw(prog); + GlFramebuffer.unbind(); + } + + public void renderDownscaled() { + drawTexture(passthroughProgram, rgbaFbo.getTextureId(), segmentationFbo); + } + + public ByteBuffer readSegmentationPixels() { + int cap = SEGMENTATION_WIDTH * SEGMENTATION_HEIGHT * 4; + if (segPixelBuffer == null || segPixelBuffer.capacity() < cap) { + segPixelBuffer = ByteBuffer.allocateDirect(cap).order(ByteOrder.nativeOrder()); + } + segPixelBuffer.clear(); + segmentationFbo.readPixels(segPixelBuffer); + segPixelBuffer.rewind(); + return segPixelBuffer; + } + + public void uploadMask(ByteBuffer maskData, int maskWidth, int maskHeight) { + // Create/resize mask FBOs if needed + if (maskFboA == null || currentMaskWidth != maskWidth || currentMaskHeight != maskHeight) { + if (maskFboA != null) maskFboA.release(); + if (maskFboB != null) maskFboB.release(); + maskFboA = new GlFramebuffer(maskWidth, maskHeight); + maskFboB = new GlFramebuffer(maskWidth, maskHeight); + currentMaskWidth = maskWidth; + currentMaskHeight = maskHeight; + useMaskFboA = true; + + // Initialize both FBOs with the first mask (no blending on first frame) + GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, rawMaskTexture); + GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE, + maskWidth, maskHeight, 0, GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE, maskData); + GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0); + drawTexture(passthroughProgram, rawMaskTexture, maskFboA); + drawTexture(passthroughProgram, rawMaskTexture, maskFboB); + return; + } + + // Upload raw mask to texture + GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, rawMaskTexture); + GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE, + maskWidth, maskHeight, 0, GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE, maskData); + GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0); + + // Temporal blend: mix current mask with previous frame's mask + GlFramebuffer previousFbo = useMaskFboA ? maskFboA : maskFboB; + GlFramebuffer outputMaskFbo = useMaskFboA ? 
maskFboB : maskFboA; + + outputMaskFbo.bind(); + temporalBlendProgram.use(); + temporalBlendProgram.bindTexture("uCurrentMask", 0, rawMaskTexture, GLES20.GL_TEXTURE_2D); + temporalBlendProgram.bindTexture("uPreviousMask", 1, previousFbo.getTextureId(), GLES20.GL_TEXTURE_2D); + temporalBlendProgram.setUniform1f("uBlendFactor", TEMPORAL_BLEND_FACTOR); + quad.draw(temporalBlendProgram); + GlFramebuffer.unbind(); + + // Flip for next frame + useMaskFboA = !useMaskFboA; + } + + public void renderBlur() { + int blurW = blurFboA.getWidth(); + int blurH = blurFboA.getHeight(); + drawTexture(passthroughProgram, rgbaFbo.getTextureId(), blurFboA); + // First pass: horizontal + vertical + renderBlurPass(blurFboA.getTextureId(), blurFboB, 1.0f / blurW, 0.0f); + renderBlurPass(blurFboB.getTextureId(), blurFboA, 0.0f, 1.0f / blurH); + // Second pass: horizontal + vertical for stronger blur + renderBlurPass(blurFboA.getTextureId(), blurFboB, 1.0f / blurW, 0.0f); + renderBlurPass(blurFboB.getTextureId(), blurFboA, 0.0f, 1.0f / blurH); + } + + public void renderComposite() { + // Skip if mask hasn't been uploaded yet + if (maskFboA == null || maskFboB == null) return; + + outputFbo.bind(); + compositeProgram.use(); + compositeProgram.bindTexture("uOriginal", 0, rgbaFbo.getTextureId(), GLES20.GL_TEXTURE_2D); + compositeProgram.bindTexture("uBlurred", 1, blurFboA.getTextureId(), GLES20.GL_TEXTURE_2D); + // Use the temporally smoothed mask (the one we just wrote to, which is now "previous") + int smoothedMaskTexture = useMaskFboA ? maskFboB.getTextureId() : maskFboA.getTextureId(); + compositeProgram.bindTexture("uMask", 2, smoothedMaskTexture, GLES20.GL_TEXTURE_2D); + // Set mask texel size for edge blur sampling (based on output resolution) + compositeProgram.setUniform2f("uMaskTexelSize", 1.0f / currentWidth, 1.0f / currentHeight); + quad.draw(compositeProgram); + GlFramebuffer.unbind(); + } + + public int getOutputTextureId() { return outputFbo.getTextureId(); } + public int getSegmentationWidth() { return SEGMENTATION_WIDTH; } + public int getSegmentationHeight() { return SEGMENTATION_HEIGHT; } + + public void setBlurRadius(float sigma) { + computeGaussianKernel(sigma); + } + + public void release() { + if (!initialized) return; + releaseGlResources(); + segPixelBuffer = null; + initialized = false; + } + + private void renderBlurPass(int inputTex, GlFramebuffer outFbo, float dirX, float dirY) { + outFbo.bind(); + blurProgram.use(); + blurProgram.bindTexture("uTexture", 0, inputTex, GLES20.GL_TEXTURE_2D); + blurProgram.setUniform2f("uDirection", dirX, dirY); + blurProgram.setUniform1fv("uWeights", blurWeights); + blurProgram.setUniform1fv("uOffsets", blurOffsets); + quad.draw(blurProgram); + GlFramebuffer.unbind(); + } + + private void drawTexture(GlProgram prog, int textureId, GlFramebuffer outFbo) { + outFbo.bind(); + prog.use(); + prog.bindTexture("uTexture", 0, textureId, GLES20.GL_TEXTURE_2D); + quad.draw(prog); + GlFramebuffer.unbind(); + } + + private void computeGaussianKernel(float sigma) { + float sum = 0; + for (int i = 0; i < 9; i++) { + blurOffsets[i] = i; + blurWeights[i] = (float) (Math.exp(-(i * i) / (2.0 * sigma * sigma)) + / (Math.sqrt(2.0 * Math.PI) * sigma)); + sum += (i == 0) ? 
blurWeights[i] : 2.0f * blurWeights[i]; + } + for (int i = 0; i < 9; i++) blurWeights[i] /= sum; + } + + private void releaseGlResources() { + if (rgbaFbo != null) rgbaFbo.release(); + if (segmentationFbo != null) segmentationFbo.release(); + if (blurFboA != null) blurFboA.release(); + if (blurFboB != null) blurFboB.release(); + if (outputFbo != null) outputFbo.release(); + if (maskFboA != null) maskFboA.release(); + if (maskFboB != null) maskFboB.release(); + GLES20.glDeleteTextures(1, new int[]{rawMaskTexture}, 0); + if (oesProgram != null) oesProgram.release(); + if (rgbProgram != null) rgbProgram.release(); + if (passthroughProgram != null) passthroughProgram.release(); + if (blurProgram != null) blurProgram.release(); + if (compositeProgram != null) compositeProgram.release(); + if (temporalBlendProgram != null) temporalBlendProgram.release(); + } +} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlFramebuffer.java b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlFramebuffer.java new file mode 100644 index 0000000000..a763e5ec90 --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlFramebuffer.java @@ -0,0 +1,66 @@ +package com.executorch.webrtc.gl; + +import android.opengl.GLES20; +import java.nio.ByteBuffer; + +public class GlFramebuffer { + private final int width; + private final int height; + private final int framebufferId; + private final int textureId; + + public GlFramebuffer(int width, int height) { + this.width = width; + this.height = height; + + textureId = createTexture2D(); + GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textureId); + GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_RGBA, + width, height, 0, GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, null); + GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0); + + int[] fboId = new int[1]; + GLES20.glGenFramebuffers(1, fboId, 0); + framebufferId = fboId[0]; + GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, framebufferId); + GLES20.glFramebufferTexture2D(GLES20.GL_FRAMEBUFFER, GLES20.GL_COLOR_ATTACHMENT0, + GLES20.GL_TEXTURE_2D, textureId, 0); + GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0); + } + + public void bind() { + GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, framebufferId); + GLES20.glViewport(0, 0, width, height); + } + + public static void unbind() { + GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0); + } + + public void readPixels(ByteBuffer outputBuffer) { + bind(); + GLES20.glReadPixels(0, 0, width, height, GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, outputBuffer); + unbind(); + } + + public int getTextureId() { return textureId; } + public int getWidth() { return width; } + public int getHeight() { return height; } + + public void release() { + GLES20.glDeleteTextures(1, new int[]{textureId}, 0); + GLES20.glDeleteFramebuffers(1, new int[]{framebufferId}, 0); + } + + public static int createTexture2D() { + int[] texId = new int[1]; + GLES20.glGenTextures(1, texId, 0); + GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, texId[0]); + GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR); + GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR); + GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE); + GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE); + GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0); + 
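// Editor's note: LINEAR filtering plus CLAMP_TO_EDGE matters for the mask + // textures created here - the low-res segmentation mask is magnified to full + // frame size in the composite pass, so linear sampling smooths it and edge + // clamping avoids wrap-around artifacts at the borders. +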
return texId[0]; + } +} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlProgram.java b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlProgram.java new file mode 100644 index 0000000000..79e38b7c91 --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlProgram.java @@ -0,0 +1,83 @@ +package com.executorch.webrtc.gl; + +import android.opengl.GLES20; +import java.util.HashMap; +import java.util.Map; + +public class GlProgram { + private final int programId; + private final Map<String, Integer> attributeLocations = new HashMap<>(); + private final Map<String, Integer> uniformLocations = new HashMap<>(); + + public GlProgram(String vertexSource, String fragmentSource) { + int vertexShader = loadShader(GLES20.GL_VERTEX_SHADER, vertexSource); + int fragmentShader = loadShader(GLES20.GL_FRAGMENT_SHADER, fragmentSource); + programId = GLES20.glCreateProgram(); + GLES20.glAttachShader(programId, vertexShader); + GLES20.glAttachShader(programId, fragmentShader); + GLES20.glLinkProgram(programId); + GLES20.glDeleteShader(vertexShader); + GLES20.glDeleteShader(fragmentShader); + } + + public void use() { + GLES20.glUseProgram(programId); + } + + public int getAttributeLocation(String name) { + Integer loc = attributeLocations.get(name); + if (loc == null) { + loc = GLES20.glGetAttribLocation(programId, name); + attributeLocations.put(name, loc); + } + return loc; + } + + public void setUniformMatrix4(String name, float[] matrix) { + GLES20.glUniformMatrix4fv(getUniformLocation(name), 1, false, matrix, 0); + } + + public void setUniform1i(String name, int value) { + GLES20.glUniform1i(getUniformLocation(name), value); + } + + public void setUniform2f(String name, float x, float y) { + GLES20.glUniform2f(getUniformLocation(name), x, y); + } + + public void setUniform1f(String name, float value) { + GLES20.glUniform1f(getUniformLocation(name), value); + } + + public void setUniform1fv(String name, float[] values) { + GLES20.glUniform1fv(getUniformLocation(name), values.length, values, 0); + } + + public void bindTexture(String uniformName, int textureUnit, int textureId, int textureTarget) { + GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + textureUnit); + GLES20.glBindTexture(textureTarget, textureId); + setUniform1i(uniformName, textureUnit); + } + + public void release() { + GLES20.glDeleteProgram(programId); + attributeLocations.clear(); + uniformLocations.clear(); + } + + private int getUniformLocation(String name) { + Integer loc = uniformLocations.get(name); + if (loc == null) { + loc = GLES20.glGetUniformLocation(programId, name); + uniformLocations.put(name, loc); + } + return loc; + } + + private static int loadShader(int type, String source) { + int shader = GLES20.glCreateShader(type); + GLES20.glShaderSource(shader, source); + GLES20.glCompileShader(shader); + return shader; + } +} diff --git a/packages/react-native-executorch-webrtc/src/index.ts b/packages/react-native-executorch-webrtc/src/index.ts index 74cedab460..1d4ac30052 100644 --- a/packages/react-native-executorch-webrtc/src/index.ts +++ b/packages/react-native-executorch-webrtc/src/index.ts @@ -76,4 +76,7 @@ export { useWebRTCFrameProcessor, enableFrameProcessor, disableFrameProcessor, + PROCESSOR_NAMES, + type ProcessorName, + type WebRTCFrameProcessorOptions, } from './useWebRTCFrameProcessor'; diff --git a/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts
b/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts index a32126322e..da11a4ef6b 100644 --- a/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts +++ b/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts @@ -2,7 +2,13 @@ import { useEffect } from 'react'; import { Platform, DeviceEventEmitter } from 'react-native'; import type { MediaStream, MediaStreamTrack } from 'react-native-webrtc'; -const PROCESSOR_NAME = 'executorchBackgroundBlur'; +export const PROCESSOR_NAMES = { + default: 'executorchBackgroundBlur', + experimental: 'executorchBackgroundBlurNew', +} as const; + +export type ProcessorName = + (typeof PROCESSOR_NAMES)[keyof typeof PROCESSOR_NAMES]; /** * Result from frame processing @@ -20,10 +26,8 @@ export interface FrameProcessingResult { export interface WebRTCFrameProcessorOptions { enabled?: boolean; onResults?: (results: FrameProcessingResult) => void; - // Future options: - // modelPath?: string; - // modelType?: 'object_detection' | 'segmentation' | 'classification'; - // threshold?: number; + /** Which processor to use. Defaults to 'executorchBackgroundBlur' */ + processorName?: ProcessorName | string; } /** @@ -46,7 +50,11 @@ export function useWebRTCFrameProcessor( stream: MediaStream | null | undefined, options: WebRTCFrameProcessorOptions = {} ): void { - const { enabled = true, onResults } = options; + const { + enabled = true, + onResults, + processorName = PROCESSOR_NAMES.default, + } = options; useEffect(() => { if (!stream || !enabled) { return; @@ -76,9 +84,9 @@ export function useWebRTCFrameProcessor( try { const track = videoTrack as any; if (typeof track._setVideoEffects === 'function') { - track._setVideoEffects([PROCESSOR_NAME]); + track._setVideoEffects([processorName]); console.log( - `✅ ExecuTorch frame processor enabled on track ${videoTrack.id}` + `✅ ExecuTorch frame processor "${processorName}" enabled on track ${videoTrack.id}` ); } else { console.warn('useWebRTCFrameProcessor: _setVideoEffects not available'); @@ -109,22 +117,28 @@ export function useWebRTCFrameProcessor( ); } }; - }, [stream, enabled, onResults]); + }, [stream, enabled, onResults, processorName]); } /** * Manually enable ExecuTorch frame processing on a video track. 
* * @param videoTrack - The video track to process + * @param processorName - Which processor to use (default: 'executorchBackgroundBlur') * * @example * ```tsx * const stream = await mediaDevices.getUserMedia({ video: true }); * const track = stream.getVideoTracks()[0]; * enableFrameProcessor(track); + * // or use experimental: + * enableFrameProcessor(track, PROCESSOR_NAMES.experimental); * ``` */ -export function enableFrameProcessor(videoTrack: MediaStreamTrack): void { +export function enableFrameProcessor( + videoTrack: MediaStreamTrack, + processorName: ProcessorName | string = PROCESSOR_NAMES.default +): void { if (Platform.OS !== 'android') { console.warn('enableFrameProcessor: Currently only supported on Android'); return; @@ -133,9 +147,9 @@ export function enableFrameProcessor(videoTrack: MediaStreamTrack): void { try { const track = videoTrack as any; if (typeof track._setVideoEffects === 'function') { - track._setVideoEffects([PROCESSOR_NAME]); + track._setVideoEffects([processorName]); console.log( - `✅ ExecuTorch frame processor enabled on track ${videoTrack.id}` + `✅ ExecuTorch frame processor "${processorName}" enabled on track ${videoTrack.id}` ); } } catch (error) { From 583abe2ef0555868a208f1190ff9c98bfafb1062 Mon Sep 17 00:00:00 2001 From: chmjkb Date: Tue, 14 Apr 2026 10:13:01 +0200 Subject: [PATCH 05/19] refactor --- .../webrtc/ExecutorchFrameProcessor.kt | 270 ++++++++++------ .../com/executorch/webrtc/ExecutorchWebRTC.kt | 29 +- .../webrtc/ExecutorchWebRTCModule.kt | 42 ++- .../executorch/webrtc/MaskPostProcessor.java | 220 ++++++++++++++ .../webrtc/NewExecutorchFrameProcessor.kt | 63 ---- .../NewExecutorchFrameProcessorFactory.kt | 8 - .../executorch/webrtc/gl/GlBlurRenderer.java | 287 ++++++++---------- 7 files changed, 563 insertions(+), 356 deletions(-) create mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java delete mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessor.kt delete mode 100644 packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessorFactory.kt diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt index c28f648670..92aad505e0 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt @@ -9,30 +9,49 @@ import org.webrtc.TextureBufferImpl import org.webrtc.VideoFrame import org.webrtc.YuvConverter import java.nio.ByteBuffer -import java.nio.ByteOrder /** * WebRTC frame processor that applies background blur using GPU shaders + ExecuTorch segmentation. - * Uses OpenGL for blur (fast) and JNI for segmentation. + * Uses MaskPostProcessor for temporal smoothing (EMA) and edge refinement. + * + * Architecture matches fishjam's BackgroundBlurProcessor but uses ExecuTorch for mask generation + * instead of ML Kit. 
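+ * + * As a worked illustration of the EMA (illustrative factor): with smoothed = mix(previous, current, 0.6), a mask pixel that jumps from 0 to 1 settles as 0.60, 0.84, 0.94, ... over successive frames, which damps flicker at the cost of a short trail.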
*/ class ExecutorchFrameProcessor : VideoFrameProcessor { - private var frameCount = 0 - private var lastLogTime = System.currentTimeMillis() - private val TAG = "ExecutorchFrameProcessor" + companion object { + private const val TAG = "ExecutorchFrameProcessor" + private const val LOG_INTERVAL_FRAMES = 30 - private var modelLoaded = false - private var loadedModelPath: String? = null + @Volatile + private var pendingBlurRadius: Float = -1f + + @JvmStatic + fun setBlurRadius(radius: Float) { + pendingBlurRadius = radius + } + } - // GL-based blur renderer - private var renderer: GlBlurRenderer? = null + private val renderer = GlBlurRenderer() + private val maskPostProcessor = MaskPostProcessor() + + @Volatile + private var isProcessing = false + private var lastProcessedFrame: VideoFrame? = null private var yuvConverter: YuvConverter? = null - // Reusable buffers - private var maskByteBuffer: ByteBuffer? = null + private var modelLoaded = false + private var loadedModelPath: String? = null private var rgbaBuffer: ByteArray? = null + // Timing measurements + private var frameCount = 0 + private var totalTimeAccumulator = 0L + private var inferenceTimeAccumulator = 0L + private var maskPostProcessTimeAccumulator = 0L + private var gpuTimeAccumulator = 0L + init { - Log.d(TAG, "ExecutorchFrameProcessor created - GL blur pipeline") + Log.d(TAG, "ExecutorchFrameProcessor created") tryLoadModel() } @@ -61,51 +80,61 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { rgbaData: ByteArray, width: Int, height: Int, - rotation: Int + rotation: Int, ): ByteArray? - override fun process(frame: VideoFrame, helper: SurfaceTextureHelper): VideoFrame { - frameCount++ - if (!modelLoaded) tryLoadModel() - - // Log stats every second - val now = System.currentTimeMillis() - if (now - lastLogTime >= 1000) { - Log.d(TAG, "FPS: ${frameCount}, buffer: ${frame.buffer.javaClass.simpleName}") - lastLogTime = now - frameCount = 0 + override fun process( + frame: VideoFrame, + helper: SurfaceTextureHelper, + ): VideoFrame { + // Return cached frame if busy + if (isProcessing) { + if (lastProcessedFrame != null) { + lastProcessedFrame!!.retain() + return lastProcessedFrame!! 
+ } + frame.retain() + return frame } val buffer = frame.buffer if (buffer !is VideoFrame.TextureBuffer) { - // Not a texture buffer, return original frame.retain() return frame } + isProcessing = true return try { - processWithGl(frame, buffer, helper) + val result = processFrame(frame, buffer, helper) + // Cache processed frame + lastProcessedFrame?.release() + result.retain() + lastProcessedFrame = result + result } catch (e: Exception) { - Log.e(TAG, "GL processing failed: ${e.message}") + Log.e(TAG, "Error processing frame", e) frame.retain() frame + } finally { + isProcessing = false } } - private fun processWithGl( + private fun processFrame( frame: VideoFrame, textureBuffer: VideoFrame.TextureBuffer, - helper: SurfaceTextureHelper + helper: SurfaceTextureHelper, ): VideoFrame { + val totalStartTime = System.nanoTime() + + applyPendingBlurRadius() + + if (!modelLoaded) tryLoadModel() + val width = textureBuffer.width val height = textureBuffer.height - // Initialize renderer if needed - if (renderer == null) { - renderer = GlBlurRenderer() - } - renderer!!.ensureSetup(width, height) - + renderer.ensureSetup(width, height) if (yuvConverter == null) { yuvConverter = YuvConverter() } @@ -114,53 +143,113 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { val transformMatrix = convertToGlMatrix(textureBuffer.transformMatrix) val isOes = textureBuffer.type == VideoFrame.TextureBuffer.Type.OES + val gpuStartTime = System.nanoTime() + // 1. Render input texture to RGBA FBO - renderer!!.renderToRgbaFbo(textureBuffer.textureId, transformMatrix, isOes) + renderer.renderToRgbaFbo(textureBuffer.textureId, transformMatrix, isOes) // 2. Render downscaled for segmentation - renderer!!.renderDownscaled() + renderer.renderDownscaled() // 3. Read pixels for segmentation - val segPixels = renderer!!.readSegmentationPixels() - val segW = renderer!!.segmentationWidth - val segH = renderer!!.segmentationHeight + val segPixels = renderer.readSegmentationPixels() + val segW = renderer.segmentationWidth + val segH = renderer.segmentationHeight // 4. Run segmentation (via JNI) - val mask = runSegmentationOnPixels(segPixels, segW, segH, frame.rotation) - - // 5. Upload mask to GPU - if (mask != null) { - ensureMaskBuffer(mask.size) - maskByteBuffer!!.clear() - maskByteBuffer!!.put(mask) - maskByteBuffer!!.rewind() - renderer!!.uploadMask(maskByteBuffer!!, segW, segH) + val inferenceStartTime = System.nanoTime() + val rawMask = runSegmentationOnPixels(segPixels, segW, segH, frame.rotation) + val inferenceEndTime = System.nanoTime() + + if (rawMask != null) { + // 5. Post-process mask (morphology + EMA + Gaussian blur) + val maskPostProcessStartTime = System.nanoTime() + val processedMask = maskPostProcessor.process(rawMask, segW, segH) + val maskPostProcessEndTime = System.nanoTime() + maskPostProcessTimeAccumulator += (maskPostProcessEndTime - maskPostProcessStartTime) + + // 6. Upload processed mask to GPU + renderer.uploadMask(processedMask, segW, segH) } - // 6. Render blur (two-pass Gaussian) - renderer!!.renderBlur() - - // 7. Render composite (blend original + blurred using mask) - renderer!!.renderComposite() - - // 8. Create output TextureBuffer - val outputBuffer = TextureBufferImpl( - width, height, - VideoFrame.TextureBuffer.Type.RGB, - renderer!!.outputTextureId, - Matrix(), - helper.handler, - yuvConverter, - null - ) + // 7. Render blur + renderer.renderBlur() + + // 8. 
Render composite (blend original + blurred using mask) + renderer.renderComposite() + + val gpuEndTime = System.nanoTime() + + // 9. Create output TextureBuffer + val outputBuffer = + TextureBufferImpl( + width, + height, + VideoFrame.TextureBuffer.Type.RGB, + renderer.outputTextureId, + Matrix(), + helper.handler, + yuvConverter, + null, + ) + + val totalEndTime = System.nanoTime() + + // Accumulate timing measurements + totalTimeAccumulator += (totalEndTime - totalStartTime) + inferenceTimeAccumulator += (inferenceEndTime - inferenceStartTime) + gpuTimeAccumulator += (gpuEndTime - gpuStartTime) - (inferenceEndTime - inferenceStartTime) + frameCount++ + + // Log averages every LOG_INTERVAL_FRAMES frames + if (frameCount >= LOG_INTERVAL_FRAMES) { + val avgTotalMs = (totalTimeAccumulator / frameCount) / 1_000_000.0 + val avgInferenceMs = (inferenceTimeAccumulator / frameCount) / 1_000_000.0 + val avgMaskPostProcessMs = (maskPostProcessTimeAccumulator / frameCount) / 1_000_000.0 + val avgGpuMs = (gpuTimeAccumulator / frameCount) / 1_000_000.0 + val fps = 1000.0 / avgTotalMs + + Log.d( + TAG, + String.format( + "Avg over %d frames: Total=%.2fms (%.1f FPS) | Inference=%.2fms | MaskPostProcess=%.2fms | GPU=%.2fms", + frameCount, + avgTotalMs, + fps, + avgInferenceMs, + avgMaskPostProcessMs, + avgGpuMs, + ), + ) + + // Reset accumulators + frameCount = 0 + totalTimeAccumulator = 0L + inferenceTimeAccumulator = 0L + maskPostProcessTimeAccumulator = 0L + gpuTimeAccumulator = 0L + } return VideoFrame(outputBuffer, frame.rotation, frame.timestampNs) } - private fun runSegmentationOnPixels(pixels: ByteBuffer, width: Int, height: Int, rotation: Int): ByteArray? { + private fun applyPendingBlurRadius() { + val radius = pendingBlurRadius + if (radius < 0f) return + pendingBlurRadius = -1f + renderer.setBlurRadius(radius) + } + + private fun runSegmentationOnPixels( + pixels: ByteBuffer, + width: Int, + height: Int, + rotation: Int, + ): ByteArray? 
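+ // An all-zero fallback mask marks every pixel as background, so the
+ // whole frame is blurred until the segmentation model finishes loading.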
{ if (!modelLoaded) { - // Return placeholder ellipse mask if model not loaded - return createEllipseMask(width, height) + val result = ByteArray(width * height) + result.fill(0) + return result } // Convert ByteBuffer to ByteArray for JNI @@ -174,7 +263,10 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { return runSegmentation(rgbaBuffer!!, width, height, rotation) } - private fun createEllipseMask(width: Int, height: Int): ByteArray { + private fun createEllipseMask( + width: Int, + height: Int, + ): ByteArray { val mask = ByteArray(width * height) val centerX = width / 2f val centerY = height / 2f @@ -186,39 +278,47 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { val dx = (x - centerX) / radiusX val dy = (y - centerY) / radiusY val dist = dx * dx + dy * dy - val value = when { - dist < 1.0f -> 255 - dist < 1.3f -> ((1.0f - (dist - 1.0f) / 0.3f) * 255).toInt() - else -> 0 - } + val value = + when { + dist < 1.0f -> 255 + dist < 1.3f -> ((1.0f - (dist - 1.0f) / 0.3f) * 255).toInt() + else -> 0 + } mask[y * width + x] = value.toByte() } } return mask } - private fun ensureMaskBuffer(size: Int) { - if (maskByteBuffer == null || maskByteBuffer!!.capacity() < size) { - maskByteBuffer = ByteBuffer.allocateDirect(size).order(ByteOrder.nativeOrder()) - } - } - private fun convertToGlMatrix(androidMatrix: android.graphics.Matrix): FloatArray { val values = FloatArray(9) androidMatrix.getValues(values) // Convert 3x3 to 4x4 matrix for GL return floatArrayOf( - values[0], values[3], 0f, values[6], - values[1], values[4], 0f, values[7], - 0f, 0f, 1f, 0f, - values[2], values[5], 0f, values[8] + values[0], + values[3], + 0f, + values[6], + values[1], + values[4], + 0f, + values[7], + 0f, + 0f, + 1f, + 0f, + values[2], + values[5], + 0f, + values[8], ) } fun release() { - renderer?.release() - renderer = null + renderer.release() yuvConverter?.release() yuvConverter = null + lastProcessedFrame?.release() + lastProcessedFrame = null } } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt index 61084187ac..e6bc715338 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt @@ -5,34 +5,24 @@ import com.oney.WebRTCModule.videoEffects.ProcessorProvider /** * Main entry point for ExecuTorch WebRTC integration. - * Call registerProcessors() from your Application.onCreate() + * Registers the background blur processor with react-native-webrtc. */ object ExecutorchWebRTC { private const val TAG = "ExecutorchWebRTC" const val PROCESSOR_NAME = "executorchBackgroundBlur" - const val PROCESSOR_NAME_NEW = "executorchBackgroundBlurNew" // Configuration for background removal var modelPath: String? = null /** - * Registers both frame processors with react-native-webrtc. - * - "executorchBackgroundBlur" -> existing GL-based processor - * - "executorchBackgroundBlurNew" -> new experimental processor + * Registers the frame processor with react-native-webrtc. 
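+ * Registration failures are caught and logged rather than thrown.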
*/ fun registerProcessors() { try { ProcessorProvider.addProcessor(PROCESSOR_NAME, ExecutorchFrameProcessorFactory()) - Log.d(TAG, "✅ Registered processor: $PROCESSOR_NAME") + Log.d(TAG, "Registered processor: $PROCESSOR_NAME") } catch (e: Exception) { - Log.e(TAG, "❌ Failed to register $PROCESSOR_NAME", e) - } - - try { - ProcessorProvider.addProcessor(PROCESSOR_NAME_NEW, NewExecutorchFrameProcessorFactory()) - Log.d(TAG, "✅ Registered processor: $PROCESSOR_NAME_NEW") - } catch (e: Exception) { - Log.e(TAG, "❌ Failed to register $PROCESSOR_NAME_NEW", e) + Log.e(TAG, "Failed to register $PROCESSOR_NAME", e) } } @@ -40,9 +30,16 @@ object ExecutorchWebRTC { * Configure the segmentation model for background removal */ fun configureModel(path: String) { - Log.d(TAG, "📥 configureModel called with path: $path") + Log.d(TAG, "configureModel called with path: $path") modelPath = path - Log.d(TAG, "✅ Model path configured - processors will load model on next frame") + Log.d(TAG, "Model path configured - processor will load model on next frame") + } + + /** + * Set the blur radius dynamically + */ + fun setBlurRadius(radius: Float) { + ExecutorchFrameProcessor.setBlurRadius(radius) } /** diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt index eead8f9be5..7b15f07354 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt @@ -3,9 +3,7 @@ package com.executorch.webrtc import com.facebook.react.bridge.ReactApplicationContext import com.facebook.react.bridge.ReactContextBaseJavaModule import com.facebook.react.bridge.ReactMethod -import com.facebook.react.bridge.WritableMap import com.facebook.react.module.annotations.ReactModule -import com.facebook.react.modules.core.DeviceEventManagerModule /** * Native module that auto-registers the frame processor when loaded. @@ -19,29 +17,13 @@ class ExecutorchWebRTCModule( init { System.loadLibrary("executorch") System.loadLibrary("react-native-executorch-webrtc") - } + const val NAME = "ExecutorchWebRTC" private var initialized = false - private var moduleContext: ReactApplicationContext? 
= null - - - /** - * Send event to JavaScript - */ - fun sendEvent( - eventName: String, - params: WritableMap?, - ) { - moduleContext - ?.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java) - ?.emit(eventName, params) - } } init { - moduleContext = reactContext - // Auto-register the processor when the module is loaded if (!initialized) { ExecutorchWebRTC.registerProcessors() @@ -69,22 +51,34 @@ class ExecutorchWebRTCModule( ExecutorchWebRTC.configureModel(modelPath) } - // Legacy alias + /** + * Configure the segmentation model and blur intensity + * @param modelPath Path to the .pte model file + * @param blurIntensity Blur sigma value (default 12.0) + */ @ReactMethod fun configureBackgroundBlur( modelPath: String, blurIntensity: Int, ) { ExecutorchWebRTC.configureModel(modelPath) + ExecutorchWebRTC.setBlurRadius(blurIntensity.toFloat()) + } + + /** + * Set the blur radius dynamically + * @param radius Blur sigma value + */ + @ReactMethod + fun setBlurRadius(radius: Double) { + ExecutorchWebRTC.setBlurRadius(radius.toFloat()) } /** * Get available processor names for use with videoTrack._setVideoEffects() */ - override fun getConstants(): MutableMap { - return mutableMapOf( + override fun getConstants(): MutableMap = + mutableMapOf( "PROCESSOR_NAME" to ExecutorchWebRTC.PROCESSOR_NAME, - "PROCESSOR_NAME_NEW" to ExecutorchWebRTC.PROCESSOR_NAME_NEW ) - } } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java new file mode 100644 index 0000000000..d31d1cb7c2 --- /dev/null +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java @@ -0,0 +1,220 @@ +package com.executorch.webrtc; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +/** + * Post-processes segmentation masks for better visual quality and temporal stability. + * Applies morphological cleaning, EMA temporal smoothing, and Gaussian blur. + * + * Adapted from fishjam's implementation but works with byte[] input (0-255) + * from ExecuTorch instead of float[] from ML Kit. + */ +public class MaskPostProcessor { + + private static final float BINARIZE_THRESHOLD = 0.5f; + private static final float EMA_ALPHA = 0.85f; + private static final float THRESHOLD = EMA_ALPHA + 0.05f; + private static final float GAUSSIAN_SIGMA = 2.0f; + private static final int GAUSSIAN_RADIUS = 3; + + private float[] smoothedMask; + private float[] tempA; + private float[] rawFloatMask; + private final float[] gaussianKernel; + private int maskWidth; + private int maskHeight; + private boolean hasHistory; + + private ByteBuffer outputBuffer; + private int outputBufferCapacity; + + public MaskPostProcessor() { + gaussianKernel = computeGaussianKernel(GAUSSIAN_SIGMA, GAUSSIAN_RADIUS); + } + + /** + * Process a byte mask (0-255) from ExecuTorch segmentation. + * Returns processed mask as ByteBuffer ready for GPU upload. 
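+ * The returned buffer is reused between calls, so it must be consumed
+ * before the next call to process().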
+ */ + public ByteBuffer process(byte[] rawMask, int w, int h) { + ensureBuffers(w, h); + int len = w * h; + + // Convert byte mask (0-255) to float mask (0-1) + for (int i = 0; i < len; i++) { + rawFloatMask[i] = (rawMask[i] & 0xFF) / 255.0f; + } + + // Apply morphological cleaning (erode + dilate) + morphologicalClean(rawFloatMask, tempA, w, h); + + // Apply EMA temporal smoothing and threshold + applyEmaAndThreshold(tempA, rawFloatMask, len); + + // Apply Gaussian blur for smooth edges + gaussianBlurHorizontal(rawFloatMask, tempA, w, h); + gaussianBlurVertical(tempA, rawFloatMask, w, h); + + // Convert back to bytes for GPU upload + convertMaskToBytes(rawFloatMask, len); + + return outputBuffer; + } + + public void reset() { + hasHistory = false; + } + + private void ensureBuffers(int w, int h) { + if (w != maskWidth || h != maskHeight) { + int len = w * h; + smoothedMask = new float[len]; + tempA = new float[len]; + rawFloatMask = new float[len]; + maskWidth = w; + maskHeight = h; + hasHistory = false; + } + + int requiredCapacity = w * h; + if (outputBuffer == null || outputBufferCapacity < requiredCapacity) { + outputBuffer = ByteBuffer.allocateDirect(requiredCapacity); + outputBuffer.order(ByteOrder.nativeOrder()); + outputBufferCapacity = requiredCapacity; + } + } + + private static void binarizeInPlace(float[] mask, int len) { + for (int i = 0; i < len; i++) { + mask[i] = mask[i] > BINARIZE_THRESHOLD ? 1.0f : 0.0f; + } + } + + private void morphologicalClean(float[] src, float[] dst, int w, int h) { + int len = w * h; + binarizeInPlace(src, len); + erode(src, dst, w, h); + dilate(dst, src, w, h); + System.arraycopy(src, 0, dst, 0, len); + } + + private void erode(float[] src, float[] dst, int w, int h) { + for (int y = 0; y < h; y++) { + for (int x = 0; x < w; x++) { + float min = 1.0f; + for (int dy = -1; dy <= 1; dy++) { + int ny = y + dy; + if (ny < 0 || ny >= h) { + min = 0.0f; + continue; + } + for (int dx = -1; dx <= 1; dx++) { + int nx = x + dx; + if (nx < 0 || nx >= w) { + min = 0.0f; + continue; + } + float v = src[ny * w + nx]; + if (v < min) { + min = v; + } + } + } + dst[y * w + x] = min; + } + } + } + + private void dilate(float[] src, float[] dst, int w, int h) { + for (int y = 0; y < h; y++) { + for (int x = 0; x < w; x++) { + float max = 0.0f; + for (int dy = -1; dy <= 1; dy++) { + int ny = y + dy; + if (ny < 0 || ny >= h) continue; + for (int dx = -1; dx <= 1; dx++) { + int nx = x + dx; + if (nx < 0 || nx >= w) continue; + float v = src[ny * w + nx]; + if (v > max) max = v; + } + } + dst[y * w + x] = max; + } + } + } + + private void applyEmaAndThreshold(float[] current, float[] dst, int len) { + float oneMinusAlpha = 1.0f - EMA_ALPHA; + if (!hasHistory) { + System.arraycopy(current, 0, smoothedMask, 0, len); + hasHistory = true; + } else { + for (int i = 0; i < len; i++) { + smoothedMask[i] = EMA_ALPHA * smoothedMask[i] + oneMinusAlpha * current[i]; + } + } + for (int i = 0; i < len; i++) { + dst[i] = smoothedMask[i] > THRESHOLD ? 
1.0f : 0.0f; + } + } + + private void gaussianBlurHorizontal(float[] src, float[] dst, int w, int h) { + int r = GAUSSIAN_RADIUS; + for (int y = 0; y < h; y++) { + int rowOffset = y * w; + for (int x = 0; x < w; x++) { + float sum = 0; + for (int k = -r; k <= r; k++) { + int sx = Math.min(Math.max(x + k, 0), w - 1); + sum += src[rowOffset + sx] * gaussianKernel[k + r]; + } + dst[rowOffset + x] = sum; + } + } + } + + private void gaussianBlurVertical(float[] src, float[] dst, int w, int h) { + int r = GAUSSIAN_RADIUS; + for (int y = 0; y < h; y++) { + for (int x = 0; x < w; x++) { + float sum = 0; + for (int k = -r; k <= r; k++) { + int sy = Math.min(Math.max(y + k, 0), h - 1); + sum += src[sy * w + x] * gaussianKernel[k + r]; + } + dst[y * w + x] = sum; + } + } + } + + private static float[] computeGaussianKernel(float sigma, int radius) { + int size = 2 * radius + 1; + float[] kernel = new float[size]; + float sum = 0; + for (int i = 0; i < size; i++) { + int d = i - radius; + kernel[i] = (float) Math.exp(-(d * d) / (2.0 * sigma * sigma)); + sum += kernel[i]; + } + for (int i = 0; i < size; i++) { + kernel[i] /= sum; + } + return kernel; + } + + private void convertMaskToBytes(float[] source, int length) { + outputBuffer.clear(); + for (int i = 0; i < length; i++) { + float value = source[i]; + if (value < 0f) { + value = 0f; + } else if (value > 1f) { + value = 1f; + } + outputBuffer.put((byte) (value * 255f)); + } + outputBuffer.rewind(); + } +} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessor.kt deleted file mode 100644 index e841f5cbf6..0000000000 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessor.kt +++ /dev/null @@ -1,63 +0,0 @@ -package com.executorch.webrtc - -import android.util.Log -import com.oney.WebRTCModule.videoEffects.VideoFrameProcessor -import org.webrtc.SurfaceTextureHelper -import org.webrtc.VideoFrame -import org.webrtc.YuvConverter - -class NewExecutorchFrameProcessor : VideoFrameProcessor{ - private val TAG = "ExecuTorchBlurFrameProcessor" - private val lastFrame: VideoFrame? 
= null - private val yuvConverter = YuvConverter() - private var isModelLoaded = false - private lateinit var blurRadius: Number; - - // JNI: Load the segmentation model -//private external fun loadModel(modelPath: String): Boolean - - fun setBlurRadius(blurRadius: Number) {this.blurRadius = blurRadius} - - fun tryLoadModel(modelPath: String) { - // TODO: maybe we should throw here - if (isModelLoaded) return - try { - Log.d(TAG, "Loading selfie segmentation model with model path: $modelPath") - // val success = loadModel(modelPath) - val success = true; - if (success) { - this.isModelLoaded = true - Log.d(TAG, "Successfully loaded selfie segmentation model") - } else { - this.isModelLoaded = false - Log.e(TAG, "Failed to load selfie segmentation model with model path: $modelPath") - } - } catch (e: Exception) { - Log.e(TAG, "Failed to load model", e) - } - } - - private fun internalProcessFrame( - frame: VideoFrame.Buffer - ){ - - } - - override fun process( - frame: VideoFrame, - textureHelper: SurfaceTextureHelper - ): VideoFrame { - if (!this.isModelLoaded) { - Log.d(TAG, "The model was not initialized properly," + - " make sure to run tryLoadModel() before using the frame processor.") - frame.retain() - return frame; - } - - val buf = frame.buffer - val outputFrame = this.internalProcessFrame(buf) - - frame.retain() - return frame - } -} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessorFactory.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessorFactory.kt deleted file mode 100644 index 0a708b5601..0000000000 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/NewExecutorchFrameProcessorFactory.kt +++ /dev/null @@ -1,8 +0,0 @@ -package com.executorch.webrtc - -import com.oney.WebRTCModule.videoEffects.VideoFrameProcessor -import com.oney.WebRTCModule.videoEffects.VideoFrameProcessorFactoryInterface - -class NewExecutorchFrameProcessorFactory : VideoFrameProcessorFactoryInterface { - override fun build(): VideoFrameProcessor = NewExecutorchFrameProcessor() -} diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java index f186310a62..8592039cd5 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java @@ -2,9 +2,17 @@ import android.opengl.GLES11Ext; import android.opengl.GLES20; + import java.nio.ByteBuffer; import java.nio.ByteOrder; +/** + * OpenGL-based blur renderer for WebRTC video frames. + * Handles texture conversion, downscaling for segmentation, Gaussian blur, and compositing. + * + * Simplified version matching fishjam's implementation - temporal smoothing is now + * handled by MaskPostProcessor on CPU instead of GPU shaders. 
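+ * Per-frame flow: renderToRgbaFbo -> renderDownscaled -> readSegmentationPixels
+ * -> (CPU segmentation) -> uploadMask -> renderBlur -> renderComposite.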
+ */ public class GlBlurRenderer { private static final String VERTEX_SHADER = @@ -31,13 +39,17 @@ public class GlBlurRenderer { "precision mediump float;\n" + "varying vec2 vTexCoord;\n" + "uniform samplerExternalOES uTexture;\n" + - "void main() { gl_FragColor = texture2D(uTexture, vTexCoord); }\n"; + "void main() {\n" + + " gl_FragColor = texture2D(uTexture, vTexCoord);\n" + + "}\n"; private static final String FRAGMENT_PASSTHROUGH = "precision mediump float;\n" + "varying vec2 vTexCoord;\n" + "uniform sampler2D uTexture;\n" + - "void main() { gl_FragColor = texture2D(uTexture, vTexCoord); }\n"; + "void main() {\n" + + " gl_FragColor = texture2D(uTexture, vTexCoord);\n" + + "}\n"; private static final String FRAGMENT_BLUR = "precision mediump float;\n" + @@ -56,78 +68,60 @@ public class GlBlurRenderer { " gl_FragColor = color;\n" + "}\n"; - // Temporal smoothing shader - blends current mask with previous frame - private static final String FRAGMENT_TEMPORAL_BLEND = - "precision mediump float;\n" + - "varying vec2 vTexCoord;\n" + - "uniform sampler2D uCurrentMask;\n" + - "uniform sampler2D uPreviousMask;\n" + - "uniform float uBlendFactor;\n" + - "void main() {\n" + - " float current = texture2D(uCurrentMask, vTexCoord).r;\n" + - " float previous = texture2D(uPreviousMask, vTexCoord).r;\n" + - " float blended = mix(previous, current, uBlendFactor);\n" + - " gl_FragColor = vec4(blended, blended, blended, 1.0);\n" + - "}\n"; - private static final String FRAGMENT_COMPOSITE = "precision mediump float;\n" + "varying vec2 vTexCoord;\n" + "uniform sampler2D uOriginal;\n" + "uniform sampler2D uBlurred;\n" + "uniform sampler2D uMask;\n" + - "uniform vec2 uMaskTexelSize;\n" + "void main() {\n" + " vec4 original = texture2D(uOriginal, vTexCoord);\n" + " vec4 blurred = texture2D(uBlurred, vTexCoord);\n" + - " // Sample mask with 3x3 blur for edge smoothing\n" + - " float mask = 0.0;\n" + - " mask += texture2D(uMask, vTexCoord + vec2(-uMaskTexelSize.x, -uMaskTexelSize.y)).r * 0.0625;\n" + - " mask += texture2D(uMask, vTexCoord + vec2(0.0, -uMaskTexelSize.y)).r * 0.125;\n" + - " mask += texture2D(uMask, vTexCoord + vec2(uMaskTexelSize.x, -uMaskTexelSize.y)).r * 0.0625;\n" + - " mask += texture2D(uMask, vTexCoord + vec2(-uMaskTexelSize.x, 0.0)).r * 0.125;\n" + - " mask += texture2D(uMask, vTexCoord).r * 0.25;\n" + - " mask += texture2D(uMask, vTexCoord + vec2(uMaskTexelSize.x, 0.0)).r * 0.125;\n" + - " mask += texture2D(uMask, vTexCoord + vec2(-uMaskTexelSize.x, uMaskTexelSize.y)).r * 0.0625;\n" + - " mask += texture2D(uMask, vTexCoord + vec2(0.0, uMaskTexelSize.y)).r * 0.125;\n" + - " mask += texture2D(uMask, vTexCoord + vec2(uMaskTexelSize.x, uMaskTexelSize.y)).r * 0.0625;\n" + - " // Normalize from 0-255 byte range to 0-1\n" + - " mask = mask;\n" + - " // Apply smoothstep for better edge transition (0.2-0.8 range)\n" + - " mask = clamp((mask - 0.2) / 0.6, 0.0, 1.0);\n" + - " mask = mask * mask * (3.0 - 2.0 * mask);\n" + + " float mask = texture2D(uMask, vTexCoord).r;\n" + " gl_FragColor = vec4(mix(blurred.rgb, original.rgb, mask), 1.0);\n" + "}\n"; private static final int SEGMENTATION_WIDTH = 256; - private static final int SEGMENTATION_HEIGHT = 256; - private static final int BLUR_DOWNSCALE = 1; // Full resolution for better quality - private static final float TEMPORAL_BLEND_FACTOR = 0.6f; // 0.6 = 60% new, 40% previous + private static final int SEGMENTATION_HEIGHT = 144; + private static final int BLUR_DOWNSCALE = 2; private final FullscreenQuad quad = new FullscreenQuad(); - private GlProgram 
oesProgram, rgbProgram, passthroughProgram, blurProgram, compositeProgram; - private GlProgram temporalBlendProgram; - private GlFramebuffer rgbaFbo, segmentationFbo, blurFboA, blurFboB, outputFbo; - // Temporal smoothing: ping-pong between two mask FBOs - private GlFramebuffer maskFboA, maskFboB; - private int rawMaskTexture; // Incoming mask before temporal smoothing - private boolean useMaskFboA = true; // Track which FBO has the "previous" mask - private int currentWidth, currentHeight; - private int currentMaskWidth, currentMaskHeight; + private GlProgram oesProgram; + private GlProgram rgbProgram; + private GlProgram passthroughProgram; + private GlProgram blurProgram; + private GlProgram compositeProgram; + + private GlFramebuffer rgbaFramebuffer; + private GlFramebuffer segmentationFramebuffer; + private GlFramebuffer blurFramebufferA; + private GlFramebuffer blurFramebufferB; + private GlFramebuffer outputFramebuffer; + + private int maskTexture; + + private int currentWidth; + private int currentHeight; private boolean initialized; private final float[] blurWeights = new float[9]; private final float[] blurOffsets = new float[9]; + private ByteBuffer segPixelBuffer; public GlBlurRenderer() { - computeGaussianKernel(16.0f); // Increased sigma for stronger blur + computeGaussianKernel(12.0f); } public void ensureSetup(int width, int height) { - if (initialized && width == currentWidth && height == currentHeight) return; - if (initialized) releaseGlResources(); + if (initialized && width == currentWidth && height == currentHeight) { + return; + } + + if (initialized) { + releaseGlResources(); + } currentWidth = width; currentHeight = height; @@ -137,144 +131,99 @@ public void ensureSetup(int width, int height) { passthroughProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_PASSTHROUGH); blurProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_BLUR); compositeProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_COMPOSITE); - temporalBlendProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_TEMPORAL_BLEND); - - rgbaFbo = new GlFramebuffer(width, height); - segmentationFbo = new GlFramebuffer(SEGMENTATION_WIDTH, SEGMENTATION_HEIGHT); - - int blurW = Math.max(1, width / BLUR_DOWNSCALE); - int blurH = Math.max(1, height / BLUR_DOWNSCALE); - blurFboA = new GlFramebuffer(blurW, blurH); - blurFboB = new GlFramebuffer(blurW, blurH); - outputFbo = new GlFramebuffer(width, height); - - // Temporal smoothing mask FBOs (will be sized on first mask upload) - rawMaskTexture = GlFramebuffer.createTexture2D(); - maskFboA = null; - maskFboB = null; - currentMaskWidth = 0; - currentMaskHeight = 0; - useMaskFboA = true; + + rgbaFramebuffer = new GlFramebuffer(width, height); + segmentationFramebuffer = new GlFramebuffer(SEGMENTATION_WIDTH, SEGMENTATION_HEIGHT); + + int blurWidth = Math.max(1, width / BLUR_DOWNSCALE); + int blurHeight = Math.max(1, height / BLUR_DOWNSCALE); + blurFramebufferA = new GlFramebuffer(blurWidth, blurHeight); + blurFramebufferB = new GlFramebuffer(blurWidth, blurHeight); + outputFramebuffer = new GlFramebuffer(width, height); + + maskTexture = GlFramebuffer.createTexture2D(); + initialized = true; } public void renderToRgbaFbo(int textureId, float[] transformMatrix, boolean isOes) { - rgbaFbo.bind(); - GlProgram prog = isOes ? oesProgram : rgbProgram; - prog.use(); - prog.setUniformMatrix4("uTexMatrix", transformMatrix); + rgbaFramebuffer.bind(); + GlProgram program = isOes ? 
oesProgram : rgbProgram; + program.use(); + program.setUniformMatrix4("uTexMatrix", transformMatrix); int target = isOes ? GLES11Ext.GL_TEXTURE_EXTERNAL_OES : GLES20.GL_TEXTURE_2D; - prog.bindTexture("uTexture", 0, textureId, target); - quad.draw(prog); + program.bindTexture("uTexture", 0, textureId, target); + quad.draw(program); GlFramebuffer.unbind(); } public void renderDownscaled() { - drawTexture(passthroughProgram, rgbaFbo.getTextureId(), segmentationFbo); + drawTexture(passthroughProgram, rgbaFramebuffer.getTextureId(), segmentationFramebuffer); } public ByteBuffer readSegmentationPixels() { - int cap = SEGMENTATION_WIDTH * SEGMENTATION_HEIGHT * 4; - if (segPixelBuffer == null || segPixelBuffer.capacity() < cap) { - segPixelBuffer = ByteBuffer.allocateDirect(cap).order(ByteOrder.nativeOrder()); + int requiredCapacity = SEGMENTATION_WIDTH * SEGMENTATION_HEIGHT * 4; + if (segPixelBuffer == null || segPixelBuffer.capacity() < requiredCapacity) { + segPixelBuffer = ByteBuffer.allocateDirect(requiredCapacity); + segPixelBuffer.order(ByteOrder.nativeOrder()); } segPixelBuffer.clear(); - segmentationFbo.readPixels(segPixelBuffer); + segmentationFramebuffer.readPixels(segPixelBuffer); segPixelBuffer.rewind(); return segPixelBuffer; } public void uploadMask(ByteBuffer maskData, int maskWidth, int maskHeight) { - // Create/resize mask FBOs if needed - if (maskFboA == null || currentMaskWidth != maskWidth || currentMaskHeight != maskHeight) { - if (maskFboA != null) maskFboA.release(); - if (maskFboB != null) maskFboB.release(); - maskFboA = new GlFramebuffer(maskWidth, maskHeight); - maskFboB = new GlFramebuffer(maskWidth, maskHeight); - currentMaskWidth = maskWidth; - currentMaskHeight = maskHeight; - useMaskFboA = true; - - // Initialize both FBOs with the first mask (no blending on first frame) - GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, rawMaskTexture); - GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE, - maskWidth, maskHeight, 0, GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE, maskData); - GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0); - drawTexture(passthroughProgram, rawMaskTexture, maskFboA); - drawTexture(passthroughProgram, rawMaskTexture, maskFboB); - return; - } - - // Upload raw mask to texture - GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, rawMaskTexture); + GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, maskTexture); GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE, - maskWidth, maskHeight, 0, GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE, maskData); + maskWidth, maskHeight, 0, + GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE, maskData); GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0); - - // Temporal blend: mix current mask with previous frame's mask - GlFramebuffer previousFbo = useMaskFboA ? maskFboA : maskFboB; - GlFramebuffer outputMaskFbo = useMaskFboA ? 
maskFboB : maskFboA; - - outputMaskFbo.bind(); - temporalBlendProgram.use(); - temporalBlendProgram.bindTexture("uCurrentMask", 0, rawMaskTexture, GLES20.GL_TEXTURE_2D); - temporalBlendProgram.bindTexture("uPreviousMask", 1, previousFbo.getTextureId(), GLES20.GL_TEXTURE_2D); - temporalBlendProgram.setUniform1f("uBlendFactor", TEMPORAL_BLEND_FACTOR); - quad.draw(temporalBlendProgram); - GlFramebuffer.unbind(); - - // Flip for next frame - useMaskFboA = !useMaskFboA; } public void renderBlur() { - int blurW = blurFboA.getWidth(); - int blurH = blurFboA.getHeight(); - drawTexture(passthroughProgram, rgbaFbo.getTextureId(), blurFboA); - // First pass: horizontal + vertical - renderBlurPass(blurFboA.getTextureId(), blurFboB, 1.0f / blurW, 0.0f); - renderBlurPass(blurFboB.getTextureId(), blurFboA, 0.0f, 1.0f / blurH); - // Second pass: horizontal + vertical for stronger blur - renderBlurPass(blurFboA.getTextureId(), blurFboB, 1.0f / blurW, 0.0f); - renderBlurPass(blurFboB.getTextureId(), blurFboA, 0.0f, 1.0f / blurH); + int blurWidth = blurFramebufferA.getWidth(); + int blurHeight = blurFramebufferA.getHeight(); + drawTexture(passthroughProgram, rgbaFramebuffer.getTextureId(), blurFramebufferA); + renderBlurPass(blurFramebufferA.getTextureId(), blurFramebufferB, 1.0f / blurWidth, 0.0f); + renderBlurPass(blurFramebufferB.getTextureId(), blurFramebufferA, 0.0f, 1.0f / blurHeight); } public void renderComposite() { - // Skip if mask hasn't been uploaded yet - if (maskFboA == null || maskFboB == null) return; - - outputFbo.bind(); + outputFramebuffer.bind(); compositeProgram.use(); - compositeProgram.bindTexture("uOriginal", 0, rgbaFbo.getTextureId(), GLES20.GL_TEXTURE_2D); - compositeProgram.bindTexture("uBlurred", 1, blurFboA.getTextureId(), GLES20.GL_TEXTURE_2D); - // Use the temporally smoothed mask (the one we just wrote to, which is now "previous") - int smoothedMaskTexture = useMaskFboA ? 
maskFboB.getTextureId() : maskFboA.getTextureId(); - compositeProgram.bindTexture("uMask", 2, smoothedMaskTexture, GLES20.GL_TEXTURE_2D); - // Set mask texel size for edge blur sampling (based on output resolution) - compositeProgram.setUniform2f("uMaskTexelSize", 1.0f / currentWidth, 1.0f / currentHeight); + compositeProgram.bindTexture("uOriginal", 0, rgbaFramebuffer.getTextureId(), GLES20.GL_TEXTURE_2D); + compositeProgram.bindTexture("uBlurred", 1, blurFramebufferA.getTextureId(), GLES20.GL_TEXTURE_2D); + compositeProgram.bindTexture("uMask", 2, maskTexture, GLES20.GL_TEXTURE_2D); quad.draw(compositeProgram); GlFramebuffer.unbind(); } - public int getOutputTextureId() { return outputFbo.getTextureId(); } - public int getSegmentationWidth() { return SEGMENTATION_WIDTH; } - public int getSegmentationHeight() { return SEGMENTATION_HEIGHT; } + public int getOutputTextureId() { + return outputFramebuffer.getTextureId(); + } - public void setBlurRadius(float sigma) { - computeGaussianKernel(sigma); + public int getSegmentationWidth() { + return SEGMENTATION_WIDTH; + } + + public int getSegmentationHeight() { + return SEGMENTATION_HEIGHT; } public void release() { - if (!initialized) return; + if (!initialized) { + return; + } releaseGlResources(); segPixelBuffer = null; initialized = false; } - private void renderBlurPass(int inputTex, GlFramebuffer outFbo, float dirX, float dirY) { - outFbo.bind(); + private void renderBlurPass(int inputTexture, GlFramebuffer outputFbo, float dirX, float dirY) { + outputFbo.bind(); blurProgram.use(); - blurProgram.bindTexture("uTexture", 0, inputTex, GLES20.GL_TEXTURE_2D); + blurProgram.bindTexture("uTexture", 0, inputTexture, GLES20.GL_TEXTURE_2D); blurProgram.setUniform2f("uDirection", dirX, dirY); blurProgram.setUniform1fv("uWeights", blurWeights); blurProgram.setUniform1fv("uOffsets", blurOffsets); @@ -282,39 +231,57 @@ private void renderBlurPass(int inputTex, GlFramebuffer outFbo, float dirX, floa GlFramebuffer.unbind(); } - private void drawTexture(GlProgram prog, int textureId, GlFramebuffer outFbo) { - outFbo.bind(); - prog.use(); - prog.bindTexture("uTexture", 0, textureId, GLES20.GL_TEXTURE_2D); - quad.draw(prog); + private void drawTexture(GlProgram program, int textureId, GlFramebuffer outputFbo) { + outputFbo.bind(); + program.use(); + program.bindTexture("uTexture", 0, textureId, GLES20.GL_TEXTURE_2D); + quad.draw(program); GlFramebuffer.unbind(); } + public void setBlurRadius(float sigma) { + computeGaussianKernel(sigma); + } + private void computeGaussianKernel(float sigma) { float sum = 0; for (int i = 0; i < 9; i++) { - blurOffsets[i] = i; + blurOffsets[i] = (float) i; blurWeights[i] = (float) (Math.exp(-(i * i) / (2.0 * sigma * sigma)) / (Math.sqrt(2.0 * Math.PI) * sigma)); sum += (i == 0) ? 
blurWeights[i] : 2.0f * blurWeights[i]; } - for (int i = 0; i < 9; i++) blurWeights[i] /= sum; + for (int i = 0; i < 9; i++) { + blurWeights[i] /= sum; + } } private void releaseGlResources() { - if (rgbaFbo != null) rgbaFbo.release(); - if (segmentationFbo != null) segmentationFbo.release(); - if (blurFboA != null) blurFboA.release(); - if (blurFboB != null) blurFboB.release(); - if (outputFbo != null) outputFbo.release(); - if (maskFboA != null) maskFboA.release(); - if (maskFboB != null) maskFboB.release(); - GLES20.glDeleteTextures(1, new int[]{rawMaskTexture}, 0); + if (rgbaFramebuffer != null) rgbaFramebuffer.release(); + if (segmentationFramebuffer != null) segmentationFramebuffer.release(); + if (blurFramebufferA != null) blurFramebufferA.release(); + if (blurFramebufferB != null) blurFramebufferB.release(); + if (outputFramebuffer != null) outputFramebuffer.release(); + + int[] textures = {maskTexture}; + GLES20.glDeleteTextures(1, textures, 0); + maskTexture = 0; + if (oesProgram != null) oesProgram.release(); if (rgbProgram != null) rgbProgram.release(); if (passthroughProgram != null) passthroughProgram.release(); if (blurProgram != null) blurProgram.release(); if (compositeProgram != null) compositeProgram.release(); - if (temporalBlendProgram != null) temporalBlendProgram.release(); + + rgbaFramebuffer = null; + segmentationFramebuffer = null; + blurFramebufferA = null; + blurFramebufferB = null; + outputFramebuffer = null; + oesProgram = null; + rgbProgram = null; + passthroughProgram = null; + blurProgram = null; + compositeProgram = null; } } From ab5bb29a4e1efa2958b2245ce1e20672c8e6a34a Mon Sep 17 00:00:00 2001 From: chmjkb Date: Tue, 14 Apr 2026 10:57:50 +0200 Subject: [PATCH 06/19] works a bit better tho --- .../webrtc/ExecutorchFrameProcessor.kt | 44 ------------------- .../executorch/webrtc/MaskPostProcessor.java | 36 ++++++++------- .../executorch/webrtc/gl/GlBlurRenderer.java | 9 +++- 3 files changed, 27 insertions(+), 62 deletions(-) diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt index 92aad505e0..2361ccd6ee 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt @@ -125,10 +125,7 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { textureBuffer: VideoFrame.TextureBuffer, helper: SurfaceTextureHelper, ): VideoFrame { - val totalStartTime = System.nanoTime() - applyPendingBlurRadius() - if (!modelLoaded) tryLoadModel() val width = textureBuffer.width @@ -143,8 +140,6 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { val transformMatrix = convertToGlMatrix(textureBuffer.transformMatrix) val isOes = textureBuffer.type == VideoFrame.TextureBuffer.Type.OES - val gpuStartTime = System.nanoTime() - // 1. Render input texture to RGBA FBO renderer.renderToRgbaFbo(textureBuffer.textureId, transformMatrix, isOes) @@ -157,9 +152,7 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { val segH = renderer.segmentationHeight // 4. Run segmentation (via JNI) - val inferenceStartTime = System.nanoTime() val rawMask = runSegmentationOnPixels(segPixels, segW, segH, frame.rotation) - val inferenceEndTime = System.nanoTime() if (rawMask != null) { // 5. 
Post-process mask (morphology + EMA + Gaussian blur) @@ -178,8 +171,6 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { // 8. Render composite (blend original + blurred using mask) renderer.renderComposite() - val gpuEndTime = System.nanoTime() - // 9. Create output TextureBuffer val outputBuffer = TextureBufferImpl( @@ -193,43 +184,8 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { null, ) - val totalEndTime = System.nanoTime() - - // Accumulate timing measurements - totalTimeAccumulator += (totalEndTime - totalStartTime) - inferenceTimeAccumulator += (inferenceEndTime - inferenceStartTime) - gpuTimeAccumulator += (gpuEndTime - gpuStartTime) - (inferenceEndTime - inferenceStartTime) frameCount++ - // Log averages every LOG_INTERVAL_FRAMES frames - if (frameCount >= LOG_INTERVAL_FRAMES) { - val avgTotalMs = (totalTimeAccumulator / frameCount) / 1_000_000.0 - val avgInferenceMs = (inferenceTimeAccumulator / frameCount) / 1_000_000.0 - val avgMaskPostProcessMs = (maskPostProcessTimeAccumulator / frameCount) / 1_000_000.0 - val avgGpuMs = (gpuTimeAccumulator / frameCount) / 1_000_000.0 - val fps = 1000.0 / avgTotalMs - - Log.d( - TAG, - String.format( - "Avg over %d frames: Total=%.2fms (%.1f FPS) | Inference=%.2fms | MaskPostProcess=%.2fms | GPU=%.2fms", - frameCount, - avgTotalMs, - fps, - avgInferenceMs, - avgMaskPostProcessMs, - avgGpuMs, - ), - ) - - // Reset accumulators - frameCount = 0 - totalTimeAccumulator = 0L - inferenceTimeAccumulator = 0L - maskPostProcessTimeAccumulator = 0L - gpuTimeAccumulator = 0L - } - return VideoFrame(outputBuffer, frame.rotation, frame.timestampNs) } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java index d31d1cb7c2..ccaf53bbb9 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java @@ -13,13 +13,15 @@ public class MaskPostProcessor { private static final float BINARIZE_THRESHOLD = 0.5f; - private static final float EMA_ALPHA = 0.85f; - private static final float THRESHOLD = EMA_ALPHA + 0.05f; - private static final float GAUSSIAN_SIGMA = 2.0f; - private static final int GAUSSIAN_RADIUS = 3; + private static final float EMA_ALPHA = 0.5f; // 50% history for responsive mask + + // Larger Gaussian blur for smoother edges (no visible sheath) + private static final float GAUSSIAN_SIGMA = 5.0f; + private static final int GAUSSIAN_RADIUS = 8; private float[] smoothedMask; private float[] tempA; + private float[] tempB; private float[] rawFloatMask; private final float[] gaussianKernel; private int maskWidth; @@ -46,18 +48,18 @@ public ByteBuffer process(byte[] rawMask, int w, int h) { rawFloatMask[i] = (rawMask[i] & 0xFF) / 255.0f; } - // Apply morphological cleaning (erode + dilate) + // Apply morphological cleaning (binarize + erode + dilate) to remove noise morphologicalClean(rawFloatMask, tempA, w, h); - // Apply EMA temporal smoothing and threshold - applyEmaAndThreshold(tempA, rawFloatMask, len); + // Apply EMA temporal smoothing (keeps soft values, no hard threshold) + applyEmaSmoothing(tempA, len); - // Apply Gaussian blur for smooth edges - gaussianBlurHorizontal(rawFloatMask, tempA, w, h); - gaussianBlurVertical(tempA, rawFloatMask, w, h); + // Apply larger Gaussian blur for smooth, 
natural edges + gaussianBlurHorizontal(smoothedMask, tempA, w, h); + gaussianBlurVertical(tempA, tempB, w, h); - // Convert back to bytes for GPU upload - convertMaskToBytes(rawFloatMask, len); + // Convert to bytes for GPU upload + convertMaskToBytes(tempB, len); return outputBuffer; } @@ -71,6 +73,7 @@ private void ensureBuffers(int w, int h) { int len = w * h; smoothedMask = new float[len]; tempA = new float[len]; + tempB = new float[len]; rawFloatMask = new float[len]; maskWidth = w; maskHeight = h; @@ -145,7 +148,10 @@ private void dilate(float[] src, float[] dst, int w, int h) { } } - private void applyEmaAndThreshold(float[] current, float[] dst, int len) { + /** + * Apply EMA smoothing without hard threshold - keeps soft gradient values + */ + private void applyEmaSmoothing(float[] current, int len) { float oneMinusAlpha = 1.0f - EMA_ALPHA; if (!hasHistory) { System.arraycopy(current, 0, smoothedMask, 0, len); @@ -155,9 +161,7 @@ private void applyEmaAndThreshold(float[] current, float[] dst, int len) { smoothedMask[i] = EMA_ALPHA * smoothedMask[i] + oneMinusAlpha * current[i]; } } - for (int i = 0; i < len; i++) { - dst[i] = smoothedMask[i] > THRESHOLD ? 1.0f : 0.0f; - } + // No hard threshold - keep soft values for natural gradient edges } private void gaussianBlurHorizontal(float[] src, float[] dst, int w, int h) { diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java index 8592039cd5..fedfe90c77 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java @@ -9,7 +9,6 @@ /** * OpenGL-based blur renderer for WebRTC video frames. * Handles texture conversion, downscaling for segmentation, Gaussian blur, and compositing. - * * Simplified version matching fishjam's implementation - temporal smoothing is now * handled by MaskPostProcessor on CPU instead of GPU shaders. 
*/ @@ -83,7 +82,7 @@ public class GlBlurRenderer { private static final int SEGMENTATION_WIDTH = 256; private static final int SEGMENTATION_HEIGHT = 144; - private static final int BLUR_DOWNSCALE = 2; + private static final int BLUR_DOWNSCALE = 1; // Full resolution for better quality private final FullscreenQuad quad = new FullscreenQuad(); @@ -185,6 +184,12 @@ public void renderBlur() { int blurWidth = blurFramebufferA.getWidth(); int blurHeight = blurFramebufferA.getHeight(); drawTexture(passthroughProgram, rgbaFramebuffer.getTextureId(), blurFramebufferA); + + // First blur pass (horizontal + vertical) + renderBlurPass(blurFramebufferA.getTextureId(), blurFramebufferB, 1.0f / blurWidth, 0.0f); + renderBlurPass(blurFramebufferB.getTextureId(), blurFramebufferA, 0.0f, 1.0f / blurHeight); + + // Second blur pass for stronger effect renderBlurPass(blurFramebufferA.getTextureId(), blurFramebufferB, 1.0f / blurWidth, 0.0f); renderBlurPass(blurFramebufferB.getTextureId(), blurFramebufferA, 0.0f, 1.0f / blurHeight); } From e0369eda3437664e2c7be806075bd73afe4a05b6 Mon Sep 17 00:00:00 2001 From: chmjkb Date: Tue, 14 Apr 2026 11:43:17 +0200 Subject: [PATCH 07/19] blur improvements --- .../webrtc/ExecutorchFrameProcessor.kt | 47 +++++++++- .../executorch/webrtc/MaskPostProcessor.java | 69 ++------------ .../executorch/webrtc/gl/GlBlurRenderer.java | 92 +++++++++++++++++-- 3 files changed, 133 insertions(+), 75 deletions(-) diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt index 2361ccd6ee..1a1895f18b 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt @@ -125,6 +125,8 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { textureBuffer: VideoFrame.TextureBuffer, helper: SurfaceTextureHelper, ): VideoFrame { + val totalStartTime = System.nanoTime() + applyPendingBlurRadius() if (!modelLoaded) tryLoadModel() @@ -140,6 +142,8 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { val transformMatrix = convertToGlMatrix(textureBuffer.transformMatrix) val isOes = textureBuffer.type == VideoFrame.TextureBuffer.Type.OES + val gpuStartTime = System.nanoTime() + // 1. Render input texture to RGBA FBO renderer.renderToRgbaFbo(textureBuffer.textureId, transformMatrix, isOes) @@ -152,16 +156,18 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { val segH = renderer.segmentationHeight // 4. Run segmentation (via JNI) + val inferenceStartTime = System.nanoTime() val rawMask = runSegmentationOnPixels(segPixels, segW, segH, frame.rotation) + val inferenceEndTime = System.nanoTime() if (rawMask != null) { - // 5. Post-process mask (morphology + EMA + Gaussian blur) + // 5. Post-process mask (morphology + EMA) - blur now on GPU val maskPostProcessStartTime = System.nanoTime() val processedMask = maskPostProcessor.process(rawMask, segW, segH) val maskPostProcessEndTime = System.nanoTime() maskPostProcessTimeAccumulator += (maskPostProcessEndTime - maskPostProcessStartTime) - // 6. Upload processed mask to GPU + // 6. Upload processed mask to GPU (includes GPU blur) renderer.uploadMask(processedMask, segW, segH) } @@ -171,6 +177,8 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { // 8. 
Render composite (blend original + blurred using mask) renderer.renderComposite() + val gpuEndTime = System.nanoTime() + // 9. Create output TextureBuffer val outputBuffer = TextureBufferImpl( @@ -184,8 +192,43 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { null, ) + val totalEndTime = System.nanoTime() + + // Accumulate timing measurements + totalTimeAccumulator += (totalEndTime - totalStartTime) + inferenceTimeAccumulator += (inferenceEndTime - inferenceStartTime) + gpuTimeAccumulator += (gpuEndTime - gpuStartTime) - (inferenceEndTime - inferenceStartTime) frameCount++ + // Log averages every LOG_INTERVAL_FRAMES frames + if (frameCount >= LOG_INTERVAL_FRAMES) { + val avgTotalMs = (totalTimeAccumulator / frameCount) / 1_000_000.0 + val avgInferenceMs = (inferenceTimeAccumulator / frameCount) / 1_000_000.0 + val avgMaskPostProcessMs = (maskPostProcessTimeAccumulator / frameCount) / 1_000_000.0 + val avgGpuMs = (gpuTimeAccumulator / frameCount) / 1_000_000.0 + val fps = 1000.0 / avgTotalMs + + Log.d( + TAG, + String.format( + "Avg over %d frames: Total=%.2fms (%.1f FPS) | Inference=%.2fms | MaskCPU=%.2fms | GPU=%.2fms", + frameCount, + avgTotalMs, + fps, + avgInferenceMs, + avgMaskPostProcessMs, + avgGpuMs, + ), + ) + + // Reset accumulators + frameCount = 0 + totalTimeAccumulator = 0L + inferenceTimeAccumulator = 0L + maskPostProcessTimeAccumulator = 0L + gpuTimeAccumulator = 0L + } + return VideoFrame(outputBuffer, frame.rotation, frame.timestampNs) } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java index ccaf53bbb9..8aa41d6dd6 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java @@ -4,26 +4,18 @@ import java.nio.ByteOrder; /** - * Post-processes segmentation masks for better visual quality and temporal stability. - * Applies morphological cleaning, EMA temporal smoothing, and Gaussian blur. - * - * Adapted from fishjam's implementation but works with byte[] input (0-255) - * from ExecuTorch instead of float[] from ML Kit. + * Post-processes segmentation masks for temporal stability. + * Applies morphological cleaning and EMA temporal smoothing. + * Gaussian blur is handled on GPU for better performance. */ public class MaskPostProcessor { private static final float BINARIZE_THRESHOLD = 0.5f; private static final float EMA_ALPHA = 0.5f; // 50% history for responsive mask - // Larger Gaussian blur for smoother edges (no visible sheath) - private static final float GAUSSIAN_SIGMA = 5.0f; - private static final int GAUSSIAN_RADIUS = 8; - private float[] smoothedMask; private float[] tempA; - private float[] tempB; private float[] rawFloatMask; - private final float[] gaussianKernel; private int maskWidth; private int maskHeight; private boolean hasHistory; @@ -32,12 +24,12 @@ public class MaskPostProcessor { private int outputBufferCapacity; public MaskPostProcessor() { - gaussianKernel = computeGaussianKernel(GAUSSIAN_SIGMA, GAUSSIAN_RADIUS); } /** * Process a byte mask (0-255) from ExecuTorch segmentation. * Returns processed mask as ByteBuffer ready for GPU upload. + * Note: Gaussian blur is done on GPU after upload for better performance. 
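+ * reset() clears the EMA history; call it when the input source changes
+ * so a stale mask is not blended into the new scene.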
*/ public ByteBuffer process(byte[] rawMask, int w, int h) { ensureBuffers(w, h); @@ -54,12 +46,8 @@ public ByteBuffer process(byte[] rawMask, int w, int h) { // Apply EMA temporal smoothing (keeps soft values, no hard threshold) applyEmaSmoothing(tempA, len); - // Apply larger Gaussian blur for smooth, natural edges - gaussianBlurHorizontal(smoothedMask, tempA, w, h); - gaussianBlurVertical(tempA, tempB, w, h); - - // Convert to bytes for GPU upload - convertMaskToBytes(tempB, len); + // Convert to bytes for GPU upload (blur will be done on GPU) + convertMaskToBytes(smoothedMask, len); return outputBuffer; } @@ -73,7 +61,6 @@ private void ensureBuffers(int w, int h) { int len = w * h; smoothedMask = new float[len]; tempA = new float[len]; - tempB = new float[len]; rawFloatMask = new float[len]; maskWidth = w; maskHeight = h; @@ -164,50 +151,6 @@ private void applyEmaSmoothing(float[] current, int len) { // No hard threshold - keep soft values for natural gradient edges } - private void gaussianBlurHorizontal(float[] src, float[] dst, int w, int h) { - int r = GAUSSIAN_RADIUS; - for (int y = 0; y < h; y++) { - int rowOffset = y * w; - for (int x = 0; x < w; x++) { - float sum = 0; - for (int k = -r; k <= r; k++) { - int sx = Math.min(Math.max(x + k, 0), w - 1); - sum += src[rowOffset + sx] * gaussianKernel[k + r]; - } - dst[rowOffset + x] = sum; - } - } - } - - private void gaussianBlurVertical(float[] src, float[] dst, int w, int h) { - int r = GAUSSIAN_RADIUS; - for (int y = 0; y < h; y++) { - for (int x = 0; x < w; x++) { - float sum = 0; - for (int k = -r; k <= r; k++) { - int sy = Math.min(Math.max(y + k, 0), h - 1); - sum += src[sy * w + x] * gaussianKernel[k + r]; - } - dst[y * w + x] = sum; - } - } - } - - private static float[] computeGaussianKernel(float sigma, int radius) { - int size = 2 * radius + 1; - float[] kernel = new float[size]; - float sum = 0; - for (int i = 0; i < size; i++) { - int d = i - radius; - kernel[i] = (float) Math.exp(-(d * d) / (2.0 * sigma * sigma)); - sum += kernel[i]; - } - for (int i = 0; i < size; i++) { - kernel[i] /= sum; - } - return kernel; - } - private void convertMaskToBytes(float[] source, int length) { outputBuffer.clear(); for (int i = 0; i < length; i++) { diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java index fedfe90c77..1fb85f5695 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/gl/GlBlurRenderer.java @@ -9,8 +9,7 @@ /** * OpenGL-based blur renderer for WebRTC video frames. * Handles texture conversion, downscaling for segmentation, Gaussian blur, and compositing. - * Simplified version matching fishjam's implementation - temporal smoothing is now - * handled by MaskPostProcessor on CPU instead of GPU shaders. + * Both background blur and mask edge blur are done on GPU for performance. 
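+ * The mask itself is also blurred on the GPU with separable
+ * horizontal/vertical passes at mask resolution before compositing.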
*/ public class GlBlurRenderer { @@ -50,6 +49,16 @@ public class GlBlurRenderer { " gl_FragColor = texture2D(uTexture, vTexCoord);\n" + "}\n"; + // Converts luminance (grayscale) texture to RGBA for blur processing + private static final String FRAGMENT_LUMINANCE_TO_RGBA = + "precision mediump float;\n" + + "varying vec2 vTexCoord;\n" + + "uniform sampler2D uTexture;\n" + + "void main() {\n" + + " float lum = texture2D(uTexture, vTexCoord).r;\n" + + " gl_FragColor = vec4(lum, lum, lum, 1.0);\n" + + "}\n"; + private static final String FRAGMENT_BLUR = "precision mediump float;\n" + "varying vec2 vTexCoord;\n" + @@ -84,11 +93,15 @@ public class GlBlurRenderer { private static final int SEGMENTATION_HEIGHT = 144; private static final int BLUR_DOWNSCALE = 1; // Full resolution for better quality + // Mask blur settings (larger sigma for smooth edges) + private static final float MASK_BLUR_SIGMA = 5.0f; + private final FullscreenQuad quad = new FullscreenQuad(); private GlProgram oesProgram; private GlProgram rgbProgram; private GlProgram passthroughProgram; + private GlProgram luminanceToRgbaProgram; private GlProgram blurProgram; private GlProgram compositeProgram; @@ -98,19 +111,30 @@ public class GlBlurRenderer { private GlFramebuffer blurFramebufferB; private GlFramebuffer outputFramebuffer; + // Mask blur framebuffers (at mask resolution) + private GlFramebuffer maskBlurFboA; + private GlFramebuffer maskBlurFboB; private int maskTexture; + private int currentMaskWidth; + private int currentMaskHeight; private int currentWidth; private int currentHeight; private boolean initialized; + // Background blur kernel private final float[] blurWeights = new float[9]; private final float[] blurOffsets = new float[9]; + // Mask blur kernel (separate, larger sigma) + private final float[] maskBlurWeights = new float[9]; + private final float[] maskBlurOffsets = new float[9]; + private ByteBuffer segPixelBuffer; public GlBlurRenderer() { - computeGaussianKernel(12.0f); + computeGaussianKernel(12.0f, blurWeights, blurOffsets); + computeGaussianKernel(MASK_BLUR_SIGMA, maskBlurWeights, maskBlurOffsets); } public void ensureSetup(int width, int height) { @@ -128,6 +152,7 @@ public void ensureSetup(int width, int height) { oesProgram = new GlProgram(VERTEX_SHADER, FRAGMENT_OES_TO_RGBA); rgbProgram = new GlProgram(VERTEX_SHADER, FRAGMENT_PASSTHROUGH); passthroughProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_PASSTHROUGH); + luminanceToRgbaProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_LUMINANCE_TO_RGBA); blurProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_BLUR); compositeProgram = new GlProgram(VERTEX_SHADER_SIMPLE, FRAGMENT_COMPOSITE); @@ -141,6 +166,7 @@ public void ensureSetup(int width, int height) { outputFramebuffer = new GlFramebuffer(width, height); maskTexture = GlFramebuffer.createTexture2D(); + // Mask blur FBOs will be created on first mask upload initialized = true; } @@ -173,11 +199,37 @@ public ByteBuffer readSegmentationPixels() { } public void uploadMask(ByteBuffer maskData, int maskWidth, int maskHeight) { + // Upload raw mask to texture GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, maskTexture); GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE, maskWidth, maskHeight, 0, GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE, maskData); GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0); + + // Ensure mask blur FBOs exist at correct size + if (maskBlurFboA == null || currentMaskWidth != maskWidth || currentMaskHeight != maskHeight) { + if (maskBlurFboA != null) 
maskBlurFboA.release(); + if (maskBlurFboB != null) maskBlurFboB.release(); + maskBlurFboA = new GlFramebuffer(maskWidth, maskHeight); + maskBlurFboB = new GlFramebuffer(maskWidth, maskHeight); + currentMaskWidth = maskWidth; + currentMaskHeight = maskHeight; + } + + // Convert luminance to RGBA for blur processing + drawTexture(luminanceToRgbaProgram, maskTexture, maskBlurFboA); + + // Blur the mask on GPU (horizontal + vertical passes) + float texelX = 1.0f / maskWidth; + float texelY = 1.0f / maskHeight; + + // First blur pass + renderMaskBlurPass(maskBlurFboA.getTextureId(), maskBlurFboB, texelX, 0.0f); + renderMaskBlurPass(maskBlurFboB.getTextureId(), maskBlurFboA, 0.0f, texelY); + + // Second blur pass for stronger smoothing + renderMaskBlurPass(maskBlurFboA.getTextureId(), maskBlurFboB, texelX, 0.0f); + renderMaskBlurPass(maskBlurFboB.getTextureId(), maskBlurFboA, 0.0f, texelY); } public void renderBlur() { @@ -195,11 +247,14 @@ public void renderBlur() { } public void renderComposite() { + if (maskBlurFboA == null) return; + outputFramebuffer.bind(); compositeProgram.use(); compositeProgram.bindTexture("uOriginal", 0, rgbaFramebuffer.getTextureId(), GLES20.GL_TEXTURE_2D); compositeProgram.bindTexture("uBlurred", 1, blurFramebufferA.getTextureId(), GLES20.GL_TEXTURE_2D); - compositeProgram.bindTexture("uMask", 2, maskTexture, GLES20.GL_TEXTURE_2D); + // Use the GPU-blurred mask + compositeProgram.bindTexture("uMask", 2, maskBlurFboA.getTextureId(), GLES20.GL_TEXTURE_2D); quad.draw(compositeProgram); GlFramebuffer.unbind(); } @@ -236,6 +291,17 @@ private void renderBlurPass(int inputTexture, GlFramebuffer outputFbo, float dir GlFramebuffer.unbind(); } + private void renderMaskBlurPass(int inputTexture, GlFramebuffer outputFbo, float dirX, float dirY) { + outputFbo.bind(); + blurProgram.use(); + blurProgram.bindTexture("uTexture", 0, inputTexture, GLES20.GL_TEXTURE_2D); + blurProgram.setUniform2f("uDirection", dirX, dirY); + blurProgram.setUniform1fv("uWeights", maskBlurWeights); + blurProgram.setUniform1fv("uOffsets", maskBlurOffsets); + quad.draw(blurProgram); + GlFramebuffer.unbind(); + } + private void drawTexture(GlProgram program, int textureId, GlFramebuffer outputFbo) { outputFbo.bind(); program.use(); @@ -245,19 +311,19 @@ private void drawTexture(GlProgram program, int textureId, GlFramebuffer outputF } public void setBlurRadius(float sigma) { - computeGaussianKernel(sigma); + computeGaussianKernel(sigma, blurWeights, blurOffsets); } - private void computeGaussianKernel(float sigma) { + private void computeGaussianKernel(float sigma, float[] weights, float[] offsets) { float sum = 0; for (int i = 0; i < 9; i++) { - blurOffsets[i] = (float) i; - blurWeights[i] = (float) (Math.exp(-(i * i) / (2.0 * sigma * sigma)) + offsets[i] = (float) i; + weights[i] = (float) (Math.exp(-(i * i) / (2.0 * sigma * sigma)) / (Math.sqrt(2.0 * Math.PI) * sigma)); - sum += (i == 0) ? blurWeights[i] : 2.0f * blurWeights[i]; + sum += (i == 0) ? 
weights[i] : 2.0f * weights[i]; } for (int i = 0; i < 9; i++) { - blurWeights[i] /= sum; + weights[i] /= sum; } } @@ -267,6 +333,8 @@ private void releaseGlResources() { if (blurFramebufferA != null) blurFramebufferA.release(); if (blurFramebufferB != null) blurFramebufferB.release(); if (outputFramebuffer != null) outputFramebuffer.release(); + if (maskBlurFboA != null) maskBlurFboA.release(); + if (maskBlurFboB != null) maskBlurFboB.release(); int[] textures = {maskTexture}; GLES20.glDeleteTextures(1, textures, 0); @@ -275,6 +343,7 @@ private void releaseGlResources() { if (oesProgram != null) oesProgram.release(); if (rgbProgram != null) rgbProgram.release(); if (passthroughProgram != null) passthroughProgram.release(); + if (luminanceToRgbaProgram != null) luminanceToRgbaProgram.release(); if (blurProgram != null) blurProgram.release(); if (compositeProgram != null) compositeProgram.release(); @@ -283,9 +352,12 @@ private void releaseGlResources() { blurFramebufferA = null; blurFramebufferB = null; outputFramebuffer = null; + maskBlurFboA = null; + maskBlurFboB = null; oesProgram = null; rgbProgram = null; passthroughProgram = null; + luminanceToRgbaProgram = null; blurProgram = null; compositeProgram = null; } From f7d3e21e865d6a68fb842210cfc88288e63c78c9 Mon Sep 17 00:00:00 2001 From: chmjkb Date: Fri, 17 Apr 2026 11:30:03 +0200 Subject: [PATCH 08/19] some changes --- .../webrtc/ExecutorchFrameProcessor.kt | 29 +- .../webrtc/ExecutorchWebRTCModule.kt | 56 +- .../executorch/webrtc/MaskPostProcessor.java | 4 - .../ios/ExecutorchFrameProcessor.h | 4 +- .../ios/ExecutorchFrameProcessor.mm | 566 ++++++++++++------ .../ios/ExecutorchWebRTC.mm | 43 +- .../package.json | 7 +- .../src/BackgroundBlur.ts | 64 ++ .../src/NativeBackgroundBlur.ts | 33 + .../src/index.ts | 90 +-- .../src/useBackgroundBlur.ts | 123 ++++ .../src/useWebRTCFrameProcessor.ts | 179 ------ 12 files changed, 689 insertions(+), 509 deletions(-) create mode 100644 packages/react-native-executorch-webrtc/src/BackgroundBlur.ts create mode 100644 packages/react-native-executorch-webrtc/src/NativeBackgroundBlur.ts create mode 100644 packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts delete mode 100644 packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt index 1a1895f18b..522fb73072 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt @@ -167,7 +167,7 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { val maskPostProcessEndTime = System.nanoTime() maskPostProcessTimeAccumulator += (maskPostProcessEndTime - maskPostProcessStartTime) - // 6. Upload processed mask to GPU (includes GPU blur) + // 6. 
Upload processed mask to GPU renderer.uploadMask(processedMask, segW, segH) } @@ -262,33 +262,6 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { return runSegmentation(rgbaBuffer!!, width, height, rotation) } - private fun createEllipseMask( - width: Int, - height: Int, - ): ByteArray { - val mask = ByteArray(width * height) - val centerX = width / 2f - val centerY = height / 2f - val radiusX = width * 0.4f - val radiusY = height * 0.45f - - for (y in 0 until height) { - for (x in 0 until width) { - val dx = (x - centerX) / radiusX - val dy = (y - centerY) / radiusY - val dist = dx * dx + dy * dy - val value = - when { - dist < 1.0f -> 255 - dist < 1.3f -> ((1.0f - (dist - 1.0f) / 0.3f) * 255).toInt() - else -> 0 - } - mask[y * width + x] = value.toByte() - } - } - return mask - } - private fun convertToGlMatrix(androidMatrix: android.graphics.Matrix): FloatArray { val values = FloatArray(9) androidMatrix.getValues(values) diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt index 7b15f07354..8833294fc8 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt @@ -6,8 +6,8 @@ import com.facebook.react.bridge.ReactMethod import com.facebook.react.module.annotations.ReactModule /** - * Native module that auto-registers the frame processor when loaded. - * This allows the package to work without manual native code setup. + * Native module for ExecuTorch WebRTC background blur. + * API compatible with @fishjam-cloud/react-native-webrtc-background-blur. */ @ReactModule(name = ExecutorchWebRTCModule.NAME) class ExecutorchWebRTCModule( @@ -20,54 +20,40 @@ class ExecutorchWebRTCModule( } const val NAME = "ExecutorchWebRTC" - private var initialized = false + private var processorsRegistered = false } init { // Auto-register the processor when the module is loaded - if (!initialized) { + if (!processorsRegistered) { ExecutorchWebRTC.registerProcessors() - initialized = true + processorsRegistered = true } } override fun getName(): String = NAME /** - * No-op method just to ensure the module is loaded. - * Called from JS to trigger initialization. - */ - @ReactMethod - fun setup() { - // Module init happens in constructor, this is just a trigger - } - - /** - * Configure the segmentation model for background removal + * Initialize background blur with the segmentation model. * @param modelPath Path to the .pte model file */ @ReactMethod - fun configureBackgroundRemoval(modelPath: String) { + fun initialize(modelPath: String) { ExecutorchWebRTC.configureModel(modelPath) } /** - * Configure the segmentation model and blur intensity - * @param modelPath Path to the .pte model file - * @param blurIntensity Blur sigma value (default 12.0) + * Deinitialize and release resources. */ @ReactMethod - fun configureBackgroundBlur( - modelPath: String, - blurIntensity: Int, - ) { - ExecutorchWebRTC.configureModel(modelPath) - ExecutorchWebRTC.setBlurRadius(blurIntensity.toFloat()) + fun deinitialize() { + // Currently no-op, resources are managed per-frame + // Could be extended to unload the model if needed } /** - * Set the blur radius dynamically - * @param radius Blur sigma value + * Set the blur radius/intensity. 
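+   * Illustrative JS-side call (the module registers under the name
+   * "ExecutorchWebRTC"): NativeModules.ExecutorchWebRTC.setBlurRadius(15.0)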
+ * @param radius Blur sigma value (default 12.0) */ @ReactMethod fun setBlurRadius(radius: Double) { @@ -75,10 +61,16 @@ class ExecutorchWebRTCModule( } /** - * Get available processor names for use with videoTrack._setVideoEffects() + * Check if background blur is available on this device. + */ + @ReactMethod(isBlockingSynchronousMethod = true) + fun isAvailable(): Boolean { + return true // Always available on Android with ExecuTorch + } + + /** + * Get the processor name for use with _setVideoEffect(). */ - override fun getConstants(): MutableMap = - mutableMapOf( - "PROCESSOR_NAME" to ExecutorchWebRTC.PROCESSOR_NAME, - ) + @ReactMethod(isBlockingSynchronousMethod = true) + fun getProcessorName(): String = ExecutorchWebRTC.PROCESSOR_NAME } diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java index 8aa41d6dd6..3328a9dd15 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/MaskPostProcessor.java @@ -135,9 +135,6 @@ private void dilate(float[] src, float[] dst, int w, int h) { } } - /** - * Apply EMA smoothing without hard threshold - keeps soft gradient values - */ private void applyEmaSmoothing(float[] current, int len) { float oneMinusAlpha = 1.0f - EMA_ALPHA; if (!hasHistory) { @@ -148,7 +145,6 @@ private void applyEmaSmoothing(float[] current, int len) { smoothedMask[i] = EMA_ALPHA * smoothedMask[i] + oneMinusAlpha * current[i]; } } - // No hard threshold - keep soft values for natural gradient edges } private void convertMaskToBytes(float[] source, int length) { diff --git a/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.h b/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.h index d3afecdb8c..cbc9632954 100644 --- a/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.h +++ b/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.h @@ -2,7 +2,6 @@ #import #import -// Import the VideoFrameProcessor protocol from react-native-webrtc @protocol VideoFrameProcessorDelegate - (RTCVideoFrame *)capturer:(RTCVideoCapturer *)capturer didCaptureVideoFrame:(RTCVideoFrame *)frame; @@ -11,7 +10,10 @@ @interface ExecutorchFrameProcessor : NSObject + (instancetype)sharedInstance; + - (void)configureWithModelPath:(NSString *)modelPath; +- (void)setBlurRadius:(float)blurRadius; - (void)unloadModel; +- (BOOL)isAvailable; @end diff --git a/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm b/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm index 6325a8e29c..e7f22ec167 100644 --- a/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm +++ b/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm @@ -1,9 +1,9 @@ #import "ExecutorchFrameProcessor.h" +#import +#import #import #import -#import -#import -#import +#import #import #import @@ -13,12 +13,34 @@ using namespace rnexecutorch::models::semantic_segmentation; @implementation ExecutorchFrameProcessor { + // ExecuTorch model std::unique_ptr _segmentation; BOOL _modelLoaded; int _modelWidth; int _modelHeight; + + // Core Image context for GPU-accelerated processing + CIContext *_ciContext; + float _blurRadius; + + // Pixel buffer pool for efficient output allocation + 
CVPixelBufferPoolRef _outputPool; + size_t _poolWidth; + size_t _poolHeight; + + // Frame dropping + RTCVideoFrame *_lastProcessedFrame; + BOOL _isProcessing; + BOOL _ready; + + // Temporal smoothing (EMA) + cv::Mat _previousMask; + float _emaAlpha; + + // Timing int _frameCount; NSTimeInterval _lastLogTime; + NSTimeInterval _totalProcessingTime; } + (instancetype)sharedInstance { @@ -36,8 +58,18 @@ - (instancetype)init { _modelLoaded = NO; _modelWidth = 256; _modelHeight = 256; + _blurRadius = 12.0f; + _isProcessing = NO; + _ready = NO; + _emaAlpha = 0.5f; _frameCount = 0; _lastLogTime = 0; + _totalProcessingTime = 0; + + // Create Core Image context with GPU rendering + _ciContext = [CIContext contextWithOptions:@{ + kCIContextUseSoftwareRenderer : @NO, + }]; } return self; } @@ -45,126 +77,240 @@ - (instancetype)init { - (void)configureWithModelPath:(NSString *)modelPath { NSLog(@"[ExecutorchFrameProcessor] Loading model from: %@", modelPath); - try { - std::vector normMean = {}; - std::vector normStd = {}; - std::vector allClasses = {"foreground", "background"}; - - _segmentation = std::make_unique( - std::string([modelPath UTF8String]), normMean, normStd, allClasses, - nullptr); - - auto inputShapes = _segmentation->getAllInputShapes(); - if (!inputShapes.empty() && inputShapes[0].size() >= 4) { - _modelHeight = inputShapes[0][inputShapes[0].size() - 2]; - _modelWidth = inputShapes[0][inputShapes[0].size() - 1]; + dispatch_async(dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0), ^{ + @try { + std::vector normMean = {}; + std::vector normStd = {}; + std::vector allClasses = {"foreground", "background"}; + + self->_segmentation = std::make_unique( + std::string([modelPath UTF8String]), normMean, normStd, allClasses, + nullptr); + + auto inputShapes = self->_segmentation->getAllInputShapes(); + if (!inputShapes.empty() && inputShapes[0].size() >= 4) { + self->_modelHeight = inputShapes[0][inputShapes[0].size() - 2]; + self->_modelWidth = inputShapes[0][inputShapes[0].size() - 1]; + } + + self->_modelLoaded = YES; + self->_ready = YES; + NSLog(@"[ExecutorchFrameProcessor] Model loaded! Size: %dx%d", + self->_modelWidth, self->_modelHeight); + } @catch (NSException *exception) { + NSLog(@"[ExecutorchFrameProcessor] Failed to load model: %@", + exception.reason); + self->_modelLoaded = NO; } + }); +} - _modelLoaded = YES; - NSLog(@"[ExecutorchFrameProcessor] Model loaded! 
Size: %dx%d", _modelWidth, - _modelHeight); - } catch (const std::exception &e) { - NSLog(@"[ExecutorchFrameProcessor] Failed to load model: %s", e.what()); - _modelLoaded = NO; - } +- (void)setBlurRadius:(float)blurRadius { + _blurRadius = blurRadius; } - (void)unloadModel { _segmentation.reset(); _modelLoaded = NO; + _ready = NO; + _previousMask.release(); } -- (RTCVideoFrame *)capturer:(RTCVideoCapturer *)capturer - didCaptureVideoFrame:(RTCVideoFrame *)frame { - _frameCount++; +- (BOOL)isAvailable { + return YES; +} - // TEST: Just return the original frame to check if colors are correct without - // processing return frame; +#pragma mark - VideoFrameProcessorDelegate - // Get I420 buffer - id i420Buffer = [frame.buffer toI420]; - if (!i420Buffer) { +- (RTCVideoFrame *)capturer:(RTCVideoCapturer *)capturer + didCaptureVideoFrame:(RTCVideoFrame *)frame { + if (!_ready || !_modelLoaded) { return frame; } - int width = i420Buffer.width; - int height = i420Buffer.height; - int rotation = frame.rotation; - int uvHeight = height / 2; - int uvWidth = width / 2; + // Frame dropping when busy + if (_isProcessing) { + return _lastProcessedFrame ?: frame; + } - // Rate-limited logging - NSTimeInterval now = [[NSDate date] timeIntervalSince1970]; - if (now - _lastLogTime > 1.0) { - NSLog(@"[ExecutorchFrameProcessor] Frame: %dx%d, rotation=%d, fps=%.1f, " - @"model=%d", - width, height, rotation, _frameCount / (now - _lastLogTime), - _modelLoaded); - _lastLogTime = now; - _frameCount = 0; + _isProcessing = YES; + RTCVideoFrame *result = [self processFrame:frame]; + _lastProcessedFrame = result; + _isProcessing = NO; + return result; +} + +#pragma mark - Core Pipeline + +- (RTCVideoFrame *)processFrame:(RTCVideoFrame *)frame { + NSTimeInterval startTime = [[NSDate date] timeIntervalSince1970]; + + // Get CVPixelBuffer from frame + CVPixelBufferRef inputPixelBuffer = NULL; + + if ([frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) { + inputPixelBuffer = ((RTCCVPixelBuffer *)frame.buffer).pixelBuffer; + } else { + // Convert I420 to CVPixelBuffer if needed + inputPixelBuffer = [self createPixelBufferFromI420:frame.buffer]; + if (!inputPixelBuffer) { + return frame; + } } - // Create mutable buffer for output - id outBuffer = - [[RTCMutableI420Buffer alloc] initWithWidth:width height:height]; + size_t width = CVPixelBufferGetWidth(inputPixelBuffer); + size_t height = CVPixelBufferGetHeight(inputPixelBuffer); - // Copy Y plane row by row (respecting stride) - for (int row = 0; row < height; row++) { - memcpy(outBuffer.mutableDataY + row * outBuffer.strideY, - i420Buffer.dataY + row * i420Buffer.strideY, width); + // Run ExecuTorch segmentation to get mask + CIImage *maskImage = [self generateMaskForPixelBuffer:inputPixelBuffer + rotation:frame.rotation]; + if (!maskImage) { + if (![frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) { + CVPixelBufferRelease(inputPixelBuffer); + } + return frame; } - // Copy U plane row by row - for (int row = 0; row < uvHeight; row++) { - memcpy(outBuffer.mutableDataU + row * outBuffer.strideU, - i420Buffer.dataU + row * i420Buffer.strideU, uvWidth); + // Ensure output pool exists + [self ensurePoolForWidth:width height:height]; + + // Create output buffer + CVPixelBufferRef outputBuffer = NULL; + if (CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, _outputPool, + &outputBuffer) != kCVReturnSuccess) { + if (![frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) { + CVPixelBufferRelease(inputPixelBuffer); + } + return frame; } - // Copy V plane row by row - 
for (int row = 0; row < uvHeight; row++) { - memcpy(outBuffer.mutableDataV + row * outBuffer.strideV, - i420Buffer.dataV + row * i420Buffer.strideV, uvWidth); + // Core Image processing + CIImage *original = [CIImage imageWithCVPixelBuffer:inputPixelBuffer]; + + // Scale mask to match input size + CGFloat scaleX = (CGFloat)width / maskImage.extent.size.width; + CGFloat scaleY = (CGFloat)height / maskImage.extent.size.height; + CIImage *scaledMask = [maskImage + imageByApplyingTransform:CGAffineTransformMakeScale(scaleX, scaleY)]; + + // Apply Gaussian blur to background + CIFilter *blurFilter = [CIFilter filterWithName:@"CIGaussianBlur"]; + [blurFilter setValue:[original imageByClampingToExtent] + forKey:kCIInputImageKey]; + [blurFilter setValue:@(_blurRadius) forKey:kCIInputRadiusKey]; + CIImage *blurred = + [blurFilter.outputImage imageByCroppingToRect:original.extent]; + + // Blend: foreground (mask=white) stays sharp, background gets blurred + CIFilter *blendFilter = [CIFilter filterWithName:@"CIBlendWithMask"]; + [blendFilter setValue:original forKey:kCIInputImageKey]; + [blendFilter setValue:blurred forKey:kCIInputBackgroundImageKey]; + [blendFilter setValue:scaledMask forKey:kCIInputMaskImageKey]; + CIImage *composited = blendFilter.outputImage; + + // Render to output buffer + CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); + [_ciContext render:composited + toCVPixelBuffer:outputBuffer + bounds:original.extent + colorSpace:colorSpace]; + CGColorSpaceRelease(colorSpace); + + // Create output frame + RTCCVPixelBuffer *rtcBuffer = + [[RTCCVPixelBuffer alloc] initWithPixelBuffer:outputBuffer]; + RTCVideoFrame *outputFrame = + [[RTCVideoFrame alloc] initWithBuffer:rtcBuffer + rotation:frame.rotation + timeStampNs:frame.timeStampNs]; + + CVPixelBufferRelease(outputBuffer); + if (![frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) { + CVPixelBufferRelease(inputPixelBuffer); } - // If no model loaded, just return the copy (no blur) - if (!_modelLoaded || !_segmentation) { - RTCVideoFrame *passthrough = - [[RTCVideoFrame alloc] initWithBuffer:outBuffer - rotation:frame.rotation - timeStampNs:frame.timeStampNs]; - return passthrough; + // Logging + NSTimeInterval endTime = [[NSDate date] timeIntervalSince1970]; + _totalProcessingTime += (endTime - startTime); + _frameCount++; + + if (endTime - _lastLogTime > 1.0) { + double avgMs = (_totalProcessingTime / _frameCount) * 1000.0; + double fps = _frameCount / (endTime - _lastLogTime); + NSLog(@"[ExecutorchFrameProcessor] Avg: %.1fms (%.1f FPS)", avgMs, fps); + _lastLogTime = endTime; + _frameCount = 0; + _totalProcessingTime = 0; } - // Convert I420 to RGB for model inference - cv::Mat i420Mat(height * 3 / 2, width, CV_8UC1); - memcpy(i420Mat.data, i420Buffer.dataY, width * height); + return outputFrame; +} - uint8_t *uvDst = i420Mat.data + (height * width); - for (int row = 0; row < uvHeight; row++) { - memcpy(uvDst + row * uvWidth, i420Buffer.dataU + row * i420Buffer.strideU, - uvWidth); +#pragma mark - Segmentation + +- (CIImage *)generateMaskForPixelBuffer:(CVPixelBufferRef)pixelBuffer + rotation:(RTCVideoRotation)rotation { + if (!_modelLoaded || !_segmentation) { + return nil; } - for (int row = 0; row < uvHeight; row++) { - memcpy(uvDst + uvHeight * uvWidth + row * uvWidth, - i420Buffer.dataV + row * i420Buffer.strideV, uvWidth); + + size_t width = CVPixelBufferGetWidth(pixelBuffer); + size_t height = CVPixelBufferGetHeight(pixelBuffer); + + // Lock pixel buffer and convert to RGB for model + 
CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly); + + OSType format = CVPixelBufferGetPixelFormatType(pixelBuffer); + cv::Mat rgbMat; + + if (format == kCVPixelFormatType_32BGRA) { + // Direct BGRA access + void *baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer); + size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer); + cv::Mat bgraMat((int)height, (int)width, CV_8UC4, baseAddress, bytesPerRow); + cv::cvtColor(bgraMat, rgbMat, cv::COLOR_BGRA2RGB); + } else if (format == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange || + format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) { + // NV12 format - convert via I420 + cv::Mat yMat((int)height, (int)width, CV_8UC1, + CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0), + CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0)); + cv::Mat uvMat((int)height / 2, (int)width / 2, CV_8UC2, + CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1), + CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1)); + + cv::Mat yuvMat((int)height * 3 / 2, (int)width, CV_8UC1); + yMat.copyTo(yuvMat(cv::Rect(0, 0, (int)width, (int)height))); + + // Deinterleave UV + std::vector uvChannels; + cv::split(uvMat, uvChannels); + uvChannels[0].copyTo( + yuvMat(cv::Rect(0, (int)height, (int)width / 2, (int)height / 2))); + uvChannels[1].copyTo(yuvMat(cv::Rect((int)width / 2, (int)height, + (int)width / 2, (int)height / 2))); + + cv::cvtColor(yuvMat, rgbMat, cv::COLOR_YUV2RGB_I420); + } else { + CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly); + return nil; } - cv::Mat rgbFull; - cv::cvtColor(i420Mat, rgbFull, cv::COLOR_YUV2RGB_I420); + CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly); // Rotate for model inference cv::Mat rgbRotated; if (rotation == RTCVideoRotation_90) { - cv::rotate(rgbFull, rgbRotated, cv::ROTATE_90_CLOCKWISE); + cv::rotate(rgbMat, rgbRotated, cv::ROTATE_90_CLOCKWISE); } else if (rotation == RTCVideoRotation_180) { - cv::rotate(rgbFull, rgbRotated, cv::ROTATE_180); + cv::rotate(rgbMat, rgbRotated, cv::ROTATE_180); } else if (rotation == RTCVideoRotation_270) { - cv::rotate(rgbFull, rgbRotated, cv::ROTATE_90_COUNTERCLOCKWISE); + cv::rotate(rgbMat, rgbRotated, cv::ROTATE_90_COUNTERCLOCKWISE); } else { - rgbRotated = rgbFull; + rgbRotated = rgbMat; } - // Run segmentation + // Run ExecuTorch segmentation cv::Mat mask; try { JSTensorViewIn pixelData; @@ -181,125 +327,187 @@ - (RTCVideoFrame *)capturer:(RTCVideoCapturer *)capturer auto *fgData = reinterpret_cast(fgBuffer->data()); mask = cv::Mat(_modelHeight, _modelWidth, CV_32FC1, fgData).clone(); } else { - mask = cv::Mat::ones(_modelHeight, _modelWidth, CV_32FC1); + return nil; } } catch (const std::exception &e) { NSLog(@"[ExecutorchFrameProcessor] Segmentation failed: %s", e.what()); - // On error, return unprocessed copy - RTCVideoFrame *passthrough = - [[RTCVideoFrame alloc] initWithBuffer:outBuffer - rotation:frame.rotation - timeStampNs:frame.timeStampNs]; - return passthrough; + return nil; } - // Resize mask and rotate back to match frame orientation - cv::Mat fullMask; - if (rotation == RTCVideoRotation_90 || rotation == RTCVideoRotation_270) { - cv::Mat rotatedMask; - cv::resize(mask, rotatedMask, cv::Size(height, width), 0, 0, - cv::INTER_LINEAR); - int inverseCode = (rotation == RTCVideoRotation_90) - ? 
cv::ROTATE_90_COUNTERCLOCKWISE - : cv::ROTATE_90_CLOCKWISE; - cv::rotate(rotatedMask, fullMask, inverseCode); + // Rotate mask back + cv::Mat maskRotated; + if (rotation == RTCVideoRotation_90) { + cv::rotate(mask, maskRotated, cv::ROTATE_90_COUNTERCLOCKWISE); } else if (rotation == RTCVideoRotation_180) { - cv::resize(mask, fullMask, cv::Size(width, height), 0, 0, cv::INTER_LINEAR); - cv::rotate(fullMask, fullMask, cv::ROTATE_180); + cv::rotate(mask, maskRotated, cv::ROTATE_180); + } else if (rotation == RTCVideoRotation_270) { + cv::rotate(mask, maskRotated, cv::ROTATE_90_CLOCKWISE); } else { - cv::resize(mask, fullMask, cv::Size(width, height), 0, 0, cv::INTER_LINEAR); + maskRotated = mask; + } + + // EMA temporal smoothing to reduce flickering + if (_previousMask.empty() || _previousMask.size() != maskRotated.size()) { + _previousMask = maskRotated.clone(); + } else { + // Blend current mask with previous: smoothed = alpha * current + (1 - + // alpha) * previous + cv::addWeighted(maskRotated, _emaAlpha, _previousMask, 1.0f - _emaAlpha, 0, + maskRotated); + _previousMask = maskRotated.clone(); + } + + // Morphological operations to clean up mask edges + cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)); + cv::erode(maskRotated, maskRotated, kernel, cv::Point(-1, -1), 1); + cv::dilate(maskRotated, maskRotated, kernel, cv::Point(-1, -1), 1); + + // Blur mask edges for smooth transition (match Android: sigma=5.0) + cv::GaussianBlur(maskRotated, maskRotated, cv::Size(0, 0), 5.0); + + // Convert to 8-bit grayscale + cv::Mat mask8u; + maskRotated.convertTo(mask8u, CV_8UC1, 255.0); + + // Create CIImage from mask + CVPixelBufferRef maskPixelBuffer = [self createGrayscalePixelBuffer:mask8u]; + if (!maskPixelBuffer) { + return nil; } - // Blur mask edges for smooth transition - cv::GaussianBlur(fullMask, fullMask, cv::Size(15, 15), 0); + CIImage *maskCIImage = [CIImage imageWithCVPixelBuffer:maskPixelBuffer]; + CVPixelBufferRelease(maskPixelBuffer); - // Create Y mat from original - cv::Mat yMat(height, width, CV_8UC1); + return maskCIImage; +} + +#pragma mark - Helpers + +- (CVPixelBufferRef)createPixelBufferFromI420:(id)buffer { + id i420 = [buffer toI420]; + if (!i420) { + return NULL; + } + + int width = [i420 width]; + int height = [i420 height]; + int strideY = [i420 strideY]; + int strideU = [i420 strideU]; + int strideV = [i420 strideV]; + const uint8_t *dataY = [i420 dataY]; + const uint8_t *dataU = [i420 dataU]; + const uint8_t *dataV = [i420 dataV]; + + NSDictionary *attrs = @{ + (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA), + (id)kCVPixelBufferWidthKey : @(width), + (id)kCVPixelBufferHeightKey : @(height), + (id)kCVPixelBufferIOSurfacePropertiesKey : @{}, + }; + + CVPixelBufferRef pixelBuffer = NULL; + CVReturn result = CVPixelBufferCreate( + kCFAllocatorDefault, width, height, kCVPixelFormatType_32BGRA, + (__bridge CFDictionaryRef)attrs, &pixelBuffer); + if (result != kCVReturnSuccess) { + return NULL; + } + + // Convert I420 to BGRA using OpenCV + cv::Mat i420Mat(height * 3 / 2, width, CV_8UC1); + + // Copy Y plane for (int row = 0; row < height; row++) { - memcpy(yMat.ptr(row), i420Buffer.dataY + row * i420Buffer.strideY, width); + memcpy(i420Mat.ptr(row), dataY + row * strideY, width); } - // Create U and V mats - cv::Mat uMat(uvHeight, uvWidth, CV_8UC1); - cv::Mat vMat(uvHeight, uvWidth, CV_8UC1); + // Copy U and V planes + uint8_t *uvDst = i420Mat.data + (height * width); + int uvHeight = height / 2; + int uvWidth = width / 2; for 
(int row = 0; row < uvHeight; row++) { - memcpy(uMat.ptr(row), i420Buffer.dataU + row * i420Buffer.strideU, uvWidth); - memcpy(vMat.ptr(row), i420Buffer.dataV + row * i420Buffer.strideV, uvWidth); + memcpy(uvDst + row * uvWidth, dataU + row * strideU, uvWidth); + } + for (int row = 0; row < uvHeight; row++) { + memcpy(uvDst + uvHeight * uvWidth + row * uvWidth, dataV + row * strideV, + uvWidth); } - // Blur Y plane (2x downscale for less blocky artifacts) - cv::Mat ySmall, yBlurredSmall, yBlurred; - int smallW = width / 2; - int smallH = height / 2; - cv::resize(yMat, ySmall, cv::Size(smallW, smallH), 0, 0, cv::INTER_AREA); - cv::stackBlur(ySmall, yBlurredSmall, cv::Size(31, 31)); - cv::resize(yBlurredSmall, yBlurred, cv::Size(width, height), 0, 0, - cv::INTER_LINEAR); - - // Blur U and V planes (they're already at half res, just blur directly) - cv::Mat uBlurred, vBlurred; - cv::stackBlur(uMat, uBlurred, cv::Size(15, 15)); - cv::stackBlur(vMat, vBlurred, cv::Size(15, 15)); - - // Downscale mask for UV blending (UV is half resolution) - cv::Mat uvMask; - cv::resize(fullMask, uvMask, cv::Size(uvWidth, uvHeight), 0, 0, - cv::INTER_LINEAR); - - // Blend Y plane: foreground stays sharp, background gets blurred - uint8_t *outY = outBuffer.mutableDataY; - int outYStride = outBuffer.strideY; + cv::Mat bgraMat; + cv::cvtColor(i420Mat, bgraMat, cv::COLOR_YUV2BGRA_I420); + + CVPixelBufferLockBaseAddress(pixelBuffer, 0); + void *dst = CVPixelBufferGetBaseAddress(pixelBuffer); + size_t dstBytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer); for (int row = 0; row < height; row++) { - const uint8_t *srcY = yMat.ptr(row); - const uint8_t *blurY = yBlurred.ptr(row); - const float *maskRow = fullMask.ptr(row); - uint8_t *dstY = outY + row * outYStride; - - for (int col = 0; col < width; col++) { - float fg = maskRow[col]; - dstY[col] = - static_cast(blurY[col] * (1.0f - fg) + srcY[col] * fg); - } + memcpy((uint8_t *)dst + row * dstBytesPerRow, bgraMat.ptr(row), width * 4); } - // Blend U plane - uint8_t *outU = outBuffer.mutableDataU; - int outUStride = outBuffer.strideU; - for (int row = 0; row < uvHeight; row++) { - const uint8_t *srcU = uMat.ptr(row); - const uint8_t *blurU = uBlurred.ptr(row); - const float *maskRow = uvMask.ptr(row); - uint8_t *dstU = outU + row * outUStride; - - for (int col = 0; col < uvWidth; col++) { - float fg = maskRow[col]; - dstU[col] = - static_cast(blurU[col] * (1.0f - fg) + srcU[col] * fg); - } + CVPixelBufferUnlockBaseAddress(pixelBuffer, 0); + return pixelBuffer; +} + +- (CVPixelBufferRef)createGrayscalePixelBuffer:(cv::Mat &)grayMat { + int width = grayMat.cols; + int height = grayMat.rows; + + NSDictionary *attrs = @{ + (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_OneComponent8), + (id)kCVPixelBufferWidthKey : @(width), + (id)kCVPixelBufferHeightKey : @(height), + (id)kCVPixelBufferIOSurfacePropertiesKey : @{}, + }; + + CVPixelBufferRef pixelBuffer = NULL; + CVReturn result = CVPixelBufferCreate( + kCFAllocatorDefault, width, height, kCVPixelFormatType_OneComponent8, + (__bridge CFDictionaryRef)attrs, &pixelBuffer); + if (result != kCVReturnSuccess) { + return NULL; } - // Blend V plane - uint8_t *outV = outBuffer.mutableDataV; - int outVStride = outBuffer.strideV; - for (int row = 0; row < uvHeight; row++) { - const uint8_t *srcV = vMat.ptr(row); - const uint8_t *blurV = vBlurred.ptr(row); - const float *maskRow = uvMask.ptr(row); - uint8_t *dstV = outV + row * outVStride; - - for (int col = 0; col < uvWidth; col++) { - float fg = maskRow[col]; - 
dstV[col] = - static_cast(blurV[col] * (1.0f - fg) + srcV[col] * fg); - } + CVPixelBufferLockBaseAddress(pixelBuffer, 0); + void *dst = CVPixelBufferGetBaseAddress(pixelBuffer); + size_t dstBytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer); + + for (int row = 0; row < height; row++) { + memcpy((uint8_t *)dst + row * dstBytesPerRow, grayMat.ptr(row), width); } - RTCVideoFrame *processedFrame = - [[RTCVideoFrame alloc] initWithBuffer:outBuffer - rotation:frame.rotation - timeStampNs:frame.timeStampNs]; - return processedFrame; + CVPixelBufferUnlockBaseAddress(pixelBuffer, 0); + return pixelBuffer; +} + +- (void)ensurePoolForWidth:(size_t)width height:(size_t)height { + if (_poolWidth == width && _poolHeight == height && _outputPool) { + return; + } + + if (_outputPool) { + CVPixelBufferPoolRelease(_outputPool); + _outputPool = NULL; + } + + _poolWidth = width; + _poolHeight = height; + + NSDictionary *attrs = @{ + (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA), + (id)kCVPixelBufferWidthKey : @(width), + (id)kCVPixelBufferHeightKey : @(height), + (id)kCVPixelBufferIOSurfacePropertiesKey : @{}, + (id)kCVPixelBufferMetalCompatibilityKey : @YES, + }; + + CVPixelBufferPoolCreate(kCFAllocatorDefault, NULL, + (__bridge CFDictionaryRef)attrs, &_outputPool); +} + +- (void)dealloc { + if (_outputPool) { + CVPixelBufferPoolRelease(_outputPool); + } } @end diff --git a/packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.mm b/packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.mm index 58d471d24f..ad4dd113d7 100644 --- a/packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.mm +++ b/packages/react-native-executorch-webrtc/ios/ExecutorchWebRTC.mm @@ -1,28 +1,28 @@ #import "ExecutorchWebRTC.h" #import "ExecutorchFrameProcessor.h" -#import +#include "ProcessorProvider.h" @implementation ExecutorchWebRTC RCT_EXPORT_MODULE() static BOOL _processorRegistered = NO; +static NSString *const PROCESSOR_NAME = @"executorchBackgroundBlur"; + (void)registerProcessorIfNeeded { if (!_processorRegistered) { _processorRegistered = YES; ExecutorchFrameProcessor *processor = [ExecutorchFrameProcessor sharedInstance]; - [ProcessorProvider addProcessor:processor - forName:@"executorchBackgroundBlur"]; - NSLog(@"[ExecutorchWebRTC] Registered executorchBackgroundBlur processor"); + [ProcessorProvider addProcessor:processor forName:PROCESSOR_NAME]; + NSLog(@"[ExecutorchWebRTC] Registered %@ processor", PROCESSOR_NAME); } } -RCT_EXPORT_METHOD(setup) { [ExecutorchWebRTC registerProcessorIfNeeded]; } +#pragma mark - Fishjam-compatible API -RCT_EXPORT_METHOD(configureBackgroundRemoval : (NSString *)modelPath) { - NSLog(@"[ExecutorchWebRTC] configureBackgroundRemoval: %@", modelPath); +RCT_EXPORT_METHOD(initialize : (NSString *)modelPath) { + NSLog(@"[ExecutorchWebRTC] initialize: %@", modelPath); [ExecutorchWebRTC registerProcessorIfNeeded]; @@ -35,10 +35,35 @@ + (void)registerProcessorIfNeeded { [[ExecutorchFrameProcessor sharedInstance] configureWithModelPath:cleanPath]; } +RCT_EXPORT_METHOD(deinitialize) { + NSLog(@"[ExecutorchWebRTC] deinitialize"); + [[ExecutorchFrameProcessor sharedInstance] unloadModel]; +} + +RCT_EXPORT_METHOD(setBlurRadius : (double)radius) { + [[ExecutorchFrameProcessor sharedInstance] setBlurRadius:(float)radius]; +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(isAvailable) { + return @([[ExecutorchFrameProcessor sharedInstance] isAvailable]); +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(getProcessorName) { + return PROCESSOR_NAME; +} + +#pragma mark - 
Legacy API (for backward compatibility) + +RCT_EXPORT_METHOD(setup) { [ExecutorchWebRTC registerProcessorIfNeeded]; } + +RCT_EXPORT_METHOD(configureBackgroundRemoval : (NSString *)modelPath) { + [self initialize:modelPath]; +} + RCT_EXPORT_METHOD(configureBackgroundBlur : (NSString *) modelPath blurIntensity : (int)intensity) { - // Legacy alias - [self configureBackgroundRemoval:modelPath]; + [self initialize:modelPath]; + [self setBlurRadius:intensity]; } @end diff --git a/packages/react-native-executorch-webrtc/package.json b/packages/react-native-executorch-webrtc/package.json index 9ffe2c60d8..d205d318cb 100644 --- a/packages/react-native-executorch-webrtc/package.json +++ b/packages/react-native-executorch-webrtc/package.json @@ -31,17 +31,18 @@ "clean": "del-cli lib" }, "peerDependencies": { + "@fishjam-cloud/react-native-client": "*", + "@fishjam-cloud/react-native-webrtc": "*", "react": "*", "react-native": "*", - "react-native-executorch": "*", - "react-native-webrtc": ">=121.0.0" + "react-native-executorch": "*" }, "devDependencies": { + "@fishjam-cloud/react-native-webrtc": "^0.25.8", "@types/react": "~19.1.10", "react": "19.1.0", "react-native": "0.81.5", "react-native-executorch": "workspace:*", - "react-native-webrtc": "^124.0.7", "typescript": "~5.9.2" } } diff --git a/packages/react-native-executorch-webrtc/src/BackgroundBlur.ts b/packages/react-native-executorch-webrtc/src/BackgroundBlur.ts new file mode 100644 index 0000000000..6d55b3e7b6 --- /dev/null +++ b/packages/react-native-executorch-webrtc/src/BackgroundBlur.ts @@ -0,0 +1,64 @@ +import NativeBackgroundBlur from './NativeBackgroundBlur'; + +let initialized = false; + +/** + * Initialize the background blur processor with ExecuTorch segmentation model. + * Must be called before using the blur middleware. + * + * @param modelPath Path to the .pte segmentation model file + * + * @example + * ```ts + * import { initializeBackgroundBlur } from '@executorch/react-native-executorch-webrtc'; + * + * // Initialize with your model + * initializeBackgroundBlur('/path/to/selfie_segmenter.pte'); + * ``` + */ +export const initializeBackgroundBlur = (modelPath: string): void => { + if (!NativeBackgroundBlur) { + console.warn( + '[ExecutorchWebRTC] Native module not available. Is the package properly linked?' + ); + return; + } + + if (initialized) { + console.warn('[ExecutorchWebRTC] Background blur already initialized'); + return; + } + + NativeBackgroundBlur.initialize(modelPath); + initialized = true; +}; + +/** + * Deinitialize and release background blur resources. 
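+ * No-op when blur was never initialized, so it is safe to call
+ * unconditionally during teardown.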
+ * + * @example + * ```ts + * import { deinitializeBackgroundBlur } from '@executorch/react-native-executorch-webrtc'; + * + * deinitializeBackgroundBlur(); + * ``` + */ +export const deinitializeBackgroundBlur = (): void => { + if (!NativeBackgroundBlur) { + return; + } + + if (!initialized) { + return; + } + + NativeBackgroundBlur.deinitialize(); + initialized = false; +}; + +/** + * Check if background blur has been initialized + */ +export const isBackgroundBlurInitialized = (): boolean => { + return initialized; +}; diff --git a/packages/react-native-executorch-webrtc/src/NativeBackgroundBlur.ts b/packages/react-native-executorch-webrtc/src/NativeBackgroundBlur.ts new file mode 100644 index 0000000000..38d51cf2e2 --- /dev/null +++ b/packages/react-native-executorch-webrtc/src/NativeBackgroundBlur.ts @@ -0,0 +1,33 @@ +import type { TurboModule } from 'react-native'; +import { TurboModuleRegistry } from 'react-native'; + +export interface Spec extends TurboModule { + /** + * Initialize the background blur processor with a segmentation model + * @param modelPath Path to the .pte segmentation model file + */ + initialize(modelPath: string): void; + + /** + * Deinitialize and release resources + */ + deinitialize(): void; + + /** + * Set the blur radius/intensity + * @param radius Blur sigma value (default: 12.0) + */ + setBlurRadius(radius: number): void; + + /** + * Check if background blur is available on this device + */ + isAvailable(): boolean; + + /** + * Get the processor name to use with _setVideoEffect + */ + getProcessorName(): string; +} + +export default TurboModuleRegistry.get('ExecutorchWebRTC'); diff --git a/packages/react-native-executorch-webrtc/src/index.ts b/packages/react-native-executorch-webrtc/src/index.ts index 1d4ac30052..36b79f26c0 100644 --- a/packages/react-native-executorch-webrtc/src/index.ts +++ b/packages/react-native-executorch-webrtc/src/index.ts @@ -1,82 +1,24 @@ /** - * ExecuTorch WebRTC integration + * ExecuTorch WebRTC Background Blur * - * This package provides frame processing integration between - * react-native-executorch and react-native-webrtc. + * This package provides background blur/removal for WebRTC video calls + * using ExecuTorch segmentation models. API is compatible with + * @fishjam-cloud/react-native-webrtc-background-blur. * * @packageDocumentation */ -import { NativeModules, Platform } from 'react-native'; - -// Auto-initialize the native module to register the processor -// This happens when the package is first imported -if (Platform.OS === 'android' || Platform.OS === 'ios') { - const { ExecutorchWebRTC } = NativeModules; - if (ExecutorchWebRTC) { - try { - ExecutorchWebRTC.setup(); - } catch (error) { - console.warn('Failed to initialize ExecutorchWebRTC:', error); - } - } else { - console.warn( - 'ExecutorchWebRTC native module not found - is the package properly linked?' 
- ); - } -} - -/** - * Configure background removal using semantic segmentation - * @param modelPath Path to the selfie segmentation model (.pte file) - */ -export function configureBackgroundRemoval(modelPath: string): void { - if (Platform.OS !== 'android' && Platform.OS !== 'ios') { - console.warn( - 'configureBackgroundRemoval: Only supported on Android and iOS' - ); - return; - } - - const { ExecutorchWebRTC } = NativeModules; - if (ExecutorchWebRTC) { - console.log( - '[ExecutorchWebRTC] Calling configureBackgroundRemoval:', - modelPath - ); - ExecutorchWebRTC.configureBackgroundRemoval(modelPath); - console.log('[ExecutorchWebRTC] configureBackgroundRemoval call completed'); - } else { - console.error( - '[ExecutorchWebRTC] Native module not found! Is the package linked?' - ); - } -} - -/** - * Get the current frame processing FPS - * @returns Promise resolving to current FPS (0 if not processing) - */ -export async function getFps(): Promise { - if (Platform.OS !== 'android') { - return 0; - } - - const { ExecutorchWebRTC } = NativeModules; - if (ExecutorchWebRTC) { - return ExecutorchWebRTC.getFps(); - } - return 0; -} - -// Legacy alias -export const configureBackgroundBlur = configureBackgroundRemoval; +export { useBackgroundBlur } from './useBackgroundBlur'; +export type { + UseBackgroundBlurOptions, + TrackMiddleware, +} from './useBackgroundBlur'; export { - useWebRTCFrameProcessor, - enableFrameProcessor, - disableFrameProcessor, - PROCESSOR_NAMES, - type ProcessorName, - type WebRTCFrameProcessorOptions, -} from './useWebRTCFrameProcessor'; + initializeBackgroundBlur, + deinitializeBackgroundBlur, + isBackgroundBlurInitialized, +} from './BackgroundBlur'; + +export type { Spec as BackgroundBlurSpec } from './NativeBackgroundBlur'; +export { default as NativeBackgroundBlur } from './NativeBackgroundBlur'; diff --git a/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts b/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts new file mode 100644 index 0000000000..04f9a75ede --- /dev/null +++ b/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts @@ -0,0 +1,123 @@ +import { useCallback, useEffect, useRef } from 'react'; +import { Platform } from 'react-native'; +import NativeBackgroundBlur from './NativeBackgroundBlur'; +import { initializeBackgroundBlur } from './BackgroundBlur'; + +/** + * Extended MediaStreamTrack with WebRTC video effects API + */ +type NativeMediaStreamTrack = MediaStreamTrack & { + _setVideoEffect: (name: string) => void; + _setVideoEffects: (names: string[] | null) => void; +}; + +/** + * Middleware function type compatible with Fishjam SDK + */ +export type TrackMiddleware = (track: MediaStreamTrack) => { + track: MediaStreamTrack; + onClear: () => void; +}; + +/** + * Options for useBackgroundBlur hook + */ +export type UseBackgroundBlurOptions = { + /** + * Path to the ExecuTorch segmentation model (.pte file) + * Required for initialization + */ + modelUri: string; + + /** + * Blur intensity/radius (default: 12) + */ + blurRadius?: number; +}; + +const PROCESSOR_NAME = 'executorchBackgroundBlur'; + +/** + * Hook to enable background blur on WebRTC video tracks. + * Compatible with Fishjam SDK's TrackMiddleware interface. 
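+ * The returned blurMiddleware enables the native "executorchBackgroundBlur"
+ * effect on the track it receives and hands back an onClear callback that
+ * removes the effect again.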
+ * + * @param options Configuration options including model path and blur radius + * @returns Object containing blurMiddleware for use with Fishjam SDK + * + * @example + * ```tsx + * import { useBackgroundBlur } from '@executorch/react-native-executorch-webrtc'; + * + * function VideoCall() { + * const { blurMiddleware } = useBackgroundBlur({ + * modelUri: 'file:///path/to/selfie_segmenter.pte', + * blurRadius: 15, + * }); + * + * // Use with Fishjam SDK + * const { toggleCamera } = useCamera({ + * cameraTrackMiddleware: blurMiddleware, + * }); + * + * // Or use directly with a track + * const applyBlur = (track: MediaStreamTrack) => { + * const { onClear } = blurMiddleware(track); + * // Call onClear() to remove the effect + * }; + * } + * ``` + */ +export function useBackgroundBlur(options: UseBackgroundBlurOptions): { + blurMiddleware: TrackMiddleware; +} { + const { modelUri, blurRadius = 12 } = options; + const initializedRef = useRef(false); + + useEffect(() => { + if (!NativeBackgroundBlur) { + console.warn('[useBackgroundBlur] Native module not available'); + return; + } + + if (!initializedRef.current && modelUri) { + initializeBackgroundBlur(modelUri); + initializedRef.current = true; + } + + return () => { + // TODO: unload native module? + }; + }, [modelUri]); + + // Update blur radius when it changes + useEffect(() => { + if (!NativeBackgroundBlur) { + console.warn('[useBackgroundBlur] Native module not available'); + return; + } + + NativeBackgroundBlur.setBlurRadius(blurRadius); + }, [blurRadius]); + + const blurMiddleware: TrackMiddleware = useCallback( + (track: MediaStreamTrack) => { + const nativeTrack = track as NativeMediaStreamTrack; + + // Apply the video effect + nativeTrack._setVideoEffect(PROCESSOR_NAME); + + return { + track, + onClear: () => { + // Android expects null to disable, iOS expects empty array + nativeTrack._setVideoEffects( + Platform.OS === 'ios' ? [] : (null as any) + ); + }, + }; + }, + [] + ); + + return { blurMiddleware }; +} diff --git a/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts b/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts deleted file mode 100644 index da11a4ef6b..0000000000 --- a/packages/react-native-executorch-webrtc/src/useWebRTCFrameProcessor.ts +++ /dev/null @@ -1,179 +0,0 @@ -import { useEffect } from 'react'; -import { Platform, DeviceEventEmitter } from 'react-native'; -import type { MediaStream, MediaStreamTrack } from 'react-native-webrtc'; - -export const PROCESSOR_NAMES = { - default: 'executorchBackgroundBlur', - experimental: 'executorchBackgroundBlurNew', -} as const; - -export type ProcessorName = - (typeof PROCESSOR_NAMES)[keyof typeof PROCESSOR_NAMES]; - -/** - * Result from frame processing - */ -export interface FrameProcessingResult { - result: string; // JSON string with detection results - width: number; - height: number; - timestamp: number; -} - -/** - * Options for frame processor - */ -export interface WebRTCFrameProcessorOptions { - enabled?: boolean; - onResults?: (results: FrameProcessingResult) => void; - /** Which processor to use. Defaults to 'executorchBackgroundBlur' */ - processorName?: ProcessorName | string; -} - -/** - * Hook to enable ExecuTorch frame processing on a WebRTC video track. 
- * - * @param stream - The MediaStream containing the video track to process - * @param options - Configuration options - * - * @example - * ```tsx - * const stream = await mediaDevices.getUserMedia({ video: true }); - * useWebRTCFrameProcessor(stream, { - * onResults: (results) => { - * console.log('Detections:', JSON.parse(results.result)); - * } - * }); - * ``` - */ -export function useWebRTCFrameProcessor( - stream: MediaStream | null | undefined, - options: WebRTCFrameProcessorOptions = {} -): void { - const { - enabled = true, - onResults, - processorName = PROCESSOR_NAMES.default, - } = options; - useEffect(() => { - if (!stream || !enabled) { - return; - } - - const videoTracks = stream.getVideoTracks(); - if (videoTracks.length === 0) { - console.warn('useWebRTCFrameProcessor: No video tracks found in stream'); - return; - } - - const videoTrack = videoTracks[0]; - if (!videoTrack) { - return; - } - - // Set up event listener for results - const subscription = onResults - ? DeviceEventEmitter.addListener( - 'onFrameProcessed', - (event: FrameProcessingResult) => { - onResults(event); - } - ) - : null; - - try { - const track = videoTrack as any; - if (typeof track._setVideoEffects === 'function') { - track._setVideoEffects([processorName]); - console.log( - `✅ ExecuTorch frame processor "${processorName}" enabled on track ${videoTrack.id}` - ); - } else { - console.warn('useWebRTCFrameProcessor: _setVideoEffects not available'); - } - } catch (error) { - console.error( - 'useWebRTCFrameProcessor: Failed to enable processor:', - error - ); - } - - // Cleanup: disable processor when unmounting - return () => { - subscription?.remove(); - - try { - const track = videoTrack as any; - if (typeof track._setVideoEffects === 'function') { - track._setVideoEffects([]); - console.log( - `ExecuTorch frame processor disabled on track ${videoTrack.id}` - ); - } - } catch (error) { - console.error( - 'useWebRTCFrameProcessor: Failed to disable processor:', - error - ); - } - }; - }, [stream, enabled, onResults, processorName]); -} - -/** - * Manually enable ExecuTorch frame processing on a video track. - * - * @param videoTrack - The video track to process - * @param processorName - Which processor to use (default: 'executorchBackgroundBlur') - * - * @example - * ```tsx - * const stream = await mediaDevices.getUserMedia({ video: true }); - * const track = stream.getVideoTracks()[0]; - * enableFrameProcessor(track); - * // or use experimental: - * enableFrameProcessor(track, PROCESSOR_NAMES.experimental); - * ``` - */ -export function enableFrameProcessor( - videoTrack: MediaStreamTrack, - processorName: ProcessorName | string = PROCESSOR_NAMES.default -): void { - if (Platform.OS !== 'android') { - console.warn('enableFrameProcessor: Currently only supported on Android'); - return; - } - - try { - const track = videoTrack as any; - if (typeof track._setVideoEffects === 'function') { - track._setVideoEffects([processorName]); - console.log( - `✅ ExecuTorch frame processor "${processorName}" enabled on track ${videoTrack.id}` - ); - } - } catch (error) { - console.error('enableFrameProcessor: Failed to enable processor:', error); - throw error; - } -} - -/** - * Manually disable ExecuTorch frame processing on a video track. 
- * - * @param videoTrack - The video track to stop processing - */ -export function disableFrameProcessor(videoTrack: MediaStreamTrack): void { - try { - const track = videoTrack as any; - if (typeof track._setVideoEffects === 'function') { - track._setVideoEffects([]); - console.log( - `ExecuTorch frame processor disabled on track ${videoTrack.id}` - ); - } - } catch (error) { - console.error('disableFrameProcessor: Failed to disable processor:', error); - throw error; - } -} From 3bd02af9fecc885e00be8d79a1bb37b8e3e8c6f7 Mon Sep 17 00:00:00 2001 From: chmjkb Date: Fri, 17 Apr 2026 13:05:24 +0200 Subject: [PATCH 09/19] fix: build issues --- .../android/CMakeLists.txt | 6 ++++++ .../android/build.gradle | 9 ++------- .../android/src/main/AndroidManifest.xml | 1 + .../react-native-executorch-webrtc.podspec | 12 +++++++----- 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/packages/react-native-executorch-webrtc/android/CMakeLists.txt b/packages/react-native-executorch-webrtc/android/CMakeLists.txt index 71447a0b45..73375ba4c3 100644 --- a/packages/react-native-executorch-webrtc/android/CMakeLists.txt +++ b/packages/react-native-executorch-webrtc/android/CMakeLists.txt @@ -57,6 +57,9 @@ else() set(OPENCV_THIRD_PARTY_LIBS "") endif() +# OpenMP for OpenCV parallel operations +target_link_options(${CMAKE_PROJECT_NAME} PRIVATE -fopenmp -static-openmp) + # Link against libraries target_link_libraries( ${CMAKE_PROJECT_NAME} @@ -70,3 +73,6 @@ target_link_libraries( ${OPENCV_THIRD_PARTY_LIBS} executorch ) + +# Ensure react-native-executorch symbols are exported (needed for BaseModel/BaseSemanticSegmentation) +target_link_options(${CMAKE_PROJECT_NAME} PRIVATE -Wl,--no-as-needed) diff --git a/packages/react-native-executorch-webrtc/android/build.gradle b/packages/react-native-executorch-webrtc/android/build.gradle index 88a9cebe65..bdda915ef0 100644 --- a/packages/react-native-executorch-webrtc/android/build.gradle +++ b/packages/react-native-executorch-webrtc/android/build.gradle @@ -66,12 +66,7 @@ dependencies { implementation 'com.facebook.react:react-native:+' // WebRTC classes - provided by app via autolinking - if (findProject(':react-native-webrtc') != null) { - compileOnly project(':react-native-webrtc') - } - + compileOnly project(':fishjam-cloud_react-native-webrtc') // ExecuTorch for vision model processing - if (findProject(':react-native-executorch') != null) { - implementation project(':react-native-executorch') - } + compileOnly project(':react-native-executorch') } diff --git a/packages/react-native-executorch-webrtc/android/src/main/AndroidManifest.xml b/packages/react-native-executorch-webrtc/android/src/main/AndroidManifest.xml index e68f8b269a..f691675edd 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/AndroidManifest.xml +++ b/packages/react-native-executorch-webrtc/android/src/main/AndroidManifest.xml @@ -1,3 +1,4 @@ + diff --git a/packages/react-native-executorch-webrtc/react-native-executorch-webrtc.podspec b/packages/react-native-executorch-webrtc/react-native-executorch-webrtc.podspec index 025b520397..95417ef2e0 100644 --- a/packages/react-native-executorch-webrtc/react-native-executorch-webrtc.podspec +++ b/packages/react-native-executorch-webrtc/react-native-executorch-webrtc.podspec @@ -16,18 +16,20 @@ Pod::Spec.new do |s| s.source_files = "ios/**/*.{h,m,mm}" # react-native-executorch exposes rnexecutorch/* headers via its header_dir. 
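+  # Note: rne_path below assumes react-native-executorch is hoisted to the
+  # workspace-root node_modules; adjust the relative path for other layouts.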
- # However, executorch SDK headers (executorch/*) from third-party/include - # don't propagate to dependent pods, so we need to add them here. - rne_pod_root = '"$(PODS_ROOT)/react-native-executorch"' + # However, executorch SDK headers and internal headers don't propagate to + # dependent pods, so we need to add them here. + rne_path = '${PODS_ROOT}/../../node_modules/react-native-executorch' s.pod_target_xcconfig = { "USE_HEADERMAP" => "YES", "CLANG_CXX_LANGUAGE_STANDARD" => "c++20", - "HEADER_SEARCH_PATHS" => "#{rne_pod_root}/third-party/include" + "HEADER_SEARCH_PATHS" => "\"#{rne_path}/third-party/include\" \"#{rne_path}/common\"" } s.dependency "React-Core" s.dependency "react-native-executorch" - s.dependency "react-native-webrtc" s.dependency "opencv-rne", "~> 4.11.0" + s.dependency 'FishjamReactNativeWebrtc' + + install_modules_dependencies(s) end From 51f185f6b77443b61bf3da60b198609a99704db2 Mon Sep 17 00:00:00 2001 From: chmjkb Date: Fri, 17 Apr 2026 13:14:48 +0200 Subject: [PATCH 10/19] deps: update lockfile --- yarn.lock | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/yarn.lock b/yarn.lock index 03b6b8ab9a..002ff75185 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2850,6 +2850,18 @@ __metadata: languageName: node linkType: hard +"@fishjam-cloud/react-native-webrtc@npm:^0.25.8": + version: 0.25.8 + resolution: "@fishjam-cloud/react-native-webrtc@npm:0.25.8" + dependencies: + base64-js: "npm:1.5.1" + debug: "npm:4.3.4" + peerDependencies: + react-native: "*" + checksum: 10/e88ad3b8355191f83d35fa2f51208d38f0dda7e90aad1e2f3a02036726a3ed922008422c1051e7a6f45c8027eeefa8dbf0be75cb65ccdd980b80a55a251fbcee + languageName: node + linkType: hard + "@gar/promise-retry@npm:^1.0.0": version: 1.0.2 resolution: "@gar/promise-retry@npm:1.0.2" @@ -14520,17 +14532,18 @@ __metadata: version: 0.0.0-use.local resolution: "react-native-executorch-webrtc@workspace:packages/react-native-executorch-webrtc" dependencies: + "@fishjam-cloud/react-native-webrtc": "npm:^0.25.8" "@types/react": "npm:~19.1.10" react: "npm:19.1.0" react-native: "npm:0.81.5" react-native-executorch: "workspace:*" - react-native-webrtc: "npm:^124.0.7" typescript: "npm:~5.9.2" peerDependencies: + "@fishjam-cloud/react-native-client": "*" + "@fishjam-cloud/react-native-webrtc": "*" react: "*" react-native: "*" react-native-executorch: "*" - react-native-webrtc: ">=121.0.0" languageName: unknown linkType: soft From b17f0d16596a67f35ac4dcc4c885baa76455b11d Mon Sep 17 00:00:00 2001 From: chmjkb Date: Fri, 17 Apr 2026 13:37:14 +0200 Subject: [PATCH 11/19] chore: remove webrtc from example app --- apps/computer-vision/app/index.tsx | 6 - .../computer-vision/app/webrtc_test/index.tsx | 500 ------------------ apps/computer-vision/package.json | 2 - 3 files changed, 508 deletions(-) delete mode 100644 apps/computer-vision/app/webrtc_test/index.tsx diff --git a/apps/computer-vision/app/index.tsx b/apps/computer-vision/app/index.tsx index 98177de281..b415a49cdc 100644 --- a/apps/computer-vision/app/index.tsx +++ b/apps/computer-vision/app/index.tsx @@ -17,12 +17,6 @@ export default function Home() { > Vision Camera - router.navigate('webrtc_test/')} - > - WebRTC Test - router.navigate('classification/')} diff --git a/apps/computer-vision/app/webrtc_test/index.tsx b/apps/computer-vision/app/webrtc_test/index.tsx deleted file mode 100644 index 48f1648f43..0000000000 --- a/apps/computer-vision/app/webrtc_test/index.tsx +++ /dev/null @@ -1,500 +0,0 @@ -import React, { useEffect, useRef, useState } from 'react'; 
-import { - View, - Text, - StyleSheet, - TouchableOpacity, - Alert, - Platform, - ScrollView, -} from 'react-native'; -import { - RTCView, - mediaDevices, - MediaStream, - MediaStreamTrack, -} from 'react-native-webrtc'; -import { - useWebRTCFrameProcessor, - configureBackgroundRemoval, -} from 'react-native-executorch-webrtc'; -import { SELFIE_SEGMENTATION, ResourceFetcher } from 'react-native-executorch'; -import ColorPalette from '../../colors'; - -export default function WebRTCTest() { - const [stream, setStream] = useState(null); - const [isFrontCamera, setIsFrontCamera] = useState(true); - const [cameraStarted, setCameraStarted] = useState(false); - const [devices, setDevices] = useState([]); - const [processingResults, setProcessingResults] = useState(''); - const [modelStatus, setModelStatus] = useState('Not loaded'); - const [downloadProgress, setDownloadProgress] = useState(0); - const streamRef = useRef(null); - - // Download and configure the segmentation model on mount - useEffect(() => { - const downloadModel = async () => { - try { - setModelStatus('Downloading...'); - const paths = await ResourceFetcher.fetch((progress) => { - setDownloadProgress(progress); - }, SELFIE_SEGMENTATION.modelSource); - - if (!paths?.[0]) { - throw new Error('Failed to download model'); - } - - const modelPath = paths[0]; - console.log('Model downloaded:', modelPath); - - // Configure native WebRTC processor with the model path - configureBackgroundRemoval(modelPath); - setModelStatus(`Ready: ${modelPath.split('/').pop()}`); - } catch (error) { - console.error('Error downloading model:', error); - setModelStatus( - `Error: ${error instanceof Error ? error.message : 'Unknown'}` - ); - } - }; - - downloadModel(); - }, []); - - // Enable ExecuTorch frame processing on the stream - useWebRTCFrameProcessor(stream, { - onResults: (results) => { - console.log('Frame processing results:', results); - setProcessingResults(JSON.stringify(results, null, 2)); - }, - }); - - // Enumerate available devices - const enumerateDevices = async () => { - try { - const deviceInfos = await mediaDevices.enumerateDevices(); - console.log('Available devices:', deviceInfos); - setDevices(deviceInfos.filter((d: any) => d.kind === 'videoinput')); - } catch (error) { - console.error('Error enumerating devices:', error); - } - }; - - // Start camera with WebRTC getUserMedia - const startCamera = async () => { - try { - console.log('Requesting camera access...'); - - // Enumerate devices before requesting camera - await enumerateDevices(); - - const mediaStream = await mediaDevices.getUserMedia({ - video: { - facingMode: isFrontCamera ? 
'user' : 'environment', - frameRate: 30, - width: { ideal: 640 }, - height: { ideal: 480 }, - }, - audio: false, - }); - - console.log('Camera stream obtained:', mediaStream.id); - console.log('Video tracks:', mediaStream.getVideoTracks().length); - - const videoTrack = mediaStream.getVideoTracks()[0]; - if (videoTrack) { - console.log('Video track settings:', videoTrack.getSettings()); - - // getCapabilities() is not implemented on Android - try { - if (typeof videoTrack.getCapabilities === 'function') { - console.log( - 'Video track capabilities:', - videoTrack.getCapabilities() - ); - } - } catch (e) { - console.log('getCapabilities not supported on this platform'); - } - } - - setStream(mediaStream); - streamRef.current = mediaStream; - setCameraStarted(true); - } catch (error) { - console.error('Error accessing camera:', error); - Alert.alert( - 'Camera Error', - `Failed to access camera: ${error instanceof Error ? error.message : 'Unknown error'}` - ); - } - }; - - // Stop camera and release resources - const stopCamera = () => { - if (streamRef.current) { - console.log('Stopping camera...'); - streamRef.current.getTracks().forEach((track) => { - track.stop(); - console.log('Stopped track:', track.kind, track.id); - }); - setStream(null); - streamRef.current = null; - setCameraStarted(false); - } - }; - - // Switch between front and back camera - const switchCamera = async () => { - if (!streamRef.current) return; - - const newFacingMode = isFrontCamera ? 'environment' : 'user'; - console.log('Switching camera to:', newFacingMode); - - // Stop current stream completely - streamRef.current.getTracks().forEach((track) => { - track.stop(); - }); - setStream(null); - streamRef.current = null; - - // Wait for camera to fully release, then start new stream - await new Promise((resolve) => setTimeout(resolve, 300)); - - try { - const mediaStream = await mediaDevices.getUserMedia({ - video: { - facingMode: newFacingMode, - frameRate: 30, - width: { ideal: 640 }, - height: { ideal: 480 }, - }, - audio: false, - }); - - setStream(mediaStream); - streamRef.current = mediaStream; - setIsFrontCamera(!isFrontCamera); - console.log('Camera switched successfully'); - } catch (error) { - console.error('Error switching camera:', error); - setCameraStarted(false); - } - }; - - // Cleanup on unmount - useEffect(() => { - return () => { - if (streamRef.current) { - stopCamera(); - } - }; - }, []); - - return ( - - WebRTC Camera Test - - - Basic WebRTC camera test using react-native-webrtc's getUserMedia. This - tests the camera access without any ExecuTorch processing. - - - {/* Camera Preview */} - - {stream ? ( - - ) : ( - - - {cameraStarted ? 'Starting camera...' : 'Camera not started'} - - - )} - - {/* Overlay Info */} - {stream && ( - - - Camera: {isFrontCamera ? 'Front' : 'Back'} - - - Stream ID: {stream.id.slice(0, 8)}... - - - Tracks: {stream.getTracks().length} - - - )} - - - {/* Controls */} - - - Start Camera - - - - Stop Camera - - - - - Switch to {isFrontCamera ? 'Back' : 'Front'} - - - - - {/* Model Status */} - - Segmentation Model: - Status: {modelStatus} - {downloadProgress > 0 && downloadProgress < 1 && ( - - Progress: {(downloadProgress * 100).toFixed(0)}% - - )} - - - {/* Stream Info */} - - Stream Information: - {stream ? ( - <> - Stream URL: {stream.toURL()} - - Active: {stream.active ? 'Yes' : 'No'} - - {stream.getVideoTracks().map((track, idx) => ( - - Track {idx + 1}: - - ID: {track.id} - - {' '} - - Enabled: {track.enabled ? 
'Yes' : 'No'} - - - {' '} - - Ready State: {track.readyState} - - - Label: {track.label} - - ))} - - ) : ( - No active stream - )} - - {devices.length > 0 && ( - - - Available Cameras: {devices.length} - - {devices.map((device, idx) => ( - - - {device.label || `Camera ${idx + 1}`} ( - {device.facing || 'unknown'}) - - ))} - - )} - - - {/* Processing Results */} - {processingResults && ( - - Frame Processing Results: - {processingResults} - - )} - - {/* Notes */} - - Implementation Notes: - - ✓ Uses mediaDevices.getUserMedia() for camera access - - - ✓ Displays stream in RTCView component - - - ✓ Uses ExecuTorch frame processor for real-time processing - - - ✓ Processes frames at ~10 FPS (every 100ms) - - - ✓ Results sent back to JS via event emitter - - - - ); -} - -const styles = StyleSheet.create({ - container: { - flex: 1, - backgroundColor: '#fff', - }, - contentContainer: { - padding: 20, - paddingBottom: 40, - }, - title: { - fontSize: 24, - fontWeight: 'bold', - color: ColorPalette.strongPrimary, - marginBottom: 10, - }, - description: { - fontSize: 14, - color: '#666', - marginBottom: 20, - lineHeight: 20, - }, - videoContainer: { - width: '100%', - height: 500, - backgroundColor: '#000', - borderRadius: 12, - overflow: 'hidden', - marginBottom: 20, - }, - video: { - width: '100%', - height: '100%', - }, - placeholder: { - flex: 1, - justifyContent: 'center', - alignItems: 'center', - }, - placeholderText: { - color: '#fff', - fontSize: 16, - }, - overlay: { - position: 'absolute', - top: 10, - left: 10, - backgroundColor: 'rgba(0,0,0,0.7)', - padding: 10, - borderRadius: 8, - }, - overlayText: { - color: '#fff', - fontSize: 12, - marginBottom: 2, - }, - controls: { - flexDirection: 'row', - justifyContent: 'space-between', - marginBottom: 20, - gap: 10, - }, - button: { - flex: 1, - backgroundColor: ColorPalette.strongPrimary, - borderRadius: 8, - padding: 12, - alignItems: 'center', - justifyContent: 'center', - }, - switchButton: { - backgroundColor: ColorPalette.primary, - }, - buttonDisabled: { - backgroundColor: '#ccc', - opacity: 0.5, - }, - buttonText: { - color: 'white', - fontSize: 14, - fontWeight: '600', - }, - infoContainer: { - backgroundColor: '#f5f5f5', - padding: 15, - borderRadius: 8, - marginBottom: 15, - }, - infoTitle: { - fontSize: 16, - fontWeight: 'bold', - color: ColorPalette.strongPrimary, - marginBottom: 10, - }, - infoText: { - fontSize: 12, - color: '#333', - marginBottom: 4, - fontFamily: Platform.OS === 'ios' ? 'Courier' : 'monospace', - }, - trackInfo: { - marginTop: 8, - paddingTop: 8, - borderTopWidth: 1, - borderTopColor: '#ddd', - }, - resultsContainer: { - backgroundColor: '#e6f7ff', - padding: 15, - borderRadius: 8, - marginBottom: 15, - borderWidth: 1, - borderColor: '#91d5ff', - }, - resultsTitle: { - fontSize: 16, - fontWeight: 'bold', - color: ColorPalette.strongPrimary, - marginBottom: 10, - }, - resultsText: { - fontSize: 11, - color: '#333', - fontFamily: Platform.OS === 'ios' ? 
'Courier' : 'monospace', - }, - notesContainer: { - backgroundColor: '#fff9e6', - padding: 15, - borderRadius: 8, - borderWidth: 1, - borderColor: '#ffe066', - }, - notesTitle: { - fontSize: 14, - fontWeight: 'bold', - color: '#996600', - marginBottom: 8, - }, - notesText: { - fontSize: 12, - color: '#664400', - marginBottom: 4, - }, -}); diff --git a/apps/computer-vision/package.json b/apps/computer-vision/package.json index 5306b15a53..a597992955 100644 --- a/apps/computer-vision/package.json +++ b/apps/computer-vision/package.json @@ -28,7 +28,6 @@ "react-native-device-info": "^15.0.2", "react-native-executorch": "workspace:*", "react-native-executorch-expo-resource-fetcher": "workspace:*", - "react-native-executorch-webrtc": "workspace:*", "react-native-gesture-handler": "~2.28.0", "react-native-image-picker": "^7.2.2", "react-native-loading-spinner-overlay": "^3.0.1", @@ -40,7 +39,6 @@ "react-native-svg": "15.15.3", "react-native-svg-transformer": "^1.5.3", "react-native-vision-camera": "5.0.0-beta.7", - "react-native-webrtc": "^124.0.7", "react-native-worklets": "0.7.4" }, "devDependencies": { From 7b6ef6d95c09b384982225c320a53694833523d9 Mon Sep 17 00:00:00 2001 From: chmjkb Date: Fri, 17 Apr 2026 14:19:21 +0200 Subject: [PATCH 12/19] deps: update lockfile --- yarn.lock | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/yarn.lock b/yarn.lock index 002ff75185..fbec123671 100644 --- a/yarn.lock +++ b/yarn.lock @@ -7280,7 +7280,6 @@ __metadata: react-native-device-info: "npm:^15.0.2" react-native-executorch: "workspace:*" react-native-executorch-expo-resource-fetcher: "workspace:*" - react-native-executorch-webrtc: "workspace:*" react-native-gesture-handler: "npm:~2.28.0" react-native-image-picker: "npm:^7.2.2" react-native-loading-spinner-overlay: "npm:^3.0.1" @@ -7292,7 +7291,6 @@ __metadata: react-native-svg: "npm:15.15.3" react-native-svg-transformer: "npm:^1.5.3" react-native-vision-camera: "npm:5.0.0-beta.7" - react-native-webrtc: "npm:^124.0.7" react-native-worklets: "npm:0.7.4" languageName: unknown linkType: soft @@ -8681,13 +8679,6 @@ __metadata: languageName: node linkType: hard -"event-target-shim@npm:6.0.2": - version: 6.0.2 - resolution: "event-target-shim@npm:6.0.2" - checksum: 10/aa69fc4193cad3f1e4dc0c2d3f2689ea2d477f5ff2fbee8b65f866035b15658e1985932b06ba2190c3d2cc9cc6802c26facd6c60487590c1a05f44545ec24f42 - languageName: node - linkType: hard - "event-target-shim@npm:^5.0.0": version: 5.0.1 resolution: "event-target-shim@npm:5.0.1" @@ -14528,7 +14519,7 @@ __metadata: languageName: unknown linkType: soft -"react-native-executorch-webrtc@workspace:*, react-native-executorch-webrtc@workspace:packages/react-native-executorch-webrtc": +"react-native-executorch-webrtc@workspace:packages/react-native-executorch-webrtc": version: 0.0.0-use.local resolution: "react-native-executorch-webrtc@workspace:packages/react-native-executorch-webrtc" dependencies: @@ -14789,19 +14780,6 @@ __metadata: languageName: node linkType: hard -"react-native-webrtc@npm:^124.0.7": - version: 124.0.7 - resolution: "react-native-webrtc@npm:124.0.7" - dependencies: - base64-js: "npm:1.5.1" - debug: "npm:4.3.4" - event-target-shim: "npm:6.0.2" - peerDependencies: - react-native: ">=0.60.0" - checksum: 10/eeeb390aaa51d42dac36c846aa2aa143e49d8fb3f4d349f776fdc47e9a4f66b310097f9dd84c17f6fc9f7e563fddc95766e676f74e2333a4f127dbfa43b59ee4 - languageName: node - linkType: hard - "react-native-worklets@npm:0.7.4": version: 0.7.4 resolution: "react-native-worklets@npm:0.7.4" 
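Patches 10-12 above swap the WebRTC peer dependency from upstream `react-native-webrtc` to the `@fishjam-cloud/react-native-webrtc` fork recorded in the lockfile hunks. For a consuming app the change is confined to package names and imports; a minimal sketch, assuming the fork preserves upstream's `mediaDevices`/`MediaStream` surface (the `startCamera` helper is illustrative and not part of these patches):

```ts
// Consumer-side migration sketch (assumption: the Fishjam fork re-exports
// the upstream react-native-webrtc API, as the removed example's imports
// suggest).
// Before: import { mediaDevices, MediaStream } from 'react-native-webrtc';
import {
  mediaDevices,
  MediaStream,
} from '@fishjam-cloud/react-native-webrtc';

// The getUserMedia call itself is unchanged; only the package specifier moves.
export async function startCamera(): Promise<MediaStream> {
  return mediaDevices.getUserMedia({ video: true, audio: false });
}
```

The swap is also why the workspace entry above now lists `@fishjam-cloud/react-native-client` and `@fishjam-cloud/react-native-webrtc` as peers rather than `react-native-webrtc`.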
From 1791bdb0a271a0a6a2252b9d697244f95f9f292d Mon Sep 17 00:00:00 2001 From: chmjkb Date: Fri, 17 Apr 2026 14:45:18 +0200 Subject: [PATCH 13/19] chore: lint --- .cspell-wordlist.txt | 5 +++++ .../react-native-executorch-webrtc/src/BackgroundBlur.ts | 3 --- packages/react-native-executorch-webrtc/src/index.ts | 1 - .../react-native-executorch-webrtc/src/useBackgroundBlur.ts | 2 -- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.cspell-wordlist.txt b/.cspell-wordlist.txt index f1d55b4c98..e90a9b0d50 100644 --- a/.cspell-wordlist.txt +++ b/.cspell-wordlist.txt @@ -183,3 +183,8 @@ Synchronizable stringifying hɛloʊ wɜːld +webrtc +fishjam +Fishjam +deinitialize +Deinitialize diff --git a/packages/react-native-executorch-webrtc/src/BackgroundBlur.ts b/packages/react-native-executorch-webrtc/src/BackgroundBlur.ts index 6d55b3e7b6..af888dd812 100644 --- a/packages/react-native-executorch-webrtc/src/BackgroundBlur.ts +++ b/packages/react-native-executorch-webrtc/src/BackgroundBlur.ts @@ -5,9 +5,7 @@ let initialized = false; /** * Initialize the background blur processor with ExecuTorch segmentation model. * Must be called before using the blur middleware. - * * @param modelPath Path to the .pte segmentation model file - * * @example * ```ts * import { initializeBackgroundBlur } from '@executorch/react-native-executorch-webrtc'; @@ -35,7 +33,6 @@ export const initializeBackgroundBlur = (modelPath: string): void => { /** * Deinitialize and release background blur resources. - * * @example * ```ts * import { deinitializeBackgroundBlur } from '@executorch/react-native-executorch-webrtc'; diff --git a/packages/react-native-executorch-webrtc/src/index.ts b/packages/react-native-executorch-webrtc/src/index.ts index 36b79f26c0..96086ec472 100644 --- a/packages/react-native-executorch-webrtc/src/index.ts +++ b/packages/react-native-executorch-webrtc/src/index.ts @@ -4,7 +4,6 @@ * This package provides background blur/removal for WebRTC video calls * using ExecuTorch segmentation models. API is compatible with * @fishjam-cloud/react-native-webrtc-background-blur. - * * @packageDocumentation */ diff --git a/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts b/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts index 04f9a75ede..076f69822e 100644 --- a/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts +++ b/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts @@ -40,10 +40,8 @@ const PROCESSOR_NAME = 'executorchBackgroundBlur'; /** * Hook to enable background blur on WebRTC video tracks. * Compatible with Fishjam SDK's TrackMiddleware interface. 
- * * @param options Configuration options including model path and blur radius * @returns Object containing blurMiddleware for use with Fishjam SDK - * * @example * ```tsx * import { useBackgroundBlur } from '@executorch/react-native-executorch-webrtc'; From 8ab9ad563d74f5b1ac85e6d3aabcfba7ce932219 Mon Sep 17 00:00:00 2001 From: chmjkb Date: Fri, 17 Apr 2026 14:54:37 +0200 Subject: [PATCH 14/19] chore: ensure proper cleanup --- .../src/main/cpp/FrameProcessorBridge.cpp | 18 +++++++++++++++ .../webrtc/ExecutorchFrameProcessor.kt | 20 ++++++++++++++++ .../webrtc/ExecutorchFrameProcessorFactory.kt | 23 ++++++++++++++++++- .../com/executorch/webrtc/ExecutorchWebRTC.kt | 10 ++++++++ .../webrtc/ExecutorchWebRTCModule.kt | 3 +-- .../ios/ExecutorchFrameProcessor.mm | 13 +++++++++++ .../src/useBackgroundBlur.ts | 8 +++++-- 7 files changed, 90 insertions(+), 5 deletions(-) diff --git a/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp b/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp index 868a5e52f5..c6a68120b4 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp +++ b/packages/react-native-executorch-webrtc/android/src/main/cpp/FrameProcessorBridge.cpp @@ -539,4 +539,22 @@ Java_com_executorch_webrtc_ExecutorchFrameProcessor_runSegmentation( return nullptr; } } + +/** + * Unload the segmentation model and release all buffers + */ +JNIEXPORT void JNICALL +Java_com_executorch_webrtc_ExecutorchFrameProcessor_unloadModel(JNIEnv *env, + jobject thiz) { + LOGD("Unloading segmentation model and releasing resources"); + + g_segmentation.reset(); + g_modelLoaded = false; + g_modelPath.clear(); + + // Release pre-allocated buffers + g_resizedRgb.release(); + + LOGD("Model unloaded and resources released"); +} } // extern "C" diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt index 522fb73072..1fea123e0e 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessor.kt @@ -83,6 +83,9 @@ class ExecutorchFrameProcessor : VideoFrameProcessor { rotation: Int, ): ByteArray? 
+  // JNI: Unload model and release native resources
+  private external fun unloadModel()
+
   override fun process(
       frame: VideoFrame,
       helper: SurfaceTextureHelper,
@@ -287,10 +290,27 @@ class ExecutorchFrameProcessor : VideoFrameProcessor {
   }
 
   fun release() {
+    Log.d(TAG, "Releasing ExecutorchFrameProcessor resources")
+
+    // Release GPU resources
     renderer.release()
     yuvConverter?.release()
     yuvConverter = null
+
+    // Release cached frame
     lastProcessedFrame?.release()
     lastProcessedFrame = null
+
+    // Unload native model and buffers
+    if (modelLoaded) {
+      unloadModel()
+      modelLoaded = false
+      loadedModelPath = null
+    }
+
+    // Clear buffers
+    rgbaBuffer = null
+
+    Log.d(TAG, "ExecutorchFrameProcessor resources released")
   }
 }
diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt
index 30f42a65a8..3d7d2bda6a 100644
--- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt
+++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchFrameProcessorFactory.kt
@@ -8,5 +8,26 @@ import com.oney.WebRTCModule.videoEffects.VideoFrameProcessorFactoryInterface
  * Required by react-native-webrtc's ProcessorProvider system.
  */
 class ExecutorchFrameProcessorFactory : VideoFrameProcessorFactoryInterface {
-  override fun build(): VideoFrameProcessor = ExecutorchFrameProcessor()
+  companion object {
+    private val activeProcessors = mutableListOf<ExecutorchFrameProcessor>()
+
+    /**
+     * Release all active processors and clear the list
+     */
+    @JvmStatic
+    fun releaseAll() {
+      synchronized(activeProcessors) {
+        activeProcessors.forEach { it.release() }
+        activeProcessors.clear()
+      }
+    }
+  }
+
+  override fun build(): VideoFrameProcessor {
+    val processor = ExecutorchFrameProcessor()
+    synchronized(activeProcessors) {
+      activeProcessors.add(processor)
+    }
+    return processor
+  }
 }
diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt
index e6bc715338..dd49fca850 100644
--- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt
+++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTC.kt
@@ -42,6 +42,16 @@ object ExecutorchWebRTC {
     ExecutorchFrameProcessor.setBlurRadius(radius)
   }
 
+  /**
+   * Deinitialize and release all resources
+   */
+  fun deinitialize() {
+    Log.d(TAG, "Deinitializing ExecutorchWebRTC")
+    modelPath = null
+    ExecutorchFrameProcessorFactory.releaseAll()
+    Log.d(TAG, "ExecutorchWebRTC deinitialized")
+  }
+
   /**
    * Gets the processor name to use in JavaScript.
* Use this when calling videoTrack._setVideoEffects(['...']) diff --git a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt index 8833294fc8..0312a5a442 100644 --- a/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt +++ b/packages/react-native-executorch-webrtc/android/src/main/java/com/executorch/webrtc/ExecutorchWebRTCModule.kt @@ -47,8 +47,7 @@ class ExecutorchWebRTCModule( */ @ReactMethod fun deinitialize() { - // Currently no-op, resources are managed per-frame - // Could be extended to unload the model if needed + ExecutorchWebRTC.deinitialize() } /** diff --git a/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm b/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm index e7f22ec167..a639644112 100644 --- a/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm +++ b/packages/react-native-executorch-webrtc/ios/ExecutorchFrameProcessor.mm @@ -114,6 +114,19 @@ - (void)unloadModel { _modelLoaded = NO; _ready = NO; _previousMask.release(); + + // Release cached frame + _lastProcessedFrame = nil; + + // Release pixel buffer pool + if (_outputPool) { + CVPixelBufferPoolRelease(_outputPool); + _outputPool = NULL; + _poolWidth = 0; + _poolHeight = 0; + } + + NSLog(@"[ExecutorchFrameProcessor] Model unloaded and resources released"); } - (BOOL)isAvailable { diff --git a/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts b/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts index 076f69822e..b19842184e 100644 --- a/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts +++ b/packages/react-native-executorch-webrtc/src/useBackgroundBlur.ts @@ -1,7 +1,10 @@ import { useCallback, useEffect, useRef } from 'react'; import { Platform } from 'react-native'; import NativeBackgroundBlur from './NativeBackgroundBlur'; -import { initializeBackgroundBlur } from './BackgroundBlur'; +import { + initializeBackgroundBlur, + deinitializeBackgroundBlur, +} from './BackgroundBlur'; /** * Extended MediaStreamTrack with WebRTC video effects API @@ -83,7 +86,8 @@ export function useBackgroundBlur(options: UseBackgroundBlurOptions): { } return () => { - // TODO: unload native module? 
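+      // Effect cleanup: release the native segmentation model and buffers so
+      // a later run of this effect (e.g. a new modelUri) can re-initialize.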
+ deinitializeBackgroundBlur(); + initializedRef.current = false; }; }, [modelUri]); From 59c95fb35aa9743fd896b71a54aef27037aa4386 Mon Sep 17 00:00:00 2001 From: chmjkb Date: Fri, 17 Apr 2026 14:55:31 +0200 Subject: [PATCH 15/19] chore: demo app leftovers --- apps/computer-vision/app/_layout.tsx | 8 -------- 1 file changed, 8 deletions(-) diff --git a/apps/computer-vision/app/_layout.tsx b/apps/computer-vision/app/_layout.tsx index 0d11f0ed70..ea47ebdb3f 100644 --- a/apps/computer-vision/app/_layout.tsx +++ b/apps/computer-vision/app/_layout.tsx @@ -76,14 +76,6 @@ export default function _layout() { headerTitleStyle: { color: ColorPalette.primary }, }} /> - Date: Fri, 17 Apr 2026 15:11:02 +0200 Subject: [PATCH 16/19] docs: readme tweaks --- .../react-native-executorch-webrtc/README.md | 139 +++++++----------- 1 file changed, 55 insertions(+), 84 deletions(-) diff --git a/packages/react-native-executorch-webrtc/README.md b/packages/react-native-executorch-webrtc/README.md index 0af5dfe382..711fa74522 100644 --- a/packages/react-native-executorch-webrtc/README.md +++ b/packages/react-native-executorch-webrtc/README.md @@ -1,8 +1,17 @@ # react-native-executorch-webrtc -ExecuTorch frame processor integration for react-native-webrtc. +Real-time background blur for Fishjam WebRTC applications, powered by ExecuTorch segmentation models. -Process WebRTC camera frames with ExecuTorch vision models in real-time. +This package provides GPU-accelerated background blur effects using on-device ExecuTorch models for foreground / background segmentation. + +## Requirements + +- iOS 13.0+ +- Android SDK 26+ +- Peer dependencies: +- `@fishjam-cloud/react-native-client` +- `@fishjam-cloud/react-native-webrtc` +- `react-native-executorch` ## Installation @@ -10,110 +19,72 @@ Process WebRTC camera frames with ExecuTorch vision models in real-time. yarn add react-native-executorch-webrtc ``` -**That's it!** The package auto-registers via React Native autolinking. No native code setup needed. - -### Platform Support - -- ✅ Android (auto-configured) -- 🚧 iOS (coming soon) +For iOS: +```bash +cd ios && pod install +``` ## Usage -### Basic Usage - -Just import and use the hook - everything auto-registers: +### With Fishjam SDK -```typescript -import { useWebRTCFrameProcessor } from 'react-native-executorch-webrtc'; -import { RTCView, mediaDevices } from 'react-native-webrtc'; +```tsx +import { useBackgroundBlur } from 'react-native-executorch-webrtc'; +import { useCamera } from '@fishjam-cloud/react-native-client'; -function WebRTCCamera() { - const [stream, setStream] = useState(null); +function VideoCall() { + const [blurEnabled, setBlurEnabled] = useState(true); - // Enable ExecuTorch frame processing - useWebRTCFrameProcessor(stream); + const { blurMiddleware } = useBackgroundBlur({ + // NOTE: you can use React Native Executorch's Resource Fetcher to download model files + modelUri: 'file:///path/to/selfie_segmenter.pte', + blurRadius: 15, + }); - useEffect(() => { - async function startCamera() { - const mediaStream = await mediaDevices.getUserMedia({ - video: true, - audio: false, - }); - setStream(mediaStream); - } - startCamera(); - }, []); + const { toggleCamera } = useCamera({ + cameraTrackMiddleware: blurEnabled ? blurMiddleware : undefined, + }); - return ; + return ( +