Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
572 changes: 572 additions & 0 deletions ios/RCTWebRTC/Utils/AudioDeviceModule/AudioDeviceModule.swift

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
//
// Copyright © 2025 Stream.io Inc. All rights reserved.
//

import Accelerate
import AVFoundation
import Combine
import Foundation

/// Abstraction over an object that taps an `AVAudioNode` and publishes
/// audio-level readings, allowing tests to substitute fakes.
protocol AudioEngineNodeAdapting {

    /// Publisher that receives level readings (in decibels) produced by the
    /// installed tap. Set by the owner before installing a tap.
    var subject: CurrentValueSubject<Float, Never>? { get set }

    /// Installs a tap on `node` so that audio levels can be observed.
    /// - Note: Default arguments declared by conforming types (e.g. `bus = 0`
    ///   in `AudioEngineLevelNodeAdapter`) are not visible when calling
    ///   through this protocol; existential callers must pass every argument.
    /// - Parameters:
    ///   - node: The node to observe.
    ///   - format: Audio format expected by the tap.
    ///   - bus: Output bus to observe.
    ///   - bufferSize: Tap buffer size, in frames.
    func installInputTap(
        on node: AVAudioNode,
        format: AVAudioFormat,
        bus: Int,
        bufferSize: UInt32
    )

    /// Removes any installed tap from `bus` and resets the published level.
    func uninstall(on bus: Int)
}

/// Observes an `AVAudioMixerNode` and publishes decibel readings for UI and
/// analytics consumers.
final class AudioEngineLevelNodeAdapter: AudioEngineNodeAdapting {

    enum Constant {
        /// The lower limit of the audio pipeline in dB that is considered silence.
        static let silenceDB: Float = -160
    }

    /// Publisher receiving clamped decibel readings in `[Constant.silenceDB, 0]`.
    var subject: CurrentValueSubject<Float, Never>?

    /// The mixer node a tap is currently installed on, if any.
    private var inputTap: AVAudioMixerNode?

    /// Installs a tap on the supplied audio node to monitor input levels.
    ///
    /// Does nothing when `node` is not an `AVAudioMixerNode` or when a tap is
    /// already installed (at most one tap is managed at a time).
    /// - Parameters:
    ///   - node: The node to observe; must be an `AVAudioMixerNode`.
    ///   - format: Audio format expected by the tap.
    ///   - bus: Output bus to observe.
    ///   - bufferSize: Tap buffer size, in frames.
    func installInputTap(
        on node: AVAudioNode,
        format: AVAudioFormat,
        bus: Int = 0,
        bufferSize: UInt32 = 1024
    ) {
        guard let mixer = node as? AVAudioMixerNode, inputTap == nil else { return }

        mixer.installTap(
            onBus: bus,
            bufferSize: bufferSize,
            format: format
        ) { [weak self] buffer, _ in
            self?.processInputBuffer(buffer)
        }

        inputTap = mixer
        // log.debug("Input node installed", subsystems: .audioRecording)
    }

    /// Removes the tap and resets observed audio levels.
    /// - Parameter bus: Bus to remove the tap from, defaults to `0`.
    func uninstall(on bus: Int = 0) {
        if let mixer = inputTap, mixer.engine != nil {
            // Fix: remove the tap from the requested bus instead of the
            // previously hardcoded bus 0, so a tap installed via
            // `installInputTap(on:format:bus:bufferSize:)` with a non-zero
            // bus is actually detached.
            mixer.removeTap(onBus: bus)
        }
        // Report silence so subscribers (e.g. level meters) reset their UI.
        subject?.send(Constant.silenceDB)
        inputTap = nil
        // log.debug("Input node uninstalled", subsystems: .audioRecording)
    }

    // MARK: - Private Helpers

    /// Processes the PCM buffer produced by the tap, computes the RMS level of
    /// the first channel in decibels, clamps it to `[Constant.silenceDB, 0]`,
    /// and forwards it to `subject`.
    private func processInputBuffer(_ buffer: AVAudioPCMBuffer) {
        // Bail out when there is no publisher to feed or the buffer carries
        // no float sample data.
        guard
            let subject,
            let channelData = buffer.floatChannelData
        else { return }

        // Number of samples per channel, as a vDSP-compatible length.
        let frameCount = vDSP_Length(buffer.frameLength)

        // Compute the root-mean-square amplitude of the first channel using
        // Accelerate (linear scale, not yet decibels). Only channel 0 is
        // inspected; additional channels are ignored.
        var rms: Float = 0
        vDSP_rmsqv(channelData[0], 1, &rms, frameCount)

        // Convert linear RMS to decibels (20 * log10(rms)); clamping the
        // input at `.ulpOfOne` avoids the undefined log10(0).
        let rmsDB = 20 * log10(max(rms, .ulpOfOne))

        // Clamp to the valid audio range so noise or numeric artefacts cannot
        // publish out-of-range values. Reuses `Constant.silenceDB` rather than
        // duplicating the -160 literal, keeping it consistent with
        // `uninstall(on:)`.
        let clampedRMS = max(Constant.silenceDB, min(0, rmsDB))

        // Publish the clamped decibel value for UI / analytics subscribers.
        subject.send(clampedRMS)
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
//
// Copyright © 2025 Stream.io Inc. All rights reserved.
//

import Combine
import WebRTC

/// Abstraction over `RTCAudioDeviceModule` so tests can provide fakes while
/// production code continues to rely on the WebRTC-backed implementation.
///
/// NOTE(review): member semantics mirror `RTCAudioDeviceModule`; the `Int`
/// return values are presumably 0-on-success status codes (as used by
/// `initAndStartPlayout()` below) — confirm against the WebRTC headers.
protocol RTCAudioDeviceModuleControlling: AnyObject {
    /// Delegate receiving device-module callbacks.
    var observer: RTCAudioDeviceModuleDelegate? { get set }
    /// `true` while audio playout is active.
    var isPlaying: Bool { get }
    /// `true` while audio recording is active.
    var isRecording: Bool { get }
    /// `true` once playout has been initialized.
    var isPlayoutInitialized: Bool { get }
    /// `true` once recording has been initialized.
    var isRecordingInitialized: Bool { get }
    /// `true` when the microphone is currently muted.
    var isMicrophoneMuted: Bool { get }
    /// `true` when stereo playout is currently enabled.
    var isStereoPlayoutEnabled: Bool { get }
    /// Toggles bypassing of WebRTC voice processing.
    var isVoiceProcessingBypassed: Bool { get set }
    /// `true` when voice processing is enabled.
    var isVoiceProcessingEnabled: Bool { get }
    /// `true` when automatic gain control (AGC) is enabled.
    var isVoiceProcessingAGCEnabled: Bool { get }
    /// Requests stereo playout when supported.
    var prefersStereoPlayout: Bool { get set }

    func reset() -> Int
    /// Convenience combining playout initialization and start (see extension).
    func initAndStartPlayout() -> Int
    func startPlayout() -> Int
    func stopPlayout() -> Int
    func initAndStartRecording() -> Int
    func setMicrophoneMuted(_ isMuted: Bool) -> Int
    func startRecording() -> Int
    func stopRecording() -> Int
    func refreshStereoPlayoutState()
    func setMuteMode(_ mode: RTCAudioEngineMuteMode) -> Int
    func setRecordingAlwaysPreparedMode(_ alwaysPreparedRecording: Bool) -> Int
}

extension RTCAudioDeviceModule: RTCAudioDeviceModuleControlling {
    /// Convenience wrapper that mirrors the old `initPlayout` and
    /// `startPlayout` sequence so the caller can request playout in one call.
    /// Returns the `initPlayout()` status when initialization fails;
    /// otherwise returns the `startPlayout()` status.
    func initAndStartPlayout() -> Int {
        let initStatus = initPlayout()
        guard initStatus == 0 else { return initStatus }
        return startPlayout()
    }
}
3 changes: 3 additions & 0 deletions ios/RCTWebRTC/WebRTCModule.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,16 @@ static NSString *const kEventMediaStreamTrackEnded = @"mediaStreamTrackEnded";
static NSString *const kEventPeerConnectionOnRemoveTrack = @"peerConnectionOnRemoveTrack";
static NSString *const kEventPeerConnectionOnTrack = @"peerConnectionOnTrack";

@class AudioDeviceModule;

@interface WebRTCModule : RCTEventEmitter<RCTBridgeModule>

@property(nonatomic, strong) dispatch_queue_t workerQueue;

@property(nonatomic, strong) RTCPeerConnectionFactory *peerConnectionFactory;
@property(nonatomic, strong) id<RTCVideoDecoderFactory> decoderFactory;
@property(nonatomic, strong) id<RTCVideoEncoderFactory> encoderFactory;
@property(nonatomic, strong) AudioDeviceModule *audioDeviceModule;

@property(nonatomic, strong) NSMutableDictionary<NSNumber *, RTCPeerConnection *> *peerConnections;
@property(nonatomic, strong) NSMutableDictionary<NSString *, RTCMediaStream *> *localStreams;
Expand Down
9 changes: 7 additions & 2 deletions ios/RCTWebRTC/WebRTCModule.m
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@
#import "WebRTCModule.h"
#import "WebRTCModuleOptions.h"

// Import Swift classes
#import <stream_react_native_webrtc/stream_react_native_webrtc-Swift.h>

@interface WebRTCModule ()
@end

Expand Down Expand Up @@ -78,7 +81,7 @@ - (instancetype)init {
}
RCTLogInfo(@"Using audio processing module: %@", NSStringFromClass([audioProcessingModule class]));
_peerConnectionFactory =
[[RTCPeerConnectionFactory alloc] initWithAudioDeviceModuleType:RTCAudioDeviceModuleTypePlatformDefault
[[RTCPeerConnectionFactory alloc] initWithAudioDeviceModuleType:RTCAudioDeviceModuleTypeAudioEngine
bypassVoiceProcessing:NO
encoderFactory:encoderFactory
decoderFactory:decoderFactory
Expand All @@ -90,13 +93,15 @@ - (instancetype)init {
audioDevice:audioDevice];
} else {
_peerConnectionFactory =
[[RTCPeerConnectionFactory alloc] initWithAudioDeviceModuleType:RTCAudioDeviceModuleTypePlatformDefault
[[RTCPeerConnectionFactory alloc] initWithAudioDeviceModuleType:RTCAudioDeviceModuleTypeAudioEngine
bypassVoiceProcessing:NO
encoderFactory:encoderFactory
decoderFactory:decoderFactory
audioProcessingModule:nil];
}

_audioDeviceModule = [[AudioDeviceModule alloc] initWithSource:_peerConnectionFactory.audioDeviceModule];

_peerConnections = [NSMutableDictionary new];
_localStreams = [NSMutableDictionary new];
_localTracks = [NSMutableDictionary new];
Expand Down
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "@stream-io/react-native-webrtc",
"version": "137.0.2",
"version": "137.1.0-alpha.1",
"repository": {
"type": "git",
"url": "git+https://github.com/GetStream/react-native-webrtc.git"
Expand Down
Loading