diff --git a/android/cpp-adapter.cpp b/android/cpp-adapter.cpp
index 3c6b6f8..d9af02a 100644
--- a/android/cpp-adapter.cpp
+++ b/android/cpp-adapter.cpp
@@ -39,6 +39,34 @@ Java_com_elementary_ElementaryModule_nativeGetSampleRate(JNIEnv *env, jclass typ
     return audioEngine.get() ? audioEngine->getSampleRate() : 0;
 }
 
+extern "C"
+JNIEXPORT jint JNICALL
+Java_com_elementary_ElementaryModule_nativeGetNumChannels(JNIEnv *env, jclass type) {
+    return audioEngine.get() ? audioEngine->getNumChannels() : 0;
+}
+
+extern "C"
+JNIEXPORT jboolean JNICALL
+Java_com_elementary_ElementaryModule_nativeIsDeviceRunning(JNIEnv *env, jclass type) {
+    return audioEngine.get() ? static_cast<jboolean>(audioEngine->isDeviceRunning()) : JNI_FALSE;
+}
+
+extern "C"
+JNIEXPORT void JNICALL
+Java_com_elementary_ElementaryModule_nativeStopDevice(JNIEnv *env, jclass type) {
+    if (audioEngine) {
+        audioEngine->stopDevice();
+    }
+}
+
+extern "C"
+JNIEXPORT void JNICALL
+Java_com_elementary_ElementaryModule_nativeStartDevice(JNIEnv *env, jclass type) {
+    if (audioEngine) {
+        audioEngine->startDevice();
+    }
+}
+
 extern "C"
 JNIEXPORT jobject JNICALL
 Java_com_elementary_ElementaryModule_nativeLoadAudioResource(JNIEnv *env, jclass type, jstring key, jstring filePath) {
diff --git a/android/src/main/java/com/elementary/ElementaryModule.kt b/android/src/main/java/com/elementary/ElementaryModule.kt
index d1e6b38..1c9236e 100644
--- a/android/src/main/java/com/elementary/ElementaryModule.kt
+++ b/android/src/main/java/com/elementary/ElementaryModule.kt
@@ -1,10 +1,20 @@
 package com.elementary
 
+import android.content.BroadcastReceiver
+import android.content.Context
+import android.content.Intent
+import android.content.IntentFilter
+import android.media.AudioAttributes
+import android.media.AudioFocusRequest
+import android.media.AudioManager
+import android.os.Build
+import android.util.Log
 import com.facebook.react.bridge.ReactApplicationContext
 import com.facebook.react.bridge.ReactContextBaseJavaModule
 import com.facebook.react.bridge.ReactMethod
 import com.facebook.react.bridge.Promise
 import com.facebook.react.bridge.Arguments
+import com.facebook.react.bridge.LifecycleEventListener
 import com.facebook.react.bridge.WritableMap
 import com.facebook.react.modules.core.DeviceEventManagerModule
 
@@ -22,7 +32,48 @@ data class AudioResourceInfo(
 )
 
 class ElementaryModule(reactContext: ReactApplicationContext) :
-    ReactContextBaseJavaModule(reactContext) {
+    ReactContextBaseJavaModule(reactContext), LifecycleEventListener {
+
+    private val audioManager = reactContext.getSystemService(Context.AUDIO_SERVICE) as AudioManager
+    private var audioFocusRequest: AudioFocusRequest? = null
+    private var hasAudioFocus = false
+
+    private val audioFocusChangeListener = AudioManager.OnAudioFocusChangeListener { focusChange ->
+        when (focusChange) {
+            AudioManager.AUDIOFOCUS_GAIN -> {
+                Log.d(TAG, "Audio focus gained, restarting device")
+                hasAudioFocus = true
+                nativeStartDevice()
+            }
+            AudioManager.AUDIOFOCUS_LOSS -> {
+                Log.d(TAG, "Audio focus lost permanently, stopping device")
+                hasAudioFocus = false
+                nativeStopDevice()
+            }
+            AudioManager.AUDIOFOCUS_LOSS_TRANSIENT -> {
+                Log.d(TAG, "Audio focus lost transiently, stopping device")
+                hasAudioFocus = false
+                nativeStopDevice()
+            }
+            AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK -> {
+                // Could lower volume instead, but for an audio engine it's safer to stop
+                Log.d(TAG, "Audio focus lost (duck), stopping device")
+                hasAudioFocus = false
+                nativeStopDevice()
+            }
+        }
+    }
+
+    // Handle headphone disconnect (equivalent to iOS AVAudioEngineConfigurationChangeNotification)
+    private val noisyAudioReceiver = object : BroadcastReceiver() {
+        override fun onReceive(context: Context?, intent: Intent?) {
+            if (intent?.action == AudioManager.ACTION_AUDIO_BECOMING_NOISY) {
+                Log.d(TAG, "Audio becoming noisy (headphones disconnected), restarting device")
+                nativeStopDevice()
+                nativeStartDevice()
+            }
+        }
+    }
 
     override fun getName(): String {
         return NAME
@@ -93,6 +144,32 @@ class ElementaryModule(reactContext: ReactApplicationContext) :
         promise.resolve(documentsDir)
     }
 
+    @ReactMethod
+    fun getBundlePath(promise: Promise) {
+        val dataDir = reactApplicationContext.applicationInfo.dataDir
+        promise.resolve(dataDir)
+    }
+
+    @ReactMethod
+    fun setProperty(nodeHash: Double, key: String, value: Double) {
+        // Build a SET_PROPERTY instruction batch: [[3, nodeHash, key, value]]
+        // InstructionType::SET_PROPERTY = 3
+        val instruction = "[3,${nodeHash.toInt()},\"$key\",$value]"
+        val batch = "[$instruction]"
+        nativeApplyInstructions(batch)
+    }
+
+    @ReactMethod
+    fun getAudioInfo(promise: Promise) {
+        val info = Arguments.createMap().apply {
+            putInt("channels", nativeGetNumChannels())
+            putInt("sampleRate", nativeGetSampleRate())
+            putBoolean("engineRunning", nativeIsDeviceRunning())
+            putBoolean("runtimeReady", nativeGetSampleRate() > 0)
+        }
+        promise.resolve(info)
+    }
+
     // Helper to emit events
     private fun sendEvent(eventName: String, params: WritableMap?) {
         reactApplicationContext
@@ -104,18 +181,95 @@ class ElementaryModule(reactContext: ReactApplicationContext) :
         sendEvent("AudioPlaybackFinished", null)
     }
 
+    private fun requestAudioFocus() {
+        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
+            val focusRequest = AudioFocusRequest.Builder(AudioManager.AUDIOFOCUS_GAIN)
+                .setAudioAttributes(
+                    AudioAttributes.Builder()
+                        .setUsage(AudioAttributes.USAGE_MEDIA)
+                        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
+                        .build()
+                )
+                .setOnAudioFocusChangeListener(audioFocusChangeListener)
+                .build()
+            audioFocusRequest = focusRequest
+            val result = audioManager.requestAudioFocus(focusRequest)
+            hasAudioFocus = result == AudioManager.AUDIOFOCUS_REQUEST_GRANTED
+        } else {
+            @Suppress("DEPRECATION")
+            val result = audioManager.requestAudioFocus(
+                audioFocusChangeListener,
+                AudioManager.STREAM_MUSIC,
+                AudioManager.AUDIOFOCUS_GAIN
+            )
+            hasAudioFocus = result == AudioManager.AUDIOFOCUS_REQUEST_GRANTED
+        }
+        Log.d(TAG, "Audio focus requested, granted: $hasAudioFocus")
+    }
+
+    private fun abandonAudioFocus() {
+        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
+            audioFocusRequest?.let { audioManager.abandonAudioFocusRequest(it) }
+        } else {
+            @Suppress("DEPRECATION")
+            audioManager.abandonAudioFocus(audioFocusChangeListener)
+        }
+        hasAudioFocus = false
+    }
+
+    // LifecycleEventListener
+    override fun onHostResume() {
+        if (!hasAudioFocus) {
+            Log.d(TAG, "Host resumed without audio focus, re-requesting")
+            requestAudioFocus()
+        }
+        if (hasAudioFocus && !nativeIsDeviceRunning()) {
+            Log.d(TAG, "Device not running, restarting")
+            nativeStartDevice()
+            Log.d(TAG, "Device running after start: ${nativeIsDeviceRunning()}")
+        }
+    }
+
+    override fun onHostPause() {}
+
+    override fun onHostDestroy() {
+        abandonAudioFocus()
+        try {
+            reactApplicationContext.unregisterReceiver(noisyAudioReceiver)
+        } catch (_: IllegalArgumentException) {
+            // Receiver was not registered
+        }
+    }
+
     companion object {
         const val NAME = "Elementary"
+        private const val TAG = "Elementary"
     }
 
     init {
-        System.loadLibrary("react-native-elementary");
-        nativeStartAudioEngine();
+        System.loadLibrary("react-native-elementary")
+        nativeStartAudioEngine()
+
+        // Request audio focus
+        requestAudioFocus()
+
+        // Register for headphone disconnect events
+        val filter = IntentFilter(AudioManager.ACTION_AUDIO_BECOMING_NOISY)
+        reactContext.registerReceiver(noisyAudioReceiver, filter)
+
+        // Register lifecycle listener for cleanup
+        reactContext.addLifecycleEventListener(this)
+
+        Log.d(TAG, "Audio engine initialized (channels=${nativeGetNumChannels()}, sampleRate=${nativeGetSampleRate()})")
    }
 
     external fun nativeGetSampleRate(): Int
+    external fun nativeGetNumChannels(): Int
+    external fun nativeIsDeviceRunning(): Boolean
     external fun nativeApplyInstructions(message: String)
     external fun nativeStartAudioEngine()
+    external fun nativeStopDevice()
+    external fun nativeStartDevice()
     external fun nativeLoadAudioResource(key: String, filePath: String): AudioResourceInfo?
     external fun nativeUnloadAudioResource(key: String): Boolean
 }
diff --git a/android/src/newarch/com/elementary/ElementaryTurboModule.java b/android/src/newarch/com/elementary/ElementaryTurboModule.java
index e77a4ba..45551fd 100644
--- a/android/src/newarch/com/elementary/ElementaryTurboModule.java
+++ b/android/src/newarch/com/elementary/ElementaryTurboModule.java
@@ -47,4 +47,14 @@ public void unloadAudioResource(String key, Promise promise) {
     public void getDocumentsDirectory(Promise promise) {
         module.getDocumentsDirectory(promise);
     }
+
+    @Override
+    public void getBundlePath(Promise promise) {
+        module.getBundlePath(promise);
+    }
+
+    @Override
+    public void setProperty(double nodeHash, String key, double value) {
+        module.setProperty(nodeHash, key, value);
+    }
 }
diff --git a/cpp/audioengine.cpp b/cpp/audioengine.cpp
index 2901d46..28e86ba 100644
--- a/cpp/audioengine.cpp
+++ b/cpp/audioengine.cpp
@@ -22,6 +22,42 @@ namespace elementary {
         return device.sampleRate;
     }
 
+    int AudioEngine::getNumChannels() {
+        return deviceInitialized ? static_cast<int>(device.playback.channels) : 0;
+    }
+
+    bool AudioEngine::isDeviceRunning() {
+        if (!deviceInitialized) return false;
+        return ma_device_get_state(&device) == ma_device_state_started;
+    }
+
+    void AudioEngine::stopDevice() {
+        if (deviceInitialized) {
+            proxy->muted.store(true, std::memory_order_relaxed);
+            ma_device_stop(&device);
+        }
+    }
+
+    void AudioEngine::startDevice() {
+        if (!deviceInitialized) return;
+
+        proxy->muted.store(false, std::memory_order_relaxed);
+        ma_result result = ma_device_start(&device);
+
+        if (result != MA_SUCCESS) {
+            // Device start failed — reinitialize
+            ma_device_uninit(&device);
+            deviceInitialized = false;
+
+            deviceConfig.pUserData = proxy.get();
+            result = ma_device_init(nullptr, &deviceConfig, &device);
+            if (result == MA_SUCCESS) {
+                deviceInitialized = true;
+                ma_device_start(&device);
+            }
+        }
+    }
+
     AudioLoadResult AudioEngine::loadAudioResource(const std::string& key, const std::string& filePath) {
         AudioLoadResult result = AudioResourceLoader::loadFile(key, filePath);
 
diff --git a/cpp/audioengine.h b/cpp/audioengine.h
index 31d4ae7..d9c6dd0 100644
--- a/cpp/audioengine.h
+++ b/cpp/audioengine.h
@@ -4,6 +4,7 @@
 #include "../cpp/vendor/elementary/runtime/elem/Runtime.h"
 #include "AudioResourceLoader.h"
 #include "miniaudio.h"
+#include <atomic>
 #include <memory>
 #include <string>
 
@@ -11,13 +12,23 @@ namespace elementary {
     struct DeviceProxy {
         elem::Runtime<float> runtime;
         std::vector<float> scratchData;
+        std::atomic<bool> muted{false};
 
         DeviceProxy(double sampleRate, size_t blockSize)
             : runtime(sampleRate, blockSize), scratchData(2 * blockSize) {}
 
         void process(float* outputData, size_t numChannels, size_t numFrames) {
-            if (scratchData.size() < (numChannels * numFrames))
-                scratchData.resize(numChannels * numFrames);
+            if (muted.load(std::memory_order_relaxed)) {
+                std::memset(outputData, 0, numChannels * numFrames * sizeof(float));
+                return;
+            }
+            // Clamp to max supported channels (stereo) to prevent out-of-bounds
+            // access if the device reports more channels than we can handle
+            static constexpr size_t kMaxChannels = 2;
+            size_t processChannels = std::min(numChannels, kMaxChannels);
+
+            if (scratchData.size() < (processChannels * numFrames))
+                scratchData.resize(processChannels * numFrames);
 
             auto* deinterleaved = scratchData.data();
             std::array<float*, 2> ptrs {deinterleaved, deinterleaved + numFrames};
@@ -26,14 +37,18 @@ namespace elementary {
                 nullptr,
                 0,
                 ptrs.data(),
-                numChannels,
+                processChannels,
                 numFrames,
                 nullptr
             );
 
             for (size_t i = 0; i < numChannels; ++i) {
                 for (size_t j = 0; j < numFrames; ++j) {
-                    outputData[i + numChannels * j] = deinterleaved[i * numFrames + j];
+                    if (i < processChannels) {
+                        outputData[i + numChannels * j] = deinterleaved[i * numFrames + j];
+                    } else {
+                        outputData[i + numChannels * j] = 0.0f;
+                    }
                 }
             }
         }
@@ -46,6 +61,10 @@ namespace elementary {
 
         elem::Runtime<float>& getRuntime();
         int getSampleRate();
+        int getNumChannels();
+        bool isDeviceRunning();
+        void stopDevice();
+        void startDevice();
 
         // VFS / Audio Resource methods
         AudioLoadResult loadAudioResource(const std::string& key, const std::string& filePath);
diff --git a/ios/Elementary.h b/ios/Elementary.h
index e42de88..0f5011e 100644
--- a/ios/Elementary.h
+++ b/ios/Elementary.h
@@ -21,4 +21,7 @@
 @property(nonatomic, assign) std::shared_ptr<elem::Runtime<float>> runtime;
 @property(nonatomic, strong) NSMutableSet *loadedResources;
 
+/// Shared instance for native code to access the runtime (e.g. for real-time MIDI triggering)
++ (instancetype)sharedInstance;
+
 @end
diff --git a/ios/Elementary.mm b/ios/Elementary.mm
index 394078c..202a59b 100644
--- a/ios/Elementary.mm
+++ b/ios/Elementary.mm
@@ -3,14 +3,21 @@
 #include "../cpp/AudioResourceLoader.h"
 #include "../cpp/vendor/elementary/runtime/elem/AudioBufferResource.h"
 
+static Elementary *_sharedInstance = nil;
+
 @implementation Elementary
 
 RCT_EXPORT_MODULE();
 
++ (instancetype)sharedInstance {
+    return _sharedInstance;
+}
+
 - (instancetype)init
 {
     self = [super init];
     if (self) {
+        _sharedInstance = self;
         self.loadedResources = [[NSMutableSet alloc] init];
         self.audioEngine = [[AVAudioEngine alloc] init];
 
@@ -23,13 +30,18 @@ - (instancetype)init
 
         const float **inputBuffer = (const float **)calloc(numOutputChannels, sizeof(float *));
         float **outputBuffer = (float **)malloc(numOutputChannels * sizeof(float *));
 
+        NSLog(@"[Elementary] Init: %d output channels, sampleRate=%.0f", numOutputChannels, outputFormat.sampleRate);
+
         AVAudioSourceNode *sourceNode = [[AVAudioSourceNode alloc] initWithRenderBlock:^OSStatus(
             BOOL * _Nonnull isSilence,
             const AudioTimeStamp * _Nonnull timestamp,
             AVAudioFrameCount frameCount,
             AudioBufferList * _Nonnull audioBufferList) {
-                for (UInt32 channel = 0; channel < audioBufferList->mNumberBuffers; channel++) {
+                // Safety: ensure buffer list matches expected channel count
+                UInt32 actualChannels = audioBufferList->mNumberBuffers;
+
+                for (UInt32 channel = 0; channel < actualChannels; channel++) {
                     memset(audioBufferList->mBuffers[channel].mData, 0, audioBufferList->mBuffers[channel].mDataByteSize);
                 }
@@ -39,7 +51,7 @@ - (instancetype)init
                 }
 
                 for (UInt8 channel = 0; channel < numOutputChannels; channel++) {
-                    outputBuffer[channel] = (float*)audioBufferList->mBuffers[channel].mData;
+                    outputBuffer[channel] = (float*)audioBufferList->mBuffers[channel].mData;
                 }
 
                 self.runtime->process(
@@ -65,14 +77,72 @@ - (instancetype)init
         int bufferSize = 512;
         self.runtime = std::make_shared<elem::Runtime<float>>(outputFormat.sampleRate, bufferSize);
+
+        // Handle audio session interruptions (phone calls, background, etc.)
+        [[NSNotificationCenter defaultCenter] addObserver:self
+                                                 selector:@selector(handleAudioInterruption:)
+                                                     name:AVAudioSessionInterruptionNotification
+                                                   object:[AVAudioSession sharedInstance]];
+
+        // Handle audio engine configuration changes (headphones plugged/unplugged, etc.)
+        [[NSNotificationCenter defaultCenter] addObserver:self
+                                                 selector:@selector(handleEngineConfigChange:)
+                                                     name:AVAudioEngineConfigurationChangeNotification
+                                                   object:self.audioEngine];
     }
 
     return self;
 }
 
+- (void)handleAudioInterruption:(NSNotification *)notification {
+    NSDictionary *info = notification.userInfo;
+    AVAudioSessionInterruptionType type = (AVAudioSessionInterruptionType)[info[AVAudioSessionInterruptionTypeKey] unsignedIntegerValue];
+
+    if (type == AVAudioSessionInterruptionTypeEnded) {
+        // Reactivate audio session and restart engine
+        NSError *error;
+        [[AVAudioSession sharedInstance] setActive:YES error:&error];
+        if (error) {
+            NSLog(@"[Elementary] Failed to reactivate audio session: %@", error.localizedDescription);
+            return;
+        }
+        if (![self.audioEngine startAndReturnError:&error]) {
+            NSLog(@"[Elementary] Failed to restart engine after interruption: %@", error.localizedDescription);
+        } else {
+            NSLog(@"[Elementary] Engine restarted after interruption");
+        }
+    } else {
+        NSLog(@"[Elementary] Audio interrupted");
+    }
+}
+
+- (void)handleEngineConfigChange:(NSNotification *)notification {
+    NSLog(@"[Elementary] Engine configuration changed, restarting...");
+    NSError *error;
+    if (![self.audioEngine startAndReturnError:&error]) {
+        NSLog(@"[Elementary] Failed to restart engine after config change: %@", error.localizedDescription);
+    } else {
+        NSLog(@"[Elementary] Engine restarted after config change");
+    }
+}
+
 + (BOOL) requiresMainQueueSetup
 {
     return YES;
 }
 
+#pragma mark - Diagnostics
+
+RCT_EXPORT_METHOD(getAudioInfo:(RCTPromiseResolveBlock)resolve
+                  rejecter:(RCTPromiseRejectBlock)reject)
+{
+    AVAudioFormat *format = [self.audioEngine.outputNode outputFormatForBus:0];
+    resolve(@{
+        @"channels": @(format.channelCount),
+        @"sampleRate": @(format.sampleRate),
+        @"engineRunning": @(self.audioEngine.isRunning),
+        @"runtimeReady": @(self.runtime != nullptr),
+    });
+}
+
 #pragma mark - React Native Methods
 
 #ifdef RCT_NEW_ARCH_ENABLED
@@ -87,6 +157,28 @@ - (void)applyInstructions:(NSString *)message
     }
 }
 
+#ifdef RCT_NEW_ARCH_ENABLED
+- (void)setProperty:(double)nodeHash key:(NSString *)key value:(double)value
+#else
+RCT_EXPORT_METHOD(setProperty:(double)nodeHash key:(NSString *)key value:(double)value)
+#endif
+{
+    if (self.runtime == nullptr) return;
+
+    // Build a SET_PROPERTY instruction batch: [[3, nodeHash, key, value]]
+    // InstructionType::SET_PROPERTY = 3
+    elem::js::Array instruction;
+    instruction.push_back((double)3);
+    instruction.push_back(nodeHash);
+    instruction.push_back(std::string([key UTF8String]));
+    instruction.push_back(value);
+
+    elem::js::Array batch;
+    batch.push_back(instruction);
+
+    self.runtime->applyInstructions(batch);
+}
+
 #ifdef RCT_NEW_ARCH_ENABLED
 - (void)getSampleRate:(RCTPromiseResolveBlock)resolve
                reject:(RCTPromiseRejectBlock)reject
@@ -205,6 +297,18 @@ - (void)getDocumentsDirectory:(RCTPromiseResolveBlock)resolve
     resolve(documentsDirectory);
 }
 
+#ifdef RCT_NEW_ARCH_ENABLED
+- (void)getBundlePath:(RCTPromiseResolveBlock)resolve
+               reject:(RCTPromiseRejectBlock)reject
+#else
+RCT_EXPORT_METHOD(getBundlePath:(RCTPromiseResolveBlock)resolve
+                  rejecter:(RCTPromiseRejectBlock)reject)
+#endif
+{
+    NSString *bundlePath = [[NSBundle mainBundle] resourcePath];
+    resolve(bundlePath);
+}
+
 #pragma mark - RCTEventEmitter
 
 - (NSArray<NSString *> *)supportedEvents
diff --git a/src/NativeElementary.ts b/src/NativeElementary.ts
index e7cb52c..14be270 100644
--- a/src/NativeElementary.ts
+++ b/src/NativeElementary.ts
@@ -14,6 +14,10 @@ export interface Spec extends TurboModule {
   getSampleRate(): Promise<number>;
   applyInstructions(message: string): void;
 
+  // Real-time property updates (no graph re-render, audio-thread safe)
+  // nodeHash is the elem node hash (int32), key is the property name, value is the new value
+  setProperty(nodeHash: number, key: string, value: number): void;
+
   addListener(eventName: string): void;
   removeListeners(count: number): void;
 
@@ -23,6 +27,7 @@ export interface Spec extends TurboModule {
 
   // Path helpers
   getDocumentsDirectory(): Promise<string>;
+  getBundlePath(): Promise<string>;
 }
 
 export default TurboModuleRegistry.getEnforcing<Spec>('Elementary');
diff --git a/src/index.tsx b/src/index.tsx
index e8aee43..39733a1 100644
--- a/src/index.tsx
+++ b/src/index.tsx
@@ -47,6 +47,28 @@ export function getDocumentsDirectory(): Promise<string> {
   return ElementaryModule.getDocumentsDirectory();
 }
 
+/** Get the app bundle's resource path (for loading bundled assets) */
+export function getBundlePath(): Promise<string> {
+  return ElementaryModule.getBundlePath();
+}
+
+/**
+ * Update a property on a graph node without re-rendering the entire graph.
+ * This operates directly on the audio thread — ideal for real-time MIDI
+ * note triggering, parameter automation, and any time-critical updates.
+ *
+ * @param nodeHash - The elem node hash (from node.hash after creating with el.*)
+ * @param key - The property name to update (e.g. 'value')
+ * @param value - The new numeric value
+ */
+export function setProperty(
+  nodeHash: number,
+  key: string,
+  value: number
+): void {
+  ElementaryModule.setProperty(nodeHash, key, value);
+}
+
 /**
  * Native renderer for Elementary Audio.
 *