diff --git a/src/audio/MiniAudioSource.cpp b/src/audio/MiniAudioSource.cpp
index 155ddc2..fdff416 100644
--- a/src/audio/MiniAudioSource.cpp
+++ b/src/audio/MiniAudioSource.cpp
@@ -4,6 +4,59 @@
 #include <string>
 #include <vector>
+#ifdef __EMSCRIPTEN__
+#include <emscripten.h>
+
+// Enumerate audio input devices via the Web MediaDevices API.
+// Stores results in a JS-side array; returns the number of devices found.
+EM_ASYNC_JS(int, baudmine_js_enumerate_devices, (), {
+    if (!navigator.mediaDevices || !navigator.mediaDevices.enumerateDevices) {
+        return 0;
+    }
+    // Request mic permission first so labels are populated.
+    try {
+        var stream = await navigator.mediaDevices.getUserMedia({audio: true});
+        stream.getTracks().forEach(function(t) { t.stop(); });
+    } catch (e) {
+        console.warn("baudmine: mic permission denied, device labels may be empty");
+    }
+    try {
+        var devices = await navigator.mediaDevices.enumerateDevices();
+        window.baudmine_audioInputs = [];
+        for (var i = 0; i < devices.length; ++i) {
+            if (devices[i].kind === "audioinput") {
+                window.baudmine_audioInputs.push({
+                    deviceId: devices[i].deviceId,
+                    label: devices[i].label || ("Microphone " + (window.baudmine_audioInputs.length + 1))
+                });
+            }
+        }
+        return window.baudmine_audioInputs.length;
+    } catch (e) {
+        console.error("baudmine: enumerateDevices failed:", e);
+        return 0;
+    }
+});
+
+// Get the label of the device at the given index.
+EM_JS(void, baudmine_js_get_device_name, (int index, char* buf, int bufSize), {
+    var inputs = window.baudmine_audioInputs || [];
+    var name = (index >= 0 && index < inputs.length) ? inputs[index].label : "";
+    stringToUTF8(name, buf, bufSize);
+});
+
+// Set the device ID that the next getUserMedia call should use.
+EM_JS(void, baudmine_js_set_selected_device, (int index), {
+    var inputs = window.baudmine_audioInputs || [];
+    if (index >= 0 && index < inputs.length) {
+        window.baudmine_selectedDeviceId = inputs[index].deviceId;
+    } else {
+        window.baudmine_selectedDeviceId = null;
+    }
+});
+
+#endif // __EMSCRIPTEN__
+
 
 namespace baudmine {
 
 // ── Shared context (lazy-initialized) ────────────────────────────────────────
@@ -25,6 +78,21 @@ static ma_context* sharedContext() {
 
 std::vector<AudioDeviceInfo> MiniAudioSource::listInputDevices() {
     std::vector<AudioDeviceInfo> result;
+
+#ifdef __EMSCRIPTEN__
+    // Use the Web MediaDevices API to enumerate microphones.
+    int count = baudmine_js_enumerate_devices();
+    for (int i = 0; i < count; ++i) {
+        char nameBuf[256] = {};
+        baudmine_js_get_device_name(i, nameBuf, sizeof(nameBuf));
+        result.push_back({
+            i,
+            nameBuf,
+            2,       // Web Audio typically provides stereo
+            48000.0  // Web Audio default sample rate
+        });
+    }
+#else
     ma_context* ctx = sharedContext();
     if (!ctx) return result;
 
@@ -65,6 +133,8 @@ std::vector<AudioDeviceInfo> MiniAudioSource::listInputDevices() {
             defaultSR
         });
     }
 
+#endif
+
     return result;
 }
@@ -106,6 +176,10 @@ bool MiniAudioSource::open() {
     ma_device_id* pDeviceID = nullptr;
     ma_device_id deviceID{};
 
+#ifdef __EMSCRIPTEN__
+    // Tell the JS layer which microphone to use in the next getUserMedia call.
+    baudmine_js_set_selected_device(deviceIndex_);
+#else
     if (deviceIndex_ >= 0) {
         ma_device_info* captureDevices;
         ma_uint32 captureCount;
@@ -117,6 +191,7 @@
             }
         }
     }
+#endif
 
     ma_device_config config = ma_device_config_init(ma_device_type_capture);
     config.capture.pDeviceID = pDeviceID;
diff --git a/src/audio/miniaudio.h b/src/audio/miniaudio.h
index 5509490..54f9aae 100644
--- a/src/audio/miniaudio.h
+++ b/src/audio/miniaudio.h
@@ -41907,7 +41907,11 @@ static void ma_audio_worklet_processor_created__webaudio(EMSCRIPTEN_WEBAUDIO_T a
         var audioContext = emscriptenGetAudioObject($1);
         var reqCh = $2;
 
-        navigator.mediaDevices.getUserMedia({audio:{channelCount:{ideal:reqCh}}, video:false})
+        var audioConstraints = {channelCount:{ideal:reqCh}};
+        if (window.baudmine_selectedDeviceId) {
+            audioConstraints.deviceId = {exact: window.baudmine_selectedDeviceId};
+        }
+        navigator.mediaDevices.getUserMedia({audio:audioConstraints, video:false})
         .then(function(stream) {
             audioContext.streamNode = audioContext.createMediaStreamSource(stream);
             audioContext.streamNode.connect(audioWorklet);
@@ -42210,7 +42214,11 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
 
         /* Now we need to connect our node to the graph. */
         if (deviceType == window.miniaudio.device_type.capture || deviceType == window.miniaudio.device_type.duplex) {
-            navigator.mediaDevices.getUserMedia({audio:{channelCount:{ideal:channels}}, video:false})
+            var audioConstraints = {channelCount:{ideal:channels}};
+            if (window.baudmine_selectedDeviceId) {
+                audioConstraints.deviceId = {exact: window.baudmine_selectedDeviceId};
+            }
+            navigator.mediaDevices.getUserMedia({audio:audioConstraints, video:false})
             .then(function(stream) {
                 device.streamNode = device.webaudio.createMediaStreamSource(stream);
                 device.streamNode.connect(device.scriptNode);