Node Stream bindings for PortAudio.
Install with `npm install naudiodon`.
To get a list of the available audio devices, call the getDevices() function. For example:
```javascript
var portAudio = require('naudiodon');
console.log(portAudio.getDevices());
```
An example of the output is:
```javascript
[ { id: 0,
name: 'Built-in Microph',
maxInputChannels: 2,
maxOutputChannels: 0,
defaultSampleRate: 44100,
defaultLowInputLatency: 0.00199546485260771,
defaultLowOutputLatency: 0.01,
defaultHighInputLatency: 0.012154195011337868,
defaultHighOutputLatency: 0.1,
hostAPIName: 'Core Audio' },
{ id: 1,
name: 'Built-in Input',
maxInputChannels: 2,
maxOutputChannels: 0,
defaultSampleRate: 44100,
defaultLowInputLatency: 0.00199546485260771,
defaultLowOutputLatency: 0.01,
defaultHighInputLatency: 0.012154195011337868,
defaultHighOutputLatency: 0.1,
hostAPIName: 'Core Audio' },
{ id: 2,
name: 'Built-in Output',
maxInputChannels: 0,
maxOutputChannels: 2,
defaultSampleRate: 44100,
defaultLowInputLatency: 0.01,
defaultLowOutputLatency: 0.002108843537414966,
defaultHighInputLatency: 0.1,
defaultHighOutputLatency: 0.012267573696145125,
hostAPIName: 'Core Audio' } ]
```
Note that the device id value can be used to specify which device to use for playback or recording, via the optional deviceId parameter.
### Listing host APIs
To get a list of host APIs, call the getHostAPIs() function.
```javascript
var portAudio = require('naudiodon');
console.log(portAudio.getHostAPIs());
```
An example of the output is:
```javascript
{ defaultHostAPI: 0,
HostAPIs:
[ { id: 0,
name: 'MME',
deviceCount: 8,
defaultInput: 1,
defaultOutput: 5 },
{ /* ... */ } ] }
```
Note that the defaultInput and defaultOutput values can be used to specify which device to use for playback or recording, via the optional deviceId parameter.
### Playing audio
Playing audio involves streaming audio data to a new instance of AudioIO configured with outOptions - which returns a Node.js Writable Stream:
```javascript
const fs = require('fs');
const portAudio = require('naudiodon');
// Create an instance of AudioIO with outOptions (defaults are as below), which will return a WritableStream
var ao = new portAudio.AudioIO({
outOptions: {
channelCount: 2,
sampleFormat: portAudio.SampleFormat16Bit,
sampleRate: 48000,
deviceId: -1, // Use -1 or omit the deviceId to select the default device
closeOnError: true // Close the stream if an audio error is detected, if set false then just log the error
}
});
// Create a stream to pipe into the AudioOutput
// Note that this does not strip the WAV header so a click will be heard at the beginning
var rs = fs.createReadStream('steam_48000.wav');
// Start piping data and start streaming
rs.pipe(ao);
ao.start();
```
### Recording audio
Recording audio involves streaming audio data from a new instance of AudioIO configured with inOptions - which returns a Node.js Readable Stream:
```javascript
var fs = require('fs');
var portAudio = require('../index.js');
// Create an instance of AudioIO with inOptions (defaults are as below), which will return a ReadableStream
var ai = new portAudio.AudioIO({
inOptions: {
channelCount: 2,
sampleFormat: portAudio.SampleFormat16Bit,
sampleRate: 44100,
deviceId: -1, // Use -1 or omit the deviceId to select the default device
closeOnError: true // Close the stream if an audio error is detected, if set false then just log the error
}
});
// Create a write stream to write out to a raw audio file
var ws = fs.createWriteStream('rawAudio.raw');
//Start streaming
ai.pipe(ws);
ai.start();
```
Note that this produces a raw audio file - wav headers would be required to create a wav file. However, this basic example produces a file that may be read by audio software such as Audacity, using the sample rate and format parameters set when establishing the stream.
There is an additional "timestamp" property available on the buffers that are streamed from the input which represents a time value for the first sample in the returned buffer. It can be accessed as follows:
```javascript
ai.on('data', buf => console.log(buf.timestamp));
```
To stop the recording, call ai.quit(). For example:
```javascript
process.on('SIGINT', () => {
console.log('Received SIGINT. Stopping recording.');
ai.quit();
});
```
### Bi-directional audio
A bi-directional audio stream is available by creating an instance of AudioIO configured with both inOptions and outOptions - which returns a Node.js Duplex stream:
```javascript
var portAudio = require('../index.js');
// Create an instance of AudioIO with inOptions and outOptions, which will return a DuplexStream
var aio = new portAudio.AudioIO({
inOptions: {
channelCount: 2,
sampleFormat: portAudio.SampleFormat16Bit,
sampleRate: 44100,
deviceId: -1 // Use -1 or omit the deviceId to select the default device
},
outOptions: {
channelCount: 2,
sampleFormat: portAudio.SampleFormat16Bit,
sampleRate: 44100,
deviceId: -1 // Use -1 or omit the deviceId to select the default device
}
});
aio.start();
```
### WASAPI exclusive mode (Windows)
```javascript
var portAudio = require('../index.js');
var aio = new portAudio.AudioIO({
inOptions: {
channelCount: 2,
sampleFormat: portAudio.SampleFormat16Bit,
sampleRate: 44100,
deviceId: -1, // WASAPI Device
useExclusiveMode: true,
},
outOptions: {
channelCount: 2,
sampleFormat: portAudio.SampleFormat16Bit,
sampleRate: 44100,
deviceId: -1, // WASAPI Device
useExclusiveMode: true,
}
});
aio.start();
```
## Troubleshooting
### Linux
Ensure that when you compile portaudio, the configure script says "ALSA" yes.
### MacOS
You may see or have seen the following message during initialisation of the audio library on MacOS:
```
WARNING: 140: This application, or a library it uses, is using the deprecated Carbon Component Manager
for hosting Audio Units. Support for this will be removed in a future release. Also, this makes the host
incompatible with version 3 audio units. Please transition to the API's in AudioComponent.h.
```