Merge lp:~marcustomlinson/dspatchables/add_basic_components into lp:dspatchables

Proposed by Marcus Tomlinson
Status: Merged
Merged at revision: 2
Proposed branch: lp:~marcustomlinson/dspatchables/add_basic_components
Merge into: lp:dspatchables
Diff against target: 16367 lines (+15835/-331)
35 files modified
CMakeLists.txt (+7/-0)
DspAdder/CMakeLists.txt (+17/-0)
DspAdder/DspAdder.cpp (+76/-0)
DspAdder/DspAdder.h (+80/-0)
DspAudioDevice/CMakeLists.txt (+61/-0)
DspAudioDevice/DspAudioDevice.cpp (+427/-0)
DspAudioDevice/DspAudioDevice.h (+109/-0)
DspAudioDevice/rtaudio/RtAudio.cpp (+10136/-0)
DspAudioDevice/rtaudio/RtAudio.h (+1162/-0)
DspGain/CMakeLists.txt (+17/-0)
DspGain/DspGain.cpp (+86/-0)
DspGain/DspGain.h (+65/-0)
DspOscillator/CMakeLists.txt (+1/-3)
DspOscillator/DspOscillator.cpp (+222/-223)
DspOscillator/DspOscillator.h (+105/-105)
DspWaveStreamer/CMakeLists.txt (+17/-0)
DspWaveStreamer/DspWaveStreamer.cpp (+279/-0)
DspWaveStreamer/DspWaveStreamer.h (+103/-0)
include/DSPatch.h (+632/-0)
include/dspatch/DspCircuit.h (+321/-0)
include/dspatch/DspCircuitThread.h (+89/-0)
include/dspatch/DspComponent.h (+282/-0)
include/dspatch/DspComponentThread.h (+72/-0)
include/dspatch/DspParameter.h (+111/-0)
include/dspatch/DspPlugin.h (+95/-0)
include/dspatch/DspPluginLoader.h (+75/-0)
include/dspatch/DspRunType.h (+204/-0)
include/dspatch/DspSignal.h (+130/-0)
include/dspatch/DspSignalBus.h (+192/-0)
include/dspatch/DspThread.h (+43/-0)
include/dspatch/DspThreadNull.h (+135/-0)
include/dspatch/DspThreadUnix.h (+177/-0)
include/dspatch/DspThreadWin.h (+178/-0)
include/dspatch/DspWire.h (+58/-0)
include/dspatch/DspWireBus.h (+71/-0)
To merge this branch: bzr merge lp:~marcustomlinson/dspatchables/add_basic_components
Reviewer Review Type Date Requested Status
Marcus Tomlinson Pending
Review via email: mp+255246@code.launchpad.net
To post a comment you must log in.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'CMakeLists.txt'
--- CMakeLists.txt 2014-12-23 19:18:01 +0000
+++ CMakeLists.txt 2015-04-05 22:23:36 +0000
@@ -4,4 +4,11 @@
44
5project(DSPatchables)5project(DSPatchables)
66
7include_directories(${CMAKE_SOURCE_DIR}/include)
8link_directories(${CMAKE_SOURCE_DIR}/link)
9
10add_subdirectory(DspAdder)
11add_subdirectory(DspAudioDevice)
12add_subdirectory(DspGain)
7add_subdirectory(DspOscillator)13add_subdirectory(DspOscillator)
14add_subdirectory(DspWaveStreamer)
815
=== added directory 'DspAdder'
=== added file 'DspAdder/CMakeLists.txt'
--- DspAdder/CMakeLists.txt 1970-01-01 00:00:00 +0000
+++ DspAdder/CMakeLists.txt 2015-04-05 22:23:36 +0000
@@ -0,0 +1,17 @@
1project(DspAdder)
2
3file(GLOB srcs *.cpp)
4file(GLOB hdrs *.h)
5
6include_directories(${CMAKE_CURRENT_SOURCE_DIR})
7
8add_library(
9 ${PROJECT_NAME} SHARED
10 ${srcs}
11 ${hdrs}
12)
13
14target_link_libraries(
15 ${PROJECT_NAME}
16 DSPatch
17)
018
=== added file 'DspAdder/DspAdder.cpp'
--- DspAdder/DspAdder.cpp 1970-01-01 00:00:00 +0000
+++ DspAdder/DspAdder.cpp 2015-04-05 22:23:36 +0000
@@ -0,0 +1,76 @@
1/************************************************************************
2DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library
3Copyright (c) 2012-2015 Marcus Tomlinson
4
5This file is part of DSPatch.
6
7GNU Lesser General Public License Usage
8This file may be used under the terms of the GNU Lesser General Public
9License version 3.0 as published by the Free Software Foundation and
10appearing in the file LGPLv3.txt included in the packaging of this
11file. Please review the following information to ensure the GNU Lesser
12General Public License version 3.0 requirements will be met:
13http://www.gnu.org/copyleft/lgpl.html.
14
15Other Usage
16Alternatively, this file may be used in accordance with the terms and
17conditions contained in a signed written agreement between you and
18Marcus Tomlinson.
19
20DSPatch is distributed in the hope that it will be useful,
21but WITHOUT ANY WARRANTY; without even the implied warranty of
22MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
23************************************************************************/
24
25#include <DspAdder.h>
26
27//=================================================================================================
28
29DspAdder::DspAdder()
30{
31 // add 2 inputs
32 AddInput_("Input1");
33 AddInput_("Input2");
34
35 // add 1 output
36 AddOutput_("Output1");
37}
38
39//-------------------------------------------------------------------------------------------------
40
41DspAdder::~DspAdder()
42{
43}
44
45//=================================================================================================
46
47void DspAdder::Process_(DspSignalBus& inputs, DspSignalBus& outputs)
48{
49 // get input values from inputs bus (GetValue() returns true if successful)
50 if (!inputs.GetValue(0, _stream1))
51 {
52 _stream1.assign(_stream1.size(), 0); // clear buffer if no input received
53 }
54 // do the same to the 2nd input buffer
55 if (!inputs.GetValue(1, _stream2))
56 {
57 _stream2.assign(_stream2.size(), 0);
58 }
59
60 // ensure that the 2 input buffer sizes match
61 if (_stream1.size() == _stream2.size())
62 {
63 for (size_t i = 0; i < _stream1.size(); i++)
64 {
65 _stream1[i] += _stream2[i]; // perform addition element-by-element
66 }
67 outputs.SetValue(0, _stream1); // set output 1
68 }
69 // if input sizes don't match
70 else
71 {
72 outputs.ClearValue(0); // clear the output
73 }
74}
75
76//=================================================================================================
077
=== added file 'DspAdder/DspAdder.h'
--- DspAdder/DspAdder.h 1970-01-01 00:00:00 +0000
+++ DspAdder/DspAdder.h 2015-04-05 22:23:36 +0000
@@ -0,0 +1,80 @@
1/************************************************************************
2DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library
3Copyright (c) 2012-2015 Marcus Tomlinson
4
5This file is part of DSPatch.
6
7GNU Lesser General Public License Usage
8This file may be used under the terms of the GNU Lesser General Public
9License version 3.0 as published by the Free Software Foundation and
10appearing in the file LGPLv3.txt included in the packaging of this
11file. Please review the following information to ensure the GNU Lesser
12General Public License version 3.0 requirements will be met:
13http://www.gnu.org/copyleft/lgpl.html.
14
15Other Usage
16Alternatively, this file may be used in accordance with the terms and
17conditions contained in a signed written agreement between you and
18Marcus Tomlinson.
19
20DSPatch is distributed in the hope that it will be useful,
21but WITHOUT ANY WARRANTY; without even the implied warranty of
22MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
23************************************************************************/
24
25#ifndef DSPADDER_H
26#define DSPADDER_H
27
28#include <DSPatch.h>
29
30//=================================================================================================
31/// Example DspComponent: Adder
32
33/** This component has 2 inputs and 1 output. The component receives 2 floating-point buffers into
34it's 2 inputs, adds each buffer element of the 1st buffer to the corresponding element of the 2nd
35buffer, then stores the resultant buffer into a 3rd buffer. This resultant buffer is then passed to
36output 1 of the component output bus. */
37
38class DspAdder : public DspComponent
39{
40public:
41 //! Component constructor
42 /*! When a component is constructed, its input and output buses must be configured. This is
43 achieved by making calls to the base class protected methods: "AddInput_()" and "AddOutput_()".
44 These methods must be called once per input / output required. IO signal names are optional
45 (Component IO can be referenced by either string ID or index) and can be assigned to each
46 input / output by supplying the desired string ID as an argument to the respective AddInput_()
47 / AddOutput_() method call.*/
48
49 DspAdder();
50 ~DspAdder();
51
52protected:
53 //! Virtual process method inherited from DspComponent
54 /*! The Process_() method is called from the DSPatch engine when a new set of component input
55 signals are ready for processing. The Process_() method has 2 arguments: the input bus and the
56 output bus. This method's purpose is to pull its required inputs out of the input bus, process
57 these inputs, and populate the output bus with the results (see DspSignalBus). */
58
59 virtual void Process_(DspSignalBus& inputs, DspSignalBus& outputs);
60
61private:
62 std::vector<float> _stream1;
63 std::vector<float> _stream2;
64};
65
66//=================================================================================================
67
68class DspAdderPlugin : public DspPlugin
69{
70 DspComponent* Create(std::map<std::string, DspParameter>&) const
71 {
72 return new DspAdder();
73 }
74};
75
76EXPORT_DSPPLUGIN(DspAdderPlugin)
77
78//=================================================================================================
79
80#endif // DSPADDER_H
081
=== added directory 'DspAudioDevice'
=== added file 'DspAudioDevice/CMakeLists.txt'
--- DspAudioDevice/CMakeLists.txt 1970-01-01 00:00:00 +0000
+++ DspAudioDevice/CMakeLists.txt 2015-04-05 22:23:36 +0000
@@ -0,0 +1,61 @@
1project(DspAudioDevice)
2
3file(GLOB srcs *.cpp rtaudio/*.cpp)
4file(GLOB hdrs *.h rtaudio/*.h)
5
6include_directories(
7 ${CMAKE_CURRENT_SOURCE_DIR}
8 ${CMAKE_CURRENT_SOURCE_DIR}/rtaudio
9)
10
11add_library(
12 ${PROJECT_NAME} SHARED
13 ${srcs}
14 ${hdrs}
15)
16
17target_link_libraries(
18 ${PROJECT_NAME}
19 DSPatch
20)
21
22# Definition for RtAudio Windows, using WASAPI
23if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
24 add_definitions(-DEXAMPLE_PLUGIN_FILE="${CMAKE_CURRENT_BINARY_DIR}/oscillator-plugin/$<CONFIGURATION>/DspOscillator.dll")
25 add_definitions(-D__WINDOWS_WASAPI__)
26
27 add_custom_command(
28 TARGET ${PROJECT_NAME} POST_BUILD
29 COMMAND ${CMAKE_COMMAND} -E copy_if_different
30 ${CMAKE_BINARY_DIR}/$<CONFIGURATION>/DSPatch.dll
31 ${CMAKE_CURRENT_BINARY_DIR}/$<CONFIGURATION>
32 )
33endif(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
34
35# Definition for RtAudio Linux, using ALSA
36if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
37 add_definitions(-DEXAMPLE_PLUGIN_FILE="${CMAKE_CURRENT_BINARY_DIR}/oscillator-plugin/libDspOscillator.so")
38 add_definitions(-D__LINUX_ALSA__)
39
40 find_library(ASOUND asound)
41 if(NOT ASOUND)
42 message(FATAL_ERROR "ALSA not found (Ensure that libasound2-dev is installed)")
43 endif()
44
45 target_link_libraries(
46 ${PROJECT_NAME}
47 ${ASOUND}
48 )
49endif(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
50
51# Definition for RtAudio Mac OSX, using Core Audio
52if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
53 add_definitions(-DEXAMPLE_PLUGIN_FILE="${CMAKE_CURRENT_BINARY_DIR}/oscillator-plugin/$<CONFIGURATION>/libDspOscillator.dylib")
54 add_definitions(-D__MACOSX_CORE__)
55
56 target_link_libraries(
57 ${PROJECT_NAME}
58 "-framework CoreAudio"
59 "-framework CoreFoundation"
60 )
61endif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
062
=== added file 'DspAudioDevice/DspAudioDevice.cpp'
--- DspAudioDevice/DspAudioDevice.cpp 1970-01-01 00:00:00 +0000
+++ DspAudioDevice/DspAudioDevice.cpp 2015-04-05 22:23:36 +0000
@@ -0,0 +1,427 @@
1/************************************************************************
2DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library
3Copyright (c) 2012-2015 Marcus Tomlinson
4
5This file is part of DSPatch.
6
7GNU Lesser General Public License Usage
8This file may be used under the terms of the GNU Lesser General Public
9License version 3.0 as published by the Free Software Foundation and
10appearing in the file LGPLv3.txt included in the packaging of this
11file. Please review the following information to ensure the GNU Lesser
12General Public License version 3.0 requirements will be met:
13http://www.gnu.org/copyleft/lgpl.html.
14
15Other Usage
16Alternatively, this file may be used in accordance with the terms and
17conditions contained in a signed written agreement between you and
18Marcus Tomlinson.
19
20DSPatch is distributed in the hope that it will be useful,
21but WITHOUT ANY WARRANTY; without even the implied warranty of
22MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
23************************************************************************/
24
25#include <DspAudioDevice.h>
26
27#include <RtAudio.h>
28
29#include <iostream>
30#include <string.h>
31#include <cstdlib>
32
33//=================================================================================================
34
35struct RtAudioMembers
36{
37 std::vector<RtAudio::DeviceInfo> deviceList;
38
39 RtAudio audioStream;
40 RtAudio::StreamParameters outputParams;
41 RtAudio::StreamParameters inputParams;
42};
43
44//=================================================================================================
45
46DspAudioDevice::DspAudioDevice()
47 : _rtAudio(new RtAudioMembers())
48 , _gotWaitReady(false)
49 , _gotSyncReady(true)
50{
51 _outputChannels.resize(8);
52 for (int i = 0; i < 8; i++)
53 {
54 AddInput_();
55 }
56
57 AddInput_("Sample Rate");
58
59 _inputChannels.resize(8);
60 for (int i = 0; i < 8; i++)
61 {
62 AddOutput_();
63 }
64
65 std::vector<std::string> deviceNameList;
66
67 for (unsigned int i = 0; i < _rtAudio->audioStream.getDeviceCount(); i++)
68 {
69 _rtAudio->deviceList.push_back(_rtAudio->audioStream.getDeviceInfo(i));
70 deviceNameList.push_back(_rtAudio->audioStream.getDeviceInfo(i).name);
71 }
72
73 pDeviceList = AddParameter_("deviceList", DspParameter(DspParameter::List, deviceNameList));
74 pIsStreaming = AddParameter_("isStreaming", DspParameter(DspParameter::Bool, false));
75 pBufferSize = AddParameter_("bufferSize", DspParameter(DspParameter::Int, 256));
76 pSampleRate = AddParameter_("sampleRate", DspParameter(DspParameter::Int, 44100));
77
78 SetDevice(_rtAudio->audioStream.getDefaultOutputDevice());
79 SetBufferSize(GetBufferSize());
80 SetSampleRate(GetSampleRate());
81}
82
83//-------------------------------------------------------------------------------------------------
84
85DspAudioDevice::~DspAudioDevice()
86{
87 _StopStream();
88
89 delete _rtAudio;
90}
91
92//-------------------------------------------------------------------------------------------------
93
94bool DspAudioDevice::SetDevice(int deviceIndex)
95{
96 if (deviceIndex >= 0 && deviceIndex < GetDeviceCount())
97 {
98 _StopStream();
99
100 SetParameter_(pDeviceList, DspParameter(DspParameter::Int, deviceIndex));
101
102 _rtAudio->inputParams.nChannels = _rtAudio->deviceList[deviceIndex].inputChannels;
103 _rtAudio->inputParams.deviceId = deviceIndex;
104
105 _rtAudio->outputParams.nChannels = _rtAudio->deviceList[deviceIndex].outputChannels;
106 _rtAudio->outputParams.deviceId = deviceIndex;
107
108 _StartStream();
109
110 return true;
111 }
112
113 return false;
114}
115
116//-------------------------------------------------------------------------------------------------
117
118std::string DspAudioDevice::GetDeviceName(int deviceIndex) const
119{
120 if (deviceIndex >= 0 && deviceIndex < GetDeviceCount())
121 {
122 return _rtAudio->deviceList[deviceIndex].name;
123 }
124
125 return "";
126}
127
128//-------------------------------------------------------------------------------------------------
129
130int DspAudioDevice::GetDeviceInputCount(int deviceIndex) const
131{
132 return _rtAudio->deviceList[deviceIndex].inputChannels;
133}
134
135//-------------------------------------------------------------------------------------------------
136
137int DspAudioDevice::GetDeviceOutputCount(int deviceIndex) const
138{
139 return _rtAudio->deviceList[deviceIndex].outputChannels;
140}
141
142//-------------------------------------------------------------------------------------------------
143
144int DspAudioDevice::GetCurrentDevice() const
145{
146 return *GetParameter_(pDeviceList)->GetInt();
147}
148
149//-------------------------------------------------------------------------------------------------
150
151int DspAudioDevice::GetDeviceCount() const
152{
153 return GetParameter_(pDeviceList)->GetList()->size();
154}
155
156//-------------------------------------------------------------------------------------------------
157
158void DspAudioDevice::SetBufferSize(int bufferSize)
159{
160 _StopStream();
161
162 SetParameter_(pBufferSize, DspParameter(DspParameter::Int, bufferSize));
163 for (size_t i = 0; i < _inputChannels.size(); i++)
164 {
165 _inputChannels[i].resize(bufferSize);
166 }
167
168 _StartStream();
169}
170
171//-------------------------------------------------------------------------------------------------
172
173void DspAudioDevice::SetSampleRate(int sampleRate)
174{
175 _StopStream();
176 SetParameter_(pSampleRate, DspParameter(DspParameter::Int, sampleRate));
177 _StartStream();
178}
179
180//-------------------------------------------------------------------------------------------------
181
182bool DspAudioDevice::IsStreaming() const
183{
184 return *GetParameter_(pIsStreaming)->GetBool();
185}
186
187//-------------------------------------------------------------------------------------------------
188
189int DspAudioDevice::GetBufferSize() const
190{
191 return *GetParameter_(pBufferSize)->GetInt();
192}
193
194//-------------------------------------------------------------------------------------------------
195
196int DspAudioDevice::GetSampleRate() const
197{
198 return *GetParameter_(pSampleRate)->GetInt();
199}
200
201//=================================================================================================
202
203void DspAudioDevice::Process_(DspSignalBus& inputs, DspSignalBus& outputs)
204{
205 // Wait until the sound card is ready for the next set of buffers
206 // ==============================================================
207 _syncMutex.Lock();
208 if (!_gotSyncReady) // if haven't already got the release
209 {
210 _syncCondt.Wait(_syncMutex); // wait for sync
211 }
212 _gotSyncReady = false; // reset the release flag
213 _syncMutex.Unlock();
214
215 // Synchronise sample rate with the "Sample Rate" input feed
216 // =========================================================
217 int sampleRate;
218 if (inputs.GetValue("Sample Rate", sampleRate))
219 {
220 if (sampleRate != GetSampleRate())
221 {
222 SetSampleRate(sampleRate);
223 }
224 }
225
226 // Synchronise buffer size with the size of incoming buffers
227 // =========================================================
228 if (inputs.GetValue(0, _outputChannels[0]))
229 {
230 if (GetBufferSize() != (int)_outputChannels[0].size() && _outputChannels[0].size() != 0)
231 {
232 SetBufferSize(_outputChannels[0].size());
233 }
234 }
235
236 // Retrieve incoming component buffers for the sound card to output
237 // ================================================================
238 for (size_t i = 0; i < _outputChannels.size(); i++)
239 {
240 if (!inputs.GetValue(i, _outputChannels[i]))
241 {
242 _outputChannels[i].assign(_outputChannels[i].size(), 0);
243 }
244 }
245
246 // Retrieve incoming sound card buffers for the component to output
247 // ================================================================
248 for (size_t i = 0; i < _inputChannels.size(); i++)
249 {
250 outputs.SetValue(i, _inputChannels[i]);
251 }
252
253 // Inform the sound card that buffers are now ready
254 // ================================================
255 _buffersMutex.Lock();
256 _gotWaitReady = true; // set release flag
257 _waitCondt.WakeAll(); // release sync
258 _buffersMutex.Unlock();
259}
260
261//-------------------------------------------------------------------------------------------------
262
263bool DspAudioDevice::ParameterUpdating_(int index, DspParameter const& param)
264{
265 if (index == pDeviceList)
266 {
267 return SetDevice(*param.GetInt());
268 }
269 else if (index == pBufferSize)
270 {
271 SetBufferSize(*param.GetInt());
272 return true;
273 }
274 else if (index == pSampleRate)
275 {
276 SetSampleRate(*param.GetInt());
277 return true;
278 }
279
280 return false;
281}
282
283//=================================================================================================
284
285void DspAudioDevice::_SetIsStreaming(bool isStreaming)
286{
287 SetParameter_(pIsStreaming, DspParameter(DspParameter::Bool, isStreaming));
288}
289
290//-------------------------------------------------------------------------------------------------
291
292void DspAudioDevice::_WaitForBuffer()
293{
294 _buffersMutex.Lock();
295 if (!_gotWaitReady) // if haven't already got the release
296 {
297 _waitCondt.Wait(_buffersMutex); // wait for sync
298 }
299 _gotWaitReady = false; // reset the release flag
300 _buffersMutex.Unlock();
301}
302
303//-------------------------------------------------------------------------------------------------
304
305void DspAudioDevice::_SyncBuffer()
306{
307 _syncMutex.Lock();
308 _gotSyncReady = true; // set release flag
309 _syncCondt.WakeAll(); // release sync
310 _syncMutex.Unlock();
311}
312
313//-------------------------------------------------------------------------------------------------
314
315void DspAudioDevice::_StopStream()
316{
317 _SetIsStreaming(false);
318
319 _buffersMutex.Lock();
320 _gotWaitReady = true; // set release flag
321 _waitCondt.WakeAll(); // release sync
322 _buffersMutex.Unlock();
323
324 if (_rtAudio->audioStream.isStreamOpen())
325 {
326 _rtAudio->audioStream.closeStream();
327 }
328}
329
330//-------------------------------------------------------------------------------------------------
331
332void DspAudioDevice::_StartStream()
333{
334 RtAudio::StreamParameters* inputParams = NULL;
335 RtAudio::StreamParameters* outputParams = NULL;
336
337 if (_rtAudio->inputParams.nChannels != 0)
338 {
339 inputParams = &_rtAudio->inputParams;
340 }
341
342 if (_rtAudio->outputParams.nChannels != 0)
343 {
344 outputParams = &_rtAudio->outputParams;
345 }
346
347 RtAudio::StreamOptions options;
348 options.flags |= RTAUDIO_SCHEDULE_REALTIME;
349 options.flags |= RTAUDIO_NONINTERLEAVED;
350
351 _rtAudio->audioStream.openStream(outputParams,
352 inputParams,
353 RTAUDIO_FLOAT32,
354 GetSampleRate(),
355 (unsigned int*)const_cast<int*>(GetParameter_(pBufferSize)->GetInt()),
356 &_StaticCallback,
357 this,
358 &options);
359
360 _rtAudio->audioStream.startStream();
361
362 while (!_rtAudio->audioStream.isStreamOpen())
363 {
364 DspThread::MsSleep(10);
365 }
366
367 _SetIsStreaming(true);
368}
369
370//-------------------------------------------------------------------------------------------------
371
372int DspAudioDevice::_StaticCallback(
373 void* outputBuffer, void* inputBuffer, unsigned int, double, unsigned int, void* userData)
374{
375 return (reinterpret_cast<DspAudioDevice*>(userData))->_DynamicCallback(inputBuffer, outputBuffer);
376}
377
378//-------------------------------------------------------------------------------------------------
379
380int DspAudioDevice::_DynamicCallback(void* inputBuffer, void* outputBuffer)
381{
382 _WaitForBuffer();
383
384 if (IsStreaming())
385 {
386 float* floatOutput = (float*)outputBuffer;
387 float* floatInput = (float*)inputBuffer;
388
389 if (outputBuffer != NULL)
390 {
391 for (size_t i = 0; i < _outputChannels.size(); i++)
392 {
393 if (_rtAudio->deviceList[GetCurrentDevice()].outputChannels >= (i + 1))
394 {
395 for (size_t j = 0; j < _outputChannels[i].size(); j++)
396 {
397 *floatOutput++ = _outputChannels[i][j];
398 }
399 }
400 }
401 }
402
403 if (inputBuffer != NULL)
404 {
405 for (size_t i = 0; i < _inputChannels.size(); i++)
406 {
407 if (_rtAudio->deviceList[GetCurrentDevice()].inputChannels >= (i + 1))
408 {
409 for (size_t j = 0; j < _inputChannels[i].size(); j++)
410 {
411 _inputChannels[i][j] = *floatInput++;
412 }
413 }
414 }
415 }
416 }
417 else
418 {
419 _SyncBuffer();
420 return 1;
421 }
422
423 _SyncBuffer();
424 return 0;
425}
426
427//=================================================================================================
0428
=== added file 'DspAudioDevice/DspAudioDevice.h'
--- DspAudioDevice/DspAudioDevice.h 1970-01-01 00:00:00 +0000
+++ DspAudioDevice/DspAudioDevice.h 2015-04-05 22:23:36 +0000
@@ -0,0 +1,109 @@
1/************************************************************************
2DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library
3Copyright (c) 2012-2015 Marcus Tomlinson
4
5This file is part of DSPatch.
6
7GNU Lesser General Public License Usage
8This file may be used under the terms of the GNU Lesser General Public
9License version 3.0 as published by the Free Software Foundation and
10appearing in the file LGPLv3.txt included in the packaging of this
11file. Please review the following information to ensure the GNU Lesser
12General Public License version 3.0 requirements will be met:
13http://www.gnu.org/copyleft/lgpl.html.
14
15Other Usage
16Alternatively, this file may be used in accordance with the terms and
17conditions contained in a signed written agreement between you and
18Marcus Tomlinson.
19
20DSPatch is distributed in the hope that it will be useful,
21but WITHOUT ANY WARRANTY; without even the implied warranty of
22MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
23************************************************************************/
24
25#ifndef DSPAUDIODEVICE_H
26#define DSPAUDIODEVICE_H
27
28#include <DSPatch.h>
29
30struct RtAudioMembers;
31
32//=================================================================================================
33
34class DspAudioDevice : public DspComponent
35{
36public:
37 int pDeviceList; // List
38 int pIsStreaming; // Bool
39 int pBufferSize; // Int
40 int pSampleRate; // Int
41
42 DspAudioDevice();
43 ~DspAudioDevice();
44
45 bool SetDevice(int deviceIndex);
46
47 std::string GetDeviceName(int deviceIndex) const;
48 int GetDeviceInputCount(int deviceIndex) const;
49 int GetDeviceOutputCount(int deviceIndex) const;
50 int GetCurrentDevice() const;
51 int GetDeviceCount() const;
52
53 void SetBufferSize(int bufferSize);
54 void SetSampleRate(int sampleRate);
55
56 bool IsStreaming() const;
57 int GetBufferSize() const;
58 int GetSampleRate() const;
59
60protected:
61 virtual void Process_(DspSignalBus& inputs, DspSignalBus& outputs);
62 virtual bool ParameterUpdating_(int index, DspParameter const& param);
63
64private:
65 std::vector< std::vector<float> > _outputChannels;
66 std::vector< std::vector<float> > _inputChannels;
67
68 RtAudioMembers* _rtAudio;
69
70 DspMutex _buffersMutex;
71 DspMutex _syncMutex;
72 DspWaitCondition _waitCondt;
73 DspWaitCondition _syncCondt;
74 bool _gotWaitReady;
75 bool _gotSyncReady;
76
77 void _SetIsStreaming(bool isStreaming);
78
79 void _WaitForBuffer();
80 void _SyncBuffer();
81
82 void _StopStream();
83 void _StartStream();
84
85 static int _StaticCallback(void* outputBuffer,
86 void* inputBuffer,
87 unsigned int nBufferFrames,
88 double streamTime,
89 unsigned int status,
90 void* userData);
91
92 int _DynamicCallback(void* inputBuffer, void* outputBuffer);
93};
94
95//=================================================================================================
96
97class DspAudioDevicePlugin : public DspPlugin
98{
99 DspComponent* Create(std::map<std::string, DspParameter>&) const
100 {
101 return new DspAudioDevice();
102 }
103};
104
105EXPORT_DSPPLUGIN(DspAudioDevicePlugin)
106
107//=================================================================================================
108
109#endif // DSPAUDIODEVICE_H
0110
=== added directory 'DspAudioDevice/rtaudio'
=== added file 'DspAudioDevice/rtaudio/RtAudio.cpp'
--- DspAudioDevice/rtaudio/RtAudio.cpp 1970-01-01 00:00:00 +0000
+++ DspAudioDevice/rtaudio/RtAudio.cpp 2015-04-05 22:23:36 +0000
@@ -0,0 +1,10136 @@
1/************************************************************************/
2/*! \class RtAudio
3 \brief Realtime audio i/o C++ classes.
4
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
9
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
11
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2014 Gary P. Scavone
14
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
22
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
25
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
30
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38*/
39/************************************************************************/
40
41// RtAudio: Version 4.1.1
42
43#include "RtAudio.h"
44#include <iostream>
45#include <cstdlib>
46#include <cstring>
47#include <climits>
48
// Static variable definitions.
// Canonical sample rates probed when a device reports a continuous range of
// supported rates instead of discrete values (see getDeviceInfo implementations).
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
55
// Platform mutex abstraction protecting stream_ state: Windows APIs use
// CRITICAL_SECTIONs, the POSIX-style APIs use pthread mutexes, and API-less
// builds map the macros to a harmless dummy expression.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  // NOTE(review): MUTEX_LOCK/MUTEX_UNLOCK are not defined in this fallback
  // branch — any code path using them without a compiled API would fail to
  // build. Presumably no such path exists in the dummy build; verify.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
71
72// *************************************************** //
73//
74// RtAudio definitions.
75//
76// *************************************************** //
77
// Returns the library version string (RTAUDIO_VERSION, defined in RtAudio.h).
std::string RtAudio :: getVersion( void ) throw()
{
  return RTAUDIO_VERSION;
}
82
// Fills 'apis' with the audio APIs compiled into this build.
// The order here will control the order of RtAudio's API search in
// the constructor, so it doubles as the default-API priority list.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
{
  apis.clear();

#if defined(__UNIX_JACK__)
  apis.push_back( UNIX_JACK );
#endif
#if defined(__LINUX_ALSA__)
  apis.push_back( LINUX_ALSA );
#endif
#if defined(__LINUX_PULSE__)
  apis.push_back( LINUX_PULSE );
#endif
#if defined(__LINUX_OSS__)
  apis.push_back( LINUX_OSS );
#endif
#if defined(__WINDOWS_ASIO__)
  apis.push_back( WINDOWS_ASIO );
#endif
#if defined(__WINDOWS_WASAPI__)
  apis.push_back( WINDOWS_WASAPI );
#endif
#if defined(__WINDOWS_DS__)
  apis.push_back( WINDOWS_DS );
#endif
#if defined(__MACOSX_CORE__)
  apis.push_back( MACOSX_CORE );
#endif
#if defined(__RTAUDIO_DUMMY__)
  apis.push_back( RTAUDIO_DUMMY );
#endif
}
117
// Replaces rtapi_ with a new instance of the API-specific backend for 'api'.
// If the requested API was not compiled in, rtapi_ is left null (the caller
// checks for this). Any previously held backend is deleted first.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
161
// Constructs an RtAudio instance bound to a concrete API backend.
// If 'api' is given and compiled in, that backend is used; otherwise the
// compiled APIs are probed in getCompiledApi() order until one reports at
// least one device.
void RtAudio :: RtAudio( RtAudio::Api api )
{
  rtapi_ = 0;

  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    openRtApi( api );
    if ( rtapi_ ) return;

    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  }

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  // NOTE(review): if no API has any devices, rtapi_ is left pointing at the
  // last API probed (non-null), so construction still succeeds.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_->getDeviceCount() ) break;
  }

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
}
194
195RtAudio :: ~RtAudio() throw()
196{
197 if ( rtapi_ )
198 delete rtapi_;
199}
200
201void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
202 RtAudio::StreamParameters *inputParameters,
203 RtAudioFormat format, unsigned int sampleRate,
204 unsigned int *bufferFrames,
205 RtAudioCallback callback, void *userData,
206 RtAudio::StreamOptions *options,
207 RtAudioErrorCallback errorCallback )
208{
209 return rtapi_->openStream( outputParameters, inputParameters, format,
210 sampleRate, bufferFrames, callback,
211 userData, options, errorCallback );
212}
213
214// *************************************************** //
215//
216// Public RtApi definitions (see end of file for
217// private or protected utility functions).
218//
219// *************************************************** //
220
// Base-class constructor: puts the stream bookkeeping into a known-closed
// state and creates the mutex guarding it.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;   // output/input user buffers allocated on open
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
}
232
// Base-class destructor: tears down the stream mutex. Subclass destructors
// are responsible for closing any open stream first.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
237
// Validates the requested configuration, opens the requested device(s) via
// the API-specific probeDeviceOpen(), and records the callback information.
// On success the stream is left STREAM_STOPPED; on any validation or probe
// failure error() is invoked and the function returns without opening.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first, then the input side.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo the successful output open so we don't leak a half-open stream.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report the actual buffer count chosen by the backend to the caller.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
330
// Default-device fallback: index 0. Should be implemented in subclasses
// if the underlying API can report a real default input device.
unsigned int RtApi :: getDefaultInputDevice( void )
{
  return 0;
}
336
// Default-device fallback: index 0. Should be implemented in subclasses
// if the underlying API can report a real default output device.
unsigned int RtApi :: getDefaultOutputDevice( void )
{
  return 0;
}
342
// Base implementation is a no-op; MUST be implemented in subclasses!
void RtApi :: closeStream( void )
{
  return;
}
348
// Base implementation always fails; MUST be implemented in subclasses!
// Subclass overrides open 'device' for 'mode' and fill in stream_ state.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  return FAILURE;
}
357
// Advances the stream clock by one buffer's duration. Subclasses that do not
// provide their own getStreamTime implementation should call this once per
// buffer I/O to provide basic stream time support.
void RtApi :: tickStreamTime( void )
{
  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Remember when this tick happened so getStreamTime can interpolate.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
370
371long RtApi :: getStreamLatency( void )
372{
373 verifyStream();
374
375 long totalLatency = 0;
376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
377 totalLatency = stream_.latency[0];
378 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
379 totalLatency += stream_.latency[1];
380
381 return totalLatency;
382}
383
// Returns the stream time in seconds. When gettimeofday is available, the
// coarse per-buffer clock is refined by the wall time elapsed since the
// last tickStreamTime() call.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // Not running (or never ticked): the raw counter is all we have.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
406
407void RtApi :: setStreamTime( double time )
408{
409 verifyStream();
410
411 if ( time >= 0.0 )
412 stream_.streamTime = time;
413}
414
// Returns the sample rate of the currently open stream (verifyStream()
// raises an error if no stream is open).
unsigned int RtApi :: getStreamSampleRate( void )
{
  verifyStream();

  return stream_.sampleRate;
}
421
422
423// *************************************************** //
424//
425// OS/API-specific methods.
426//
427// *************************************************** //
428
429#if defined(__MACOSX_CORE__)
430
431// The OS X CoreAudio API is designed to use a separate callback
432// procedure for each of its audio devices. A single RtAudio duplex
433// stream using two different devices is supported here, though it
434// cannot be guaranteed to always behave correctly because we cannot
435// synchronize these two callbacks.
436//
437// A property listener is installed for over/underrun information.
438// However, no functionality is currently provided to allow property
439// listeners to trigger user handlers because it is unclear what could
440// be done if a critical stream parameter (buffer size, sample rate,
441// device disconnect) notification arrived. The listeners entail
442// quite a bit of extra code and most likely, a user program wouldn't
443// be prepared for the result anyway. However, we do provide a flag
444// to the client callback function to inform of an over/underrun.
445
446// A structure to hold various information related to the CoreAudio API
447// implementation.
struct CoreHandle {
  AudioDeviceID id[2];    // device ids (0: output, 1: input — TODO confirm against probeDeviceOpen)
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];  // NOTE(review): not initialized by the ctor; presumably set on open
#endif
  UInt32 iStream[2];    // device stream index (or first if using multiple)
  UInt32 nStreams[2];   // number of streams to use
  bool xrun[2];         // over/underrun flags set by the xrun property listener
  char *deviceBuffer;
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.

  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
464
// CoreAudio backend constructor. On 10.6+ it clears the HAL's run loop
// property so device queries/updates are handled correctly.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
483
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
491
// Returns the number of audio devices known to the CoreAudio HAL by sizing
// the kAudioHardwarePropertyDevices property. Returns 0 (with a warning)
// on HAL errors.
unsigned int RtApiCore :: getDeviceCount( void )
{
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  // The property is an array of AudioDeviceIDs; one ID per device.
  return dataSize / sizeof( AudioDeviceID );
}
506
507unsigned int RtApiCore :: getDefaultInputDevice( void )
508{
509 unsigned int nDevices = getDeviceCount();
510 if ( nDevices <= 1 ) return 0;
511
512 AudioDeviceID id;
513 UInt32 dataSize = sizeof( AudioDeviceID );
514 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
515 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
516 if ( result != noErr ) {
517 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
518 error( RtAudioError::WARNING );
519 return 0;
520 }
521
522 dataSize *= nDevices;
523 AudioDeviceID deviceList[ nDevices ];
524 property.mSelector = kAudioHardwarePropertyDevices;
525 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
526 if ( result != noErr ) {
527 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
528 error( RtAudioError::WARNING );
529 return 0;
530 }
531
532 for ( unsigned int i=0; i<nDevices; i++ )
533 if ( id == deviceList[i] ) return i;
534
535 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
536 error( RtAudioError::WARNING );
537 return 0;
538}
539
// Returns the index (within the HAL device list) of the system default
// output device, or 0 when it cannot be determined or there is at most one
// device. Emits a WARNING-level error on HAL failures.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;

  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Fetch the full device list and search it for the default device's ID.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];  // VLA: compiler extension used throughout this file
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
572
// Probes device 'device' (an index into the HAL device list) and fills a
// DeviceInfo: name ("manufacturer: name"), input/output/duplex channel
// counts, supported sample rates, and default-device flags. On any HAL
// failure a WARNING is raised and the partially filled info is returned
// with info.probed == false.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  AudioDeviceID deviceList[ nDevices ];  // VLA: compiler extension used throughout this file
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  AudioDeviceID id = deviceList[ device ];

  // Get the device name: manufacturer first, then the device's own name.
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // "* 3 + 1": worst-case UTF-8 expansion of the CFString plus a NUL.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information: sum channels over all output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information: sum channels over all input streams.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates (input scope only if the device has no outputs).
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];  // VLA: compiler extension used throughout this file
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
      info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
    else {
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  // For a continuous range, report the standard rates that fall inside it.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
        info.sampleRates.push_back( SAMPLE_RATES[k] );
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
804
// CoreAudio IOProc trampoline: recovers the RtApiCore instance from the
// CallbackInfo pointer and forwards the buffers to callbackEvent().
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
                                 void* infoPointer )
{
  CallbackInfo *info = (CallbackInfo *) infoPointer;

  RtApiCore *object = (RtApiCore *) info->object;
  // A false return from callbackEvent signals failure to the HAL.
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
    return kAudioHardwareUnspecifiedError;
  else
    return kAudioHardwareNoError;
}
821
// Property listener that records processor-overload (xrun) notifications
// in the CoreHandle: index 1 for input scope, index 0 otherwise (output).
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              UInt32 nAddresses,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
{
  CoreHandle *handle = (CoreHandle *) handlePointer;
  for ( UInt32 i=0; i<nAddresses; i++ ) {
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
        handle->xrun[1] = true;
      else
        handle->xrun[0] = true;
    }
  }

  return kAudioHardwareNoError;
}
839
// Property listener that reads the device's current nominal sample rate
// into the Float64 pointed to by 'ratePointer' whenever it changes.
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
                              void* ratePointer )
{
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
}
853
854bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
855 unsigned int firstChannel, unsigned int sampleRate,
856 RtAudioFormat format, unsigned int *bufferSize,
857 RtAudio::StreamOptions *options )
858{
859 // Get device ID
860 unsigned int nDevices = getDeviceCount();
861 if ( nDevices == 0 ) {
862 // This should not happen because a check is made before this function is called.
863 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
864 return FAILURE;
865 }
866
867 if ( device >= nDevices ) {
868 // This should not happen because a check is made before this function is called.
869 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
870 return FAILURE;
871 }
872
873 AudioDeviceID deviceList[ nDevices ];
874 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
875 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
876 kAudioObjectPropertyScopeGlobal,
877 kAudioObjectPropertyElementMaster };
878 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
879 0, NULL, &dataSize, (void *) &deviceList );
880 if ( result != noErr ) {
881 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
882 return FAILURE;
883 }
884
885 AudioDeviceID id = deviceList[ device ];
886
887 // Setup for stream mode.
888 bool isInput = false;
889 if ( mode == INPUT ) {
890 isInput = true;
891 property.mScope = kAudioDevicePropertyScopeInput;
892 }
893 else
894 property.mScope = kAudioDevicePropertyScopeOutput;
895
896 // Get the stream "configuration".
897 AudioBufferList *bufferList = nil;
898 dataSize = 0;
899 property.mSelector = kAudioDevicePropertyStreamConfiguration;
900 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
901 if ( result != noErr || dataSize == 0 ) {
902 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
903 errorText_ = errorStream_.str();
904 return FAILURE;
905 }
906
907 // Allocate the AudioBufferList.
908 bufferList = (AudioBufferList *) malloc( dataSize );
909 if ( bufferList == NULL ) {
910 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
911 return FAILURE;
912 }
913
914 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
915 if (result != noErr || dataSize == 0) {
916 free( bufferList );
917 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
918 errorText_ = errorStream_.str();
919 return FAILURE;
920 }
921
922 // Search for one or more streams that contain the desired number of
923 // channels. CoreAudio devices can have an arbitrary number of
924 // streams and each stream can have an arbitrary number of channels.
925 // For each stream, a single buffer of interleaved samples is
926 // provided. RtAudio prefers the use of one stream of interleaved
927 // data or multiple consecutive single-channel streams. However, we
928 // now support multiple consecutive multi-channel streams of
929 // interleaved data as well.
930 UInt32 iStream, offsetCounter = firstChannel;
931 UInt32 nStreams = bufferList->mNumberBuffers;
932 bool monoMode = false;
933 bool foundStream = false;
934
935 // First check that the device supports the requested number of
936 // channels.
937 UInt32 deviceChannels = 0;
938 for ( iStream=0; iStream<nStreams; iStream++ )
939 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
940
941 if ( deviceChannels < ( channels + firstChannel ) ) {
942 free( bufferList );
943 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
944 errorText_ = errorStream_.str();
945 return FAILURE;
946 }
947
948 // Look for a single stream meeting our needs.
949 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
950 for ( iStream=0; iStream<nStreams; iStream++ ) {
951 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
952 if ( streamChannels >= channels + offsetCounter ) {
953 firstStream = iStream;
954 channelOffset = offsetCounter;
955 foundStream = true;
956 break;
957 }
958 if ( streamChannels > offsetCounter ) break;
959 offsetCounter -= streamChannels;
960 }
961
962 // If we didn't find a single stream above, then we should be able
963 // to meet the channel specification with multiple streams.
964 if ( foundStream == false ) {
965 monoMode = true;
966 offsetCounter = firstChannel;
967 for ( iStream=0; iStream<nStreams; iStream++ ) {
968 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
969 if ( streamChannels > offsetCounter ) break;
970 offsetCounter -= streamChannels;
971 }
972
973 firstStream = iStream;
974 channelOffset = offsetCounter;
975 Int32 channelCounter = channels + offsetCounter - streamChannels;
976
977 if ( streamChannels > 1 ) monoMode = false;
978 while ( channelCounter > 0 ) {
979 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
980 if ( streamChannels > 1 ) monoMode = false;
981 channelCounter -= streamChannels;
982 streamCount++;
983 }
984 }
985
986 free( bufferList );
987
988 // Determine the buffer size.
989 AudioValueRange bufferRange;
990 dataSize = sizeof( AudioValueRange );
991 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
992 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
993
994 if ( result != noErr ) {
995 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
996 errorText_ = errorStream_.str();
997 return FAILURE;
998 }
999
1000 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1001 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1002 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1003
1004 // Set the buffer size. For multiple streams, I'm assuming we only
1005 // need to make this setting for the master channel.
1006 UInt32 theSize = (UInt32) *bufferSize;
1007 dataSize = sizeof( UInt32 );
1008 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1009 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1010
1011 if ( result != noErr ) {
1012 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1013 errorText_ = errorStream_.str();
1014 return FAILURE;
1015 }
1016
1017 // If attempting to setup a duplex stream, the bufferSize parameter
1018 // MUST be the same in both directions!
1019 *bufferSize = theSize;
1020 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1022 errorText_ = errorStream_.str();
1023 return FAILURE;
1024 }
1025
1026 stream_.bufferSize = *bufferSize;
1027 stream_.nBuffers = 1;
1028
1029 // Try to set "hog" mode ... it's not clear to me this is working.
1030 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1031 pid_t hog_pid;
1032 dataSize = sizeof( hog_pid );
1033 property.mSelector = kAudioDevicePropertyHogMode;
1034 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1035 if ( result != noErr ) {
1036 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1037 errorText_ = errorStream_.str();
1038 return FAILURE;
1039 }
1040
1041 if ( hog_pid != getpid() ) {
1042 hog_pid = getpid();
1043 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1044 if ( result != noErr ) {
1045 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1046 errorText_ = errorStream_.str();
1047 return FAILURE;
1048 }
1049 }
1050 }
1051
1052 // Check and if necessary, change the sample rate for the device.
1053 Float64 nominalRate;
1054 dataSize = sizeof( Float64 );
1055 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1056 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1057 if ( result != noErr ) {
1058 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1059 errorText_ = errorStream_.str();
1060 return FAILURE;
1061 }
1062
1063 // Only change the sample rate if off by more than 1 Hz.
1064 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1065
1066 // Set a property listener for the sample rate change
1067 Float64 reportedRate = 0.0;
1068 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1069 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1070 if ( result != noErr ) {
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1072 errorText_ = errorStream_.str();
1073 return FAILURE;
1074 }
1075
1076 nominalRate = (Float64) sampleRate;
1077 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1078 if ( result != noErr ) {
1079 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1080 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1081 errorText_ = errorStream_.str();
1082 return FAILURE;
1083 }
1084
1085 // Now wait until the reported nominal rate is what we just set.
1086 UInt32 microCounter = 0;
1087 while ( reportedRate != nominalRate ) {
1088 microCounter += 5000;
1089 if ( microCounter > 5000000 ) break;
1090 usleep( 5000 );
1091 }
1092
1093 // Remove the property listener.
1094 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1095
1096 if ( microCounter > 5000000 ) {
1097 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1098 errorText_ = errorStream_.str();
1099 return FAILURE;
1100 }
1101 }
1102
1103 // Now set the stream format for all streams. Also, check the
1104 // physical format of the device and change that if necessary.
1105 AudioStreamBasicDescription description;
1106 dataSize = sizeof( AudioStreamBasicDescription );
1107 property.mSelector = kAudioStreamPropertyVirtualFormat;
1108 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1109 if ( result != noErr ) {
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1112 return FAILURE;
1113 }
1114
1115 // Set the sample rate and data format id. However, only make the
1116 // change if the sample rate is not within 1.0 of the desired
1117 // rate and the format is not linear pcm.
1118 bool updateFormat = false;
1119 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1120 description.mSampleRate = (Float64) sampleRate;
1121 updateFormat = true;
1122 }
1123
1124 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1125 description.mFormatID = kAudioFormatLinearPCM;
1126 updateFormat = true;
1127 }
1128
1129 if ( updateFormat ) {
1130 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1131 if ( result != noErr ) {
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1133 errorText_ = errorStream_.str();
1134 return FAILURE;
1135 }
1136 }
1137
1138 // Now check the physical format.
1139 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1140 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1141 if ( result != noErr ) {
1142 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1143 errorText_ = errorStream_.str();
1144 return FAILURE;
1145 }
1146
1147 //std::cout << "Current physical stream format:" << std::endl;
1148 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1149 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1150 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1151 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1152
1153 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1154 description.mFormatID = kAudioFormatLinearPCM;
1155 //description.mSampleRate = (Float64) sampleRate;
1156 AudioStreamBasicDescription testDescription = description;
1157 UInt32 formatFlags;
1158
1159 // We'll try higher bit rates first and then work our way down.
1160 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1161 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1162 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1163 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1164 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1165 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1166 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1167 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1168 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1169 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1170 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1171 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1172 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1173
1174 bool setPhysicalFormat = false;
1175 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1176 testDescription = description;
1177 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1178 testDescription.mFormatFlags = physicalFormats[i].second;
1179 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1180 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1181 else
1182 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1183 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1184 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1185 if ( result == noErr ) {
1186 setPhysicalFormat = true;
1187 //std::cout << "Updated physical stream format:" << std::endl;
1188 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1189 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1190 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1191 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1192 break;
1193 }
1194 }
1195
1196 if ( !setPhysicalFormat ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1199 return FAILURE;
1200 }
1201 } // done setting virtual/physical formats.
1202
1203 // Get the stream / device latency.
1204 UInt32 latency;
1205 dataSize = sizeof( UInt32 );
1206 property.mSelector = kAudioDevicePropertyLatency;
1207 if ( AudioObjectHasProperty( id, &property ) == true ) {
1208 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1209 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1210 else {
1211 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1212 errorText_ = errorStream_.str();
1213 error( RtAudioError::WARNING );
1214 }
1215 }
1216
1217 // Byte-swapping: According to AudioHardware.h, the stream data will
1218 // always be presented in native-endian format, so we should never
1219 // need to byte swap.
1220 stream_.doByteSwap[mode] = false;
1221
1222 // From the CoreAudio documentation, PCM data must be supplied as
1223 // 32-bit floats.
1224 stream_.userFormat = format;
1225 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1226
1227 if ( streamCount == 1 )
1228 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1229 else // multiple streams
1230 stream_.nDeviceChannels[mode] = channels;
1231 stream_.nUserChannels[mode] = channels;
1232 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1233 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1234 else stream_.userInterleaved = true;
1235 stream_.deviceInterleaved[mode] = true;
1236 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1237
1238 // Set flags for buffer conversion.
1239 stream_.doConvertBuffer[mode] = false;
1240 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1241 stream_.doConvertBuffer[mode] = true;
1242 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1243 stream_.doConvertBuffer[mode] = true;
1244 if ( streamCount == 1 ) {
1245 if ( stream_.nUserChannels[mode] > 1 &&
1246 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1247 stream_.doConvertBuffer[mode] = true;
1248 }
1249 else if ( monoMode && stream_.userInterleaved )
1250 stream_.doConvertBuffer[mode] = true;
1251
1252 // Allocate our CoreHandle structure for the stream.
1253 CoreHandle *handle = 0;
1254 if ( stream_.apiHandle == 0 ) {
1255 try {
1256 handle = new CoreHandle;
1257 }
1258 catch ( std::bad_alloc& ) {
1259 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1260 goto error;
1261 }
1262
1263 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1264 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1265 goto error;
1266 }
1267 stream_.apiHandle = (void *) handle;
1268 }
1269 else
1270 handle = (CoreHandle *) stream_.apiHandle;
1271 handle->iStream[mode] = firstStream;
1272 handle->nStreams[mode] = streamCount;
1273 handle->id[mode] = id;
1274
1275 // Allocate necessary internal buffers.
1276 unsigned long bufferBytes;
1277 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1278 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1279 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1280 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1281 if ( stream_.userBuffer[mode] == NULL ) {
1282 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1283 goto error;
1284 }
1285
1286 // If possible, we will make use of the CoreAudio stream buffers as
1287 // "device buffers". However, we can't do this if using multiple
1288 // streams.
1289 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1290
1291 bool makeBuffer = true;
1292 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1293 if ( mode == INPUT ) {
1294 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1295 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1296 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1297 }
1298 }
1299
1300 if ( makeBuffer ) {
1301 bufferBytes *= *bufferSize;
1302 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1303 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1304 if ( stream_.deviceBuffer == NULL ) {
1305 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1306 goto error;
1307 }
1308 }
1309 }
1310
1311 stream_.sampleRate = sampleRate;
1312 stream_.device[mode] = device;
1313 stream_.state = STREAM_STOPPED;
1314 stream_.callbackInfo.object = (void *) this;
1315
1316 // Setup the buffer conversion information structure.
1317 if ( stream_.doConvertBuffer[mode] ) {
1318 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1319 else setConvertInfo( mode, channelOffset );
1320 }
1321
1322 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1323 // Only one callback procedure per device.
1324 stream_.mode = DUPLEX;
1325 else {
1326#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1327 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1328#else
1329 // deprecated in favor of AudioDeviceCreateIOProcID()
1330 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1331#endif
1332 if ( result != noErr ) {
1333 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1334 errorText_ = errorStream_.str();
1335 goto error;
1336 }
1337 if ( stream_.mode == OUTPUT && mode == INPUT )
1338 stream_.mode = DUPLEX;
1339 else
1340 stream_.mode = mode;
1341 }
1342
1343 // Setup the device property listener for over/underload.
1344 property.mSelector = kAudioDeviceProcessorOverload;
1345 property.mScope = kAudioObjectPropertyScopeGlobal;
1346 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1347
1348 return SUCCESS;
1349
1350 error:
1351 if ( handle ) {
1352 pthread_cond_destroy( &handle->condition );
1353 delete handle;
1354 stream_.apiHandle = 0;
1355 }
1356
1357 for ( int i=0; i<2; i++ ) {
1358 if ( stream_.userBuffer[i] ) {
1359 free( stream_.userBuffer[i] );
1360 stream_.userBuffer[i] = 0;
1361 }
1362 }
1363
1364 if ( stream_.deviceBuffer ) {
1365 free( stream_.deviceBuffer );
1366 stream_.deviceBuffer = 0;
1367 }
1368
1369 stream_.state = STREAM_CLOSED;
1370 return FAILURE;
1371}
1372
1373void RtApiCore :: closeStream( void )
1374{
1375 if ( stream_.state == STREAM_CLOSED ) {
1376 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1377 error( RtAudioError::WARNING );
1378 return;
1379 }
1380
1381 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1382 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1383 if ( stream_.state == STREAM_RUNNING )
1384 AudioDeviceStop( handle->id[0], callbackHandler );
1385#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1386 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1387#else
1388 // deprecated in favor of AudioDeviceDestroyIOProcID()
1389 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1390#endif
1391 }
1392
1393 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1394 if ( stream_.state == STREAM_RUNNING )
1395 AudioDeviceStop( handle->id[1], callbackHandler );
1396#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1397 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1398#else
1399 // deprecated in favor of AudioDeviceDestroyIOProcID()
1400 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1401#endif
1402 }
1403
1404 for ( int i=0; i<2; i++ ) {
1405 if ( stream_.userBuffer[i] ) {
1406 free( stream_.userBuffer[i] );
1407 stream_.userBuffer[i] = 0;
1408 }
1409 }
1410
1411 if ( stream_.deviceBuffer ) {
1412 free( stream_.deviceBuffer );
1413 stream_.deviceBuffer = 0;
1414 }
1415
1416 // Destroy pthread condition variable.
1417 pthread_cond_destroy( &handle->condition );
1418 delete handle;
1419 stream_.apiHandle = 0;
1420
1421 stream_.mode = UNINITIALIZED;
1422 stream_.state = STREAM_CLOSED;
1423}
1424
1425void RtApiCore :: startStream( void )
1426{
1427 verifyStream();
1428 if ( stream_.state == STREAM_RUNNING ) {
1429 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1430 error( RtAudioError::WARNING );
1431 return;
1432 }
1433
1434 OSStatus result = noErr;
1435 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1436 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1437
1438 result = AudioDeviceStart( handle->id[0], callbackHandler );
1439 if ( result != noErr ) {
1440 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1441 errorText_ = errorStream_.str();
1442 goto unlock;
1443 }
1444 }
1445
1446 if ( stream_.mode == INPUT ||
1447 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1448
1449 result = AudioDeviceStart( handle->id[1], callbackHandler );
1450 if ( result != noErr ) {
1451 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1452 errorText_ = errorStream_.str();
1453 goto unlock;
1454 }
1455 }
1456
1457 handle->drainCounter = 0;
1458 handle->internalDrain = false;
1459 stream_.state = STREAM_RUNNING;
1460
1461 unlock:
1462 if ( result == noErr ) return;
1463 error( RtAudioError::SYSTEM_ERROR );
1464}
1465
// Stop a running stream.  For output/duplex streams, if no drain is already
// in progress (drainCounter == 0), a drain is requested and this thread
// blocks on the handle's condition variable until callbackEvent() signals
// that the output has been flushed; only then are the devices stopped.
void RtApiCore :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means no drain was requested yet (abortStream()
    // pre-sets it to 2 to skip the wait below).
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): POSIX requires stream_.mutex to be held by the caller
      // of pthread_cond_wait(); no lock is taken here -- confirm against the
      // locking convention used by the callback in the rest of this file.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Stop the input device separately only when it differs from the output.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1508
1509void RtApiCore :: abortStream( void )
1510{
1511 verifyStream();
1512 if ( stream_.state == STREAM_STOPPED ) {
1513 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1514 error( RtAudioError::WARNING );
1515 return;
1516 }
1517
1518 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1519 handle->drainCounter = 2;
1520
1521 stopStream();
1522}
1523
1524// This function will be called by a spawned thread when the user
1525// callback function signals that the stream should be stopped or
1526// aborted. It is better to handle it this way because the
1527// callbackEvent() function probably should return before the AudioDeviceStop()
1528// function is called.
1529static void *coreStopStream( void *ptr )
1530{
1531 CallbackInfo *info = (CallbackInfo *) ptr;
1532 RtApiCore *object = (RtApiCore *) info->object;
1533
1534 object->stopStream();
1535 pthread_exit( NULL );
1536}
1537
1538bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1539 const AudioBufferList *inBufferList,
1540 const AudioBufferList *outBufferList )
1541{
1542 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1543 if ( stream_.state == STREAM_CLOSED ) {
1544 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1545 error( RtAudioError::WARNING );
1546 return FAILURE;
1547 }
1548
1549 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1550 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1551
1552 // Check if we were draining the stream and signal is finished.
1553 if ( handle->drainCounter > 3 ) {
1554 ThreadHandle threadId;
1555
1556 stream_.state = STREAM_STOPPING;
1557 if ( handle->internalDrain == true )
1558 pthread_create( &threadId, NULL, coreStopStream, info );
1559 else // external call to stopStream()
1560 pthread_cond_signal( &handle->condition );
1561 return SUCCESS;
1562 }
1563
1564 AudioDeviceID outputDevice = handle->id[0];
1565
1566 // Invoke user callback to get fresh output data UNLESS we are
1567 // draining stream or duplex mode AND the input/output devices are
1568 // different AND this function is called for the input device.
1569 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1570 RtAudioCallback callback = (RtAudioCallback) info->callback;
1571 double streamTime = getStreamTime();
1572 RtAudioStreamStatus status = 0;
1573 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1574 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1575 handle->xrun[0] = false;
1576 }
1577 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1578 status |= RTAUDIO_INPUT_OVERFLOW;
1579 handle->xrun[1] = false;
1580 }
1581
1582 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1583 stream_.bufferSize, streamTime, status, info->userData );
1584 if ( cbReturnValue == 2 ) {
1585 stream_.state = STREAM_STOPPING;
1586 handle->drainCounter = 2;
1587 abortStream();
1588 return SUCCESS;
1589 }
1590 else if ( cbReturnValue == 1 ) {
1591 handle->drainCounter = 1;
1592 handle->internalDrain = true;
1593 }
1594 }
1595
1596 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1597
1598 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1599
1600 if ( handle->nStreams[0] == 1 ) {
1601 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1602 0,
1603 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1604 }
1605 else { // fill multiple streams with zeros
1606 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1607 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1608 0,
1609 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1610 }
1611 }
1612 }
1613 else if ( handle->nStreams[0] == 1 ) {
1614 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1615 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1616 stream_.userBuffer[0], stream_.convertInfo[0] );
1617 }
1618 else { // copy from user buffer
1619 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1620 stream_.userBuffer[0],
1621 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1622 }
1623 }
1624 else { // fill multiple streams
1625 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1626 if ( stream_.doConvertBuffer[0] ) {
1627 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1628 inBuffer = (Float32 *) stream_.deviceBuffer;
1629 }
1630
1631 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1632 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1633 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1634 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1635 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1636 }
1637 }
1638 else { // fill multiple multi-channel streams with interleaved data
1639 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1640 Float32 *out, *in;
1641
1642 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1643 UInt32 inChannels = stream_.nUserChannels[0];
1644 if ( stream_.doConvertBuffer[0] ) {
1645 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1646 inChannels = stream_.nDeviceChannels[0];
1647 }
1648
1649 if ( inInterleaved ) inOffset = 1;
1650 else inOffset = stream_.bufferSize;
1651
1652 channelsLeft = inChannels;
1653 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1654 in = inBuffer;
1655 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1656 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1657
1658 outJump = 0;
1659 // Account for possible channel offset in first stream
1660 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1661 streamChannels -= stream_.channelOffset[0];
1662 outJump = stream_.channelOffset[0];
1663 out += outJump;
1664 }
1665
1666 // Account for possible unfilled channels at end of the last stream
1667 if ( streamChannels > channelsLeft ) {
1668 outJump = streamChannels - channelsLeft;
1669 streamChannels = channelsLeft;
1670 }
1671
1672 // Determine input buffer offsets and skips
1673 if ( inInterleaved ) {
1674 inJump = inChannels;
1675 in += inChannels - channelsLeft;
1676 }
1677 else {
1678 inJump = 1;
1679 in += (inChannels - channelsLeft) * inOffset;
1680 }
1681
1682 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1683 for ( unsigned int j=0; j<streamChannels; j++ ) {
1684 *out++ = in[j*inOffset];
1685 }
1686 out += outJump;
1687 in += inJump;
1688 }
1689 channelsLeft -= streamChannels;
1690 }
1691 }
1692 }
1693 }
1694
1695 // Don't bother draining input
1696 if ( handle->drainCounter ) {
1697 handle->drainCounter++;
1698 goto unlock;
1699 }
1700
1701 AudioDeviceID inputDevice;
1702 inputDevice = handle->id[1];
1703 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1704
1705 if ( handle->nStreams[1] == 1 ) {
1706 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1707 convertBuffer( stream_.userBuffer[1],
1708 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1709 stream_.convertInfo[1] );
1710 }
1711 else { // copy to user buffer
1712 memcpy( stream_.userBuffer[1],
1713 inBufferList->mBuffers[handle->iStream[1]].mData,
1714 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1715 }
1716 }
1717 else { // read from multiple streams
1718 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1719 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1720
1721 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1722 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1723 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1724 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1725 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1726 }
1727 }
1728 else { // read from multiple multi-channel streams
1729 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1730 Float32 *out, *in;
1731
1732 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1733 UInt32 outChannels = stream_.nUserChannels[1];
1734 if ( stream_.doConvertBuffer[1] ) {
1735 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1736 outChannels = stream_.nDeviceChannels[1];
1737 }
1738
1739 if ( outInterleaved ) outOffset = 1;
1740 else outOffset = stream_.bufferSize;
1741
1742 channelsLeft = outChannels;
1743 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1744 out = outBuffer;
1745 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1746 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1747
1748 inJump = 0;
1749 // Account for possible channel offset in first stream
1750 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1751 streamChannels -= stream_.channelOffset[1];
1752 inJump = stream_.channelOffset[1];
1753 in += inJump;
1754 }
1755
1756 // Account for possible unread channels at end of the last stream
1757 if ( streamChannels > channelsLeft ) {
1758 inJump = streamChannels - channelsLeft;
1759 streamChannels = channelsLeft;
1760 }
1761
1762 // Determine output buffer offsets and skips
1763 if ( outInterleaved ) {
1764 outJump = outChannels;
1765 out += outChannels - channelsLeft;
1766 }
1767 else {
1768 outJump = 1;
1769 out += (outChannels - channelsLeft) * outOffset;
1770 }
1771
1772 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1773 for ( unsigned int j=0; j<streamChannels; j++ ) {
1774 out[j*outOffset] = *in++;
1775 }
1776 out += outJump;
1777 in += inJump;
1778 }
1779 channelsLeft -= streamChannels;
1780 }
1781 }
1782
1783 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1784 convertBuffer( stream_.userBuffer[1],
1785 stream_.deviceBuffer,
1786 stream_.convertInfo[1] );
1787 }
1788 }
1789 }
1790
1791 unlock:
1792 //MUTEX_UNLOCK( &stream_.mutex );
1793
1794 RtApi::tickStreamTime();
1795 return SUCCESS;
1796}
1797
1798const char* RtApiCore :: getErrorCode( OSStatus code )
1799{
1800 switch( code ) {
1801
1802 case kAudioHardwareNotRunningError:
1803 return "kAudioHardwareNotRunningError";
1804
1805 case kAudioHardwareUnspecifiedError:
1806 return "kAudioHardwareUnspecifiedError";
1807
1808 case kAudioHardwareUnknownPropertyError:
1809 return "kAudioHardwareUnknownPropertyError";
1810
1811 case kAudioHardwareBadPropertySizeError:
1812 return "kAudioHardwareBadPropertySizeError";
1813
1814 case kAudioHardwareIllegalOperationError:
1815 return "kAudioHardwareIllegalOperationError";
1816
1817 case kAudioHardwareBadObjectError:
1818 return "kAudioHardwareBadObjectError";
1819
1820 case kAudioHardwareBadDeviceError:
1821 return "kAudioHardwareBadDeviceError";
1822
1823 case kAudioHardwareBadStreamError:
1824 return "kAudioHardwareBadStreamError";
1825
1826 case kAudioHardwareUnsupportedOperationError:
1827 return "kAudioHardwareUnsupportedOperationError";
1828
1829 case kAudioDeviceUnsupportedFormatError:
1830 return "kAudioDeviceUnsupportedFormatError";
1831
1832 case kAudioDevicePermissionsError:
1833 return "kAudioDevicePermissionsError";
1834
1835 default:
1836 return "CoreAudio unknown error";
1837 }
1838}
1839
1840 //******************** End of __MACOSX_CORE__ *********************//
1841#endif
1842
1843#if defined(__UNIX_JACK__)
1844
1845// JACK is a low-latency audio server, originally written for the
1846// GNU/Linux operating system and now also ported to OS-X. It can
1847// connect a number of different applications to an audio device, as
1848// well as allowing them to share audio between themselves.
1849//
1850// When using JACK with RtAudio, "devices" refer to JACK clients that
1851// have ports connected to the server. The JACK server is typically
1852// started in a terminal as follows:
1853//
1854// .jackd -d alsa -d hw:0
1855//
1856// or through an interface program such as qjackctl. Many of the
1857// parameters normally set for a stream are fixed by the JACK server
1858// and can be specified when the JACK server is started. In
1859// particular,
1860//
1861// .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1862//
1863// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1864// frames, and number of buffers = 4. Once the server is running, it
1865// is not possible to override these values. If the values are not
1866// specified in the command-line, the JACK server uses default values.
1867//
1868// The JACK server does not have to be running when an instance of
1869// RtApiJack is created, though the function getDeviceCount() will
1870// report 0 devices found until JACK has been started. When no
1871// devices are available (i.e., the JACK server is not running), a
1872// stream cannot be opened.
1873
1874#include <jack/jack.h>
1875#include <unistd.h>
1876#include <cstdio>
1877
// A structure to hold various information related to the Jack API
// implementation.
struct JackHandle {
  jack_client_t *client;      // connection to the JACK server (shared by both stream directions)
  jack_port_t **ports[2];     // registered ports: [0] = playback (output), [1] = capture (input)
  std::string deviceName[2];  // device (port-name prefix) selected for each direction
  bool xrun[2];               // over/underrun flags set by jackXrun(), cleared once reported
  pthread_cond_t condition;   // signaled by callbackEvent() when an output drain completes
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  JackHandle()
    :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
};
1892
1893static void jackSilentError( const char * ) {};
1894
// Constructor.  In non-debug builds, installs a no-op error handler so
// the JACK library does not print its own diagnostics to stderr.
RtApiJack :: RtApiJack()
{
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
1903
// Destructor.  Closes any stream that is still open so the JACK client
// connection, registered ports, and internal buffers are released.
RtApiJack :: ~RtApiJack()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
1908
1909unsigned int RtApiJack :: getDeviceCount( void )
1910{
1911 // See if we can become a jack client.
1912 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1913 jack_status_t *status = NULL;
1914 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1915 if ( client == 0 ) return 0;
1916
1917 const char **ports;
1918 std::string port, previousPort;
1919 unsigned int nChannels = 0, nDevices = 0;
1920 ports = jack_get_ports( client, NULL, NULL, 0 );
1921 if ( ports ) {
1922 // Parse the port names up to the first colon (:).
1923 size_t iColon = 0;
1924 do {
1925 port = (char *) ports[ nChannels ];
1926 iColon = port.find(":");
1927 if ( iColon != std::string::npos ) {
1928 port = port.substr( 0, iColon + 1 );
1929 if ( port != previousPort ) {
1930 nDevices++;
1931 previousPort = port;
1932 }
1933 }
1934 } while ( ports[++nChannels] );
1935 free( ports );
1936 }
1937
1938 jack_client_close( client );
1939 return nDevices;
1940}
1941
// Fill in a DeviceInfo structure for the JACK "device" with the given
// index.  Device indices follow the order of distinct port-name prefixes
// reported by the server (see getDeviceCount()).  On any failure the
// returned info has probed == false.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Connect to a running server (never auto-start one) with a throwaway client.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).  The prefix of the
    // device'th distinct name becomes the reported device name.
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate (the only rate JACK offers).
  info.sampleRates.clear();
  info.sampleRates.push_back( jack_get_sample_rate( client ) );

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2032
2033static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2034{
2035 CallbackInfo *info = (CallbackInfo *) infoPointer;
2036
2037 RtApiJack *object = (RtApiJack *) info->object;
2038 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2039
2040 return 0;
2041}
2042
2043// This function will be called by a spawned thread when the Jack
2044// server signals that it is shutting down. It is necessary to handle
2045// it this way because the jackShutdown() function must return before
2046// the jack_deactivate() function (in closeStream()) will return.
2047static void *jackCloseStream( void *ptr )
2048{
2049 CallbackInfo *info = (CallbackInfo *) ptr;
2050 RtApiJack *object = (RtApiJack *) info->object;
2051
2052 object->closeStream();
2053
2054 pthread_exit( NULL );
2055}
// Registered via jack_on_shutdown(): invoked when the JACK server shuts
// down or when this client is deactivated.
static void jackShutdown( void *infoPointer )
{
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiJack *object = (RtApiJack *) info->object;

  // Check current stream state.  If stopped, then we'll assume this
  // was called as a result of a call to RtApiJack::stopStream (the
  // deactivation of a client handle causes this function to be called).
  // If not, we'll assume the Jack server is shutting down or some
  // other problem occurred and we should close the stream.
  if ( object->isStreamRunning() == false ) return;

  // Close the stream from a separate thread; this callback must return
  // before jack_deactivate() (called inside closeStream()) can finish.
  ThreadHandle threadId;
  pthread_create( &threadId, NULL, jackCloseStream, info );
  std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
}
2072
// JACK xrun callback: records that an over/underrun occurred so the next
// process cycle can report RTAUDIO_OUTPUT_UNDERFLOW / RTAUDIO_INPUT_OVERFLOW
// to the user callback.
// NOTE(review): this casts its argument straight to JackHandle*, but
// probeDeviceOpen() registers it with `(void *) &handle` -- the address of a
// stack-local JackHandle* -- which does not match this cast and dangles once
// probeDeviceOpen() returns.  Confirm the registration should pass the
// handle itself.
static int jackXrun( void *infoPointer )
{
  JackHandle *handle = (JackHandle *) infoPointer;

  if ( handle->ports[0] ) handle->xrun[0] = true;
  if ( handle->ports[1] ) handle->xrun[1] = true;

  return 0;
}
2082
2083bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2084 unsigned int firstChannel, unsigned int sampleRate,
2085 RtAudioFormat format, unsigned int *bufferSize,
2086 RtAudio::StreamOptions *options )
2087{
2088 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2089
2090 // Look for jack server and try to become a client (only do once per stream).
2091 jack_client_t *client = 0;
2092 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2093 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2094 jack_status_t *status = NULL;
2095 if ( options && !options->streamName.empty() )
2096 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2097 else
2098 client = jack_client_open( "RtApiJack", jackoptions, status );
2099 if ( client == 0 ) {
2100 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2101 error( RtAudioError::WARNING );
2102 return FAILURE;
2103 }
2104 }
2105 else {
2106 // The handle must have been created on an earlier pass.
2107 client = handle->client;
2108 }
2109
2110 const char **ports;
2111 std::string port, previousPort, deviceName;
2112 unsigned int nPorts = 0, nDevices = 0;
2113 ports = jack_get_ports( client, NULL, NULL, 0 );
2114 if ( ports ) {
2115 // Parse the port names up to the first colon (:).
2116 size_t iColon = 0;
2117 do {
2118 port = (char *) ports[ nPorts ];
2119 iColon = port.find(":");
2120 if ( iColon != std::string::npos ) {
2121 port = port.substr( 0, iColon );
2122 if ( port != previousPort ) {
2123 if ( nDevices == device ) deviceName = port;
2124 nDevices++;
2125 previousPort = port;
2126 }
2127 }
2128 } while ( ports[++nPorts] );
2129 free( ports );
2130 }
2131
2132 if ( device >= nDevices ) {
2133 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2134 return FAILURE;
2135 }
2136
2137 // Count the available ports containing the client name as device
2138 // channels. Jack "input ports" equal RtAudio output channels.
2139 unsigned int nChannels = 0;
2140 unsigned long flag = JackPortIsInput;
2141 if ( mode == INPUT ) flag = JackPortIsOutput;
2142 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2143 if ( ports ) {
2144 while ( ports[ nChannels ] ) nChannels++;
2145 free( ports );
2146 }
2147
2148 // Compare the jack ports for specified client to the requested number of channels.
2149 if ( nChannels < (channels + firstChannel) ) {
2150 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2151 errorText_ = errorStream_.str();
2152 return FAILURE;
2153 }
2154
2155 // Check the jack server sample rate.
2156 unsigned int jackRate = jack_get_sample_rate( client );
2157 if ( sampleRate != jackRate ) {
2158 jack_client_close( client );
2159 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2160 errorText_ = errorStream_.str();
2161 return FAILURE;
2162 }
2163 stream_.sampleRate = jackRate;
2164
2165 // Get the latency of the JACK port.
2166 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2167 if ( ports[ firstChannel ] ) {
2168 // Added by Ge Wang
2169 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2170 // the range (usually the min and max are equal)
2171 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2172 // get the latency range
2173 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2174 // be optimistic, use the min!
2175 stream_.latency[mode] = latrange.min;
2176 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2177 }
2178 free( ports );
2179
2180 // The jack server always uses 32-bit floating-point data.
2181 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2182 stream_.userFormat = format;
2183
2184 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2185 else stream_.userInterleaved = true;
2186
2187 // Jack always uses non-interleaved buffers.
2188 stream_.deviceInterleaved[mode] = false;
2189
2190 // Jack always provides host byte-ordered data.
2191 stream_.doByteSwap[mode] = false;
2192
2193 // Get the buffer size. The buffer size and number of buffers
2194 // (periods) is set when the jack server is started.
2195 stream_.bufferSize = (int) jack_get_buffer_size( client );
2196 *bufferSize = stream_.bufferSize;
2197
2198 stream_.nDeviceChannels[mode] = channels;
2199 stream_.nUserChannels[mode] = channels;
2200
2201 // Set flags for buffer conversion.
2202 stream_.doConvertBuffer[mode] = false;
2203 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2204 stream_.doConvertBuffer[mode] = true;
2205 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2206 stream_.nUserChannels[mode] > 1 )
2207 stream_.doConvertBuffer[mode] = true;
2208
2209 // Allocate our JackHandle structure for the stream.
2210 if ( handle == 0 ) {
2211 try {
2212 handle = new JackHandle;
2213 }
2214 catch ( std::bad_alloc& ) {
2215 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2216 goto error;
2217 }
2218
2219 if ( pthread_cond_init(&handle->condition, NULL) ) {
2220 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2221 goto error;
2222 }
2223 stream_.apiHandle = (void *) handle;
2224 handle->client = client;
2225 }
2226 handle->deviceName[mode] = deviceName;
2227
2228 // Allocate necessary internal buffers.
2229 unsigned long bufferBytes;
2230 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2231 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2232 if ( stream_.userBuffer[mode] == NULL ) {
2233 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2234 goto error;
2235 }
2236
2237 if ( stream_.doConvertBuffer[mode] ) {
2238
2239 bool makeBuffer = true;
2240 if ( mode == OUTPUT )
2241 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2242 else { // mode == INPUT
2243 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2244 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2245 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2246 if ( bufferBytes < bytesOut ) makeBuffer = false;
2247 }
2248 }
2249
2250 if ( makeBuffer ) {
2251 bufferBytes *= *bufferSize;
2252 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2253 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2254 if ( stream_.deviceBuffer == NULL ) {
2255 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2256 goto error;
2257 }
2258 }
2259 }
2260
2261 // Allocate memory for the Jack ports (channels) identifiers.
2262 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2263 if ( handle->ports[mode] == NULL ) {
2264 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2265 goto error;
2266 }
2267
2268 stream_.device[mode] = device;
2269 stream_.channelOffset[mode] = firstChannel;
2270 stream_.state = STREAM_STOPPED;
2271 stream_.callbackInfo.object = (void *) this;
2272
2273 if ( stream_.mode == OUTPUT && mode == INPUT )
2274 // We had already set up the stream for output.
2275 stream_.mode = DUPLEX;
2276 else {
2277 stream_.mode = mode;
2278 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2279 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
2280 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2281 }
2282
2283 // Register our ports.
2284 char label[64];
2285 if ( mode == OUTPUT ) {
2286 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2287 snprintf( label, 64, "outport %d", i );
2288 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2289 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2290 }
2291 }
2292 else {
2293 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2294 snprintf( label, 64, "inport %d", i );
2295 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2296 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2297 }
2298 }
2299
2300 // Setup the buffer conversion information structure. We don't use
2301 // buffers to do channel offsets, so we override that parameter
2302 // here.
2303 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2304
2305 return SUCCESS;
2306
2307 error:
2308 if ( handle ) {
2309 pthread_cond_destroy( &handle->condition );
2310 jack_client_close( handle->client );
2311
2312 if ( handle->ports[0] ) free( handle->ports[0] );
2313 if ( handle->ports[1] ) free( handle->ports[1] );
2314
2315 delete handle;
2316 stream_.apiHandle = 0;
2317 }
2318
2319 for ( int i=0; i<2; i++ ) {
2320 if ( stream_.userBuffer[i] ) {
2321 free( stream_.userBuffer[i] );
2322 stream_.userBuffer[i] = 0;
2323 }
2324 }
2325
2326 if ( stream_.deviceBuffer ) {
2327 free( stream_.deviceBuffer );
2328 stream_.deviceBuffer = 0;
2329 }
2330
2331 return FAILURE;
2332}
2333
2334void RtApiJack :: closeStream( void )
2335{
2336 if ( stream_.state == STREAM_CLOSED ) {
2337 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2338 error( RtAudioError::WARNING );
2339 return;
2340 }
2341
2342 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2343 if ( handle ) {
2344
2345 if ( stream_.state == STREAM_RUNNING )
2346 jack_deactivate( handle->client );
2347
2348 jack_client_close( handle->client );
2349 }
2350
2351 if ( handle ) {
2352 if ( handle->ports[0] ) free( handle->ports[0] );
2353 if ( handle->ports[1] ) free( handle->ports[1] );
2354 pthread_cond_destroy( &handle->condition );
2355 delete handle;
2356 stream_.apiHandle = 0;
2357 }
2358
2359 for ( int i=0; i<2; i++ ) {
2360 if ( stream_.userBuffer[i] ) {
2361 free( stream_.userBuffer[i] );
2362 stream_.userBuffer[i] = 0;
2363 }
2364 }
2365
2366 if ( stream_.deviceBuffer ) {
2367 free( stream_.deviceBuffer );
2368 stream_.deviceBuffer = 0;
2369 }
2370
2371 stream_.mode = UNINITIALIZED;
2372 stream_.state = STREAM_CLOSED;
2373}
2374
// Activate the JACK client and connect our registered ports to the
// selected device's ports, then mark the stream running.  On any
// failure, reports a SYSTEM_ERROR via the 'result' protocol at 'unlock'.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  // result == 0 only when every activate/connect step above succeeded.
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2448
// Stop a running stream.  For output streams, first asks the process
// callback to drain (drainCounter = 2 makes it output silence and count
// cycles) and blocks until callbackEvent() signals completion, then
// deactivates the JACK client.
void RtApiJack :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter != 0 means a drain is already in progress (e.g. the
    // user callback requested the stop), so don't wait again.
    if ( handle->drainCounter == 0 ) {
      // NOTE(review): pthread_cond_wait() requires stream_.mutex to be
      // held by the caller; no visible code locks it (MUTEX calls appear
      // commented out elsewhere in this file) -- confirm.
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }
  }

  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
}
2470
2471void RtApiJack :: abortStream( void )
2472{
2473 verifyStream();
2474 if ( stream_.state == STREAM_STOPPED ) {
2475 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2476 error( RtAudioError::WARNING );
2477 return;
2478 }
2479
2480 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2481 handle->drainCounter = 2;
2482
2483 stopStream();
2484}
2485
2486// This function will be called by a spawned thread when the user
2487// callback function signals that the stream should be stopped or
2488// aborted. It is necessary to handle it this way because the
2489// callbackEvent() function must return before the jack_deactivate()
2490// function will return.
2491static void *jackStopStream( void *ptr )
2492{
2493 CallbackInfo *info = (CallbackInfo *) ptr;
2494 RtApiJack *object = (RtApiJack *) info->object;
2495
2496 object->stopStream();
2497 pthread_exit( NULL );
2498}
2499
2500bool RtApiJack :: callbackEvent( unsigned long nframes )
2501{
2502 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2503 if ( stream_.state == STREAM_CLOSED ) {
2504 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2505 error( RtAudioError::WARNING );
2506 return FAILURE;
2507 }
2508 if ( stream_.bufferSize != nframes ) {
2509 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2510 error( RtAudioError::WARNING );
2511 return FAILURE;
2512 }
2513
2514 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2515 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2516
2517 // Check if we were draining the stream and signal is finished.
2518 if ( handle->drainCounter > 3 ) {
2519 ThreadHandle threadId;
2520
2521 stream_.state = STREAM_STOPPING;
2522 if ( handle->internalDrain == true )
2523 pthread_create( &threadId, NULL, jackStopStream, info );
2524 else
2525 pthread_cond_signal( &handle->condition );
2526 return SUCCESS;
2527 }
2528
2529 // Invoke user callback first, to get fresh output data.
2530 if ( handle->drainCounter == 0 ) {
2531 RtAudioCallback callback = (RtAudioCallback) info->callback;
2532 double streamTime = getStreamTime();
2533 RtAudioStreamStatus status = 0;
2534 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2535 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2536 handle->xrun[0] = false;
2537 }
2538 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2539 status |= RTAUDIO_INPUT_OVERFLOW;
2540 handle->xrun[1] = false;
2541 }
2542 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2543 stream_.bufferSize, streamTime, status, info->userData );
2544 if ( cbReturnValue == 2 ) {
2545 stream_.state = STREAM_STOPPING;
2546 handle->drainCounter = 2;
2547 ThreadHandle id;
2548 pthread_create( &id, NULL, jackStopStream, info );
2549 return SUCCESS;
2550 }
2551 else if ( cbReturnValue == 1 ) {
2552 handle->drainCounter = 1;
2553 handle->internalDrain = true;
2554 }
2555 }
2556
2557 jack_default_audio_sample_t *jackbuffer;
2558 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2559 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2560
2561 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2562
2563 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2564 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2565 memset( jackbuffer, 0, bufferBytes );
2566 }
2567
2568 }
2569 else if ( stream_.doConvertBuffer[0] ) {
2570
2571 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2572
2573 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2574 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2575 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2576 }
2577 }
2578 else { // no buffer conversion
2579 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2580 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2581 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2582 }
2583 }
2584 }
2585
2586 // Don't bother draining input
2587 if ( handle->drainCounter ) {
2588 handle->drainCounter++;
2589 goto unlock;
2590 }
2591
2592 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2593
2594 if ( stream_.doConvertBuffer[1] ) {
2595 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2596 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2597 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2598 }
2599 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2600 }
2601 else { // no buffer conversion
2602 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2603 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2604 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2605 }
2606 }
2607 }
2608
2609 unlock:
2610 RtApi::tickStreamTime();
2611 return SUCCESS;
2612}
2613 //******************** End of __UNIX_JACK__ *********************//
2614#endif
2615
2616#if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2617
2618// The ASIO API is designed around a callback scheme, so this
2619// implementation is similar to that used for OS-X CoreAudio and Linux
2620// Jack. The primary constraint with ASIO is that it only allows
2621// access to a single driver at a time. Thus, it is not possible to
2622// have more than one simultaneous RtAudio stream.
2623//
2624// This implementation also requires a number of external ASIO files
2625// and a few global variables. The ASIO callback scheme does not
2626// allow for the passing of user data, so we must create a global
2627// pointer to our callbackInfo structure.
2628//
2629// On unix systems, we make use of a pthread condition variable.
2630// Since there is no equivalent in Windows, I hacked something based
2631// on information found in
2632// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2633
2634#include "asiosys.h"
2635#include "asio.h"
2636#include "iasiothiscallresolver.h"
2637#include "asiodrivers.h"
2638#include <cmath>
2639
// ASIO's callback scheme cannot carry user data, so driver access and
// callback context live in file-scope globals (ASIO allows only one
// stream / loaded driver at a time, so a single set is sufficient).
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;
static bool asioXRun;
2645
2646struct AsioHandle {
2647 int drainCounter; // Tracks callback counts when draining
2648 bool internalDrain; // Indicates if stop is initiated from callback or not.
2649 ASIOBufferInfo *bufferInfos;
2650 HANDLE condition;
2651
2652 AsioHandle()
2653 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2654};
2655
2656// Function declarations (definitions at end of section)
2657static const char* getAsioErrorString( ASIOError result );
2658static void sampleRateChanged( ASIOSampleRate sRate );
2659static long asioMessages( long selector, long value, void* message, double* opt );
2660
2661RtApiAsio :: RtApiAsio()
2662{
2663 // ASIO cannot run on a multi-threaded appartment. You can call
2664 // CoInitialize beforehand, but it must be for appartment threading
2665 // (in which case, CoInitilialize will return S_FALSE here).
2666 coInitialized_ = false;
2667 HRESULT hr = CoInitialize( NULL );
2668 if ( FAILED(hr) ) {
2669 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2670 error( RtAudioError::WARNING );
2671 }
2672 coInitialized_ = true;
2673
2674 drivers.removeCurrentDriver();
2675 driverInfo.asioVersion = 2;
2676
2677 // See note in DirectSound implementation about GetDesktopWindow().
2678 driverInfo.sysRef = GetForegroundWindow();
2679}
2680
2681RtApiAsio :: ~RtApiAsio()
2682{
2683 if ( stream_.state != STREAM_CLOSED ) closeStream();
2684 if ( coInitialized_ ) CoUninitialize();
2685}
2686
2687unsigned int RtApiAsio :: getDeviceCount( void )
2688{
2689 return (unsigned int) drivers.asioGetNumDev();
2690}
2691
// Probe ASIO device 'device' and return its capabilities (channel counts,
// supported sample rates, native sample formats). Because ASIO permits only
// one loaded driver at a time, the driver is loaded, queried, and unloaded
// here; if a stream is already open, cached results from saveDeviceInfo()
// are returned instead. On any failure a warning is raised and an 'info'
// with probed == false is returned.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
      return info;
    }
    return devices_[ device ];
  }

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.name = driverName;

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK )
      info.sampleRates.push_back( SAMPLE_RATES[i] );
  }

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Map the ASIO sample type to RtAudio's format flags (both byte orders
  // of each width map to the same RtAudio format; byte swapping is handled
  // at stream-open time).
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  // Unload the driver so other devices can subsequently be probed or opened.
  drivers.removeCurrentDriver();
  return info;
}
2806
2807static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2808{
2809 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2810 object->callbackEvent( index );
2811}
2812
2813void RtApiAsio :: saveDeviceInfo( void )
2814{
2815 devices_.clear();
2816
2817 unsigned int nDevices = getDeviceCount();
2818 devices_.resize( nDevices );
2819 for ( unsigned int i=0; i<nDevices; i++ )
2820 devices_[i] = getDeviceInfo( i );
2821}
2822
// Open and configure ASIO device 'device' for the given mode (OUTPUT or
// INPUT). For a duplex stream this is called twice — output first, then
// input — and both calls must target the same device and agree on buffer
// size. Negotiates channel counts, sample rate, sample format, and buffer
// size with the driver, allocates user/device buffers, creates the ASIO
// buffers and callback hookup, and fills in stream_ state. Returns SUCCESS
// or FAILURE; on failure all partially allocated resources are released
// via the 'error:' cleanup path.
bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
{
  // For ASIO, a duplex stream MUST use the same driver.
  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
    return FAILURE;
  }

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Only load the driver once for duplex stream.
  if ( mode != INPUT || stream_.mode != OUTPUT ) {
    // The getDeviceInfo() function will not work when a stream is open
    // because ASIO does not allow multiple devices to run at the same
    // time. Thus, we'll probe the system before opening a stream and
    // save the results for use by getDeviceInfo().
    this->saveDeviceInfo();

    if ( !drivers.loadDriver( driverName ) ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    result = ASIOInit( &driverInfo );
    if ( result != ASE_OK ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // Check the device channel count.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
       ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }
  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;

  // Verify the sample rate is supported.
  result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Get the current sample rate
  ASIOSampleRate currentRate;
  result = ASIOGetSampleRate( &currentRate );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Set the sample rate only if necessary
  if ( currentRate != sampleRate ) {
    result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
    if ( result != ASE_OK ) {
      drivers.removeCurrentDriver();
      errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // Determine the driver data type.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  if ( mode == OUTPUT ) channelInfo.isInput = false;
  else channelInfo.isInput = true;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Assuming WINDOWS host is always little-endian.
  stream_.doByteSwap[mode] = false;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = 0;
  // Map the ASIO sample type to an RtAudio format; MSB variants require a
  // byte swap on (little-endian) Windows.
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
    if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
  }

  if ( stream_.deviceFormat[mode] == 0 ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Set the buffer size. For a duplex stream, this will end up
  // setting the buffer size based on the input constraints, which
  // should be ok.
  long minSize, maxSize, preferSize, granularity;
  result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Clamp the requested buffer size into the driver's [minSize, maxSize]
  // range; granularity == -1 means only power-of-two sizes are allowed.
  if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
  else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
  else if ( granularity == -1 ) {
    // Make sure bufferSize is a power of two.
    int log2_of_min_size = 0;
    int log2_of_max_size = 0;

    for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
      if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
      if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
    }

    // Pick the power of two within [minSize, maxSize] closest to the
    // requested size.
    long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
    int min_delta_num = log2_of_min_size;

    for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
      long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
      if (current_delta < min_delta) {
        min_delta = current_delta;
        min_delta_num = i;
      }
    }

    *bufferSize = ( (unsigned int)1 << min_delta_num );
    if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
    else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
  }
  else if ( granularity != 0 ) {
    // Set to an even multiple of granularity, rounding up.
    *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
  }

  // The second (input) half of a duplex open must end up with the same
  // buffer size as the already-opened output half.
  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
    drivers.removeCurrentDriver();
    errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
    return FAILURE;
  }

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 2;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // ASIO always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Allocate, if necessary, our AsioHandle structure for the stream.
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( handle == 0 ) {
    try {
      handle = new AsioHandle;
    }
    catch ( std::bad_alloc& ) {
      //if ( handle == NULL ) {
      drivers.removeCurrentDriver();
      errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
      return FAILURE;
    }
    handle->bufferInfos = 0;

    // Create a manual-reset event.
    handle->condition = CreateEvent( NULL, // no security
                                     TRUE, // manual-reset
                                     FALSE, // non-signaled initially
                                     NULL ); // unnamed
    stream_.apiHandle = (void *) handle;
  }

  // Create the ASIO internal buffers. Since RtAudio sets up input
  // and output separately, we'll have to dispose of previously
  // created output buffers for a duplex stream.
  long inputLatency, outputLatency;
  if ( mode == INPUT && stream_.mode == OUTPUT ) {
    ASIODisposeBuffers();
    if ( handle->bufferInfos ) free( handle->bufferInfos );
  }

  // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
  bool buffersAllocated = false;
  unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
  if ( handle->bufferInfos == NULL ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Output channels first, then input channels, each carrying the caller's
  // channel offset.
  ASIOBufferInfo *infos;
  infos = handle->bufferInfos;
  for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
    infos->isInput = ASIOFalse;
    infos->channelNum = i + stream_.channelOffset[0];
    infos->buffers[0] = infos->buffers[1] = 0;
  }
  for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
    infos->isInput = ASIOTrue;
    infos->channelNum = i + stream_.channelOffset[1];
    infos->buffers[0] = infos->buffers[1] = 0;
  }

  // Set up the ASIO callback structure and create the ASIO data buffers.
  asioCallbacks.bufferSwitch = &bufferSwitch;
  asioCallbacks.sampleRateDidChange = &sampleRateChanged;
  asioCallbacks.asioMessage = &asioMessages;
  asioCallbacks.bufferSwitchTimeInfo = NULL;
  result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
    errorText_ = errorStream_.str();
    goto error;
  }
  buffersAllocated = true;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // For duplex, reuse the output-side device buffer if it is already
      // big enough.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;
  asioCallbackInfo = &stream_.callbackInfo;
  stream_.callbackInfo.object = (void *) this;
  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up an output stream.
    stream_.mode = DUPLEX;
  else
    stream_.mode = mode;

  // Determine device latencies
  result = ASIOGetLatencies( &inputLatency, &outputLatency );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING); // warn but don't fail
  }
  else {
    stream_.latency[0] = outputLatency;
    stream_.latency[1] = inputLatency;
  }

  // Setup the buffer conversion information structure. We don't use
  // buffers to do channel offsets, so we override that parameter
  // here.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  return SUCCESS;

 error:
  // Unwind everything allocated above: ASIO buffers, driver, handle,
  // user/device buffers.
  if ( buffersAllocated )
    ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  if ( handle ) {
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );
    delete handle;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  return FAILURE;
}
3182
// Close the open stream: stop it if running, dispose the ASIO buffers,
// unload the driver, and free the AsioHandle plus all user/device buffers.
// Warns (does not throw) if no stream is open.
void RtApiAsio :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  // Mark stopped before calling ASIOStop() so the callback ignores any
  // in-flight buffer switches.
  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;
    ASIOStop();
  }
  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( handle ) {
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );
    delete handle;
    stream_.apiHandle = 0;
  }

  // Free per-direction user buffers (index 0 = output, 1 = input).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
3222
// NOTE(review): cleared in startStream() below; within this view nothing
// sets it to true — presumably the ASIO message handler does. Confirm
// against asioMessages() before relying on it.
bool stopThreadCalled = false;
3224
// Start the (already opened) stream via ASIOStart(), resetting the drain
// state and the condition event first used by stopStream(). Warns if the
// stream is already running; raises SYSTEM_ERROR if ASIOStart() fails.
void RtApiAsio :: startStream()
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  ASIOError result = ASIOStart();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
    errorText_ = errorStream_.str();
    goto unlock;
  }

  // Reset drain bookkeeping and the manual-reset event that stopStream()
  // waits on.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );
  stream_.state = STREAM_RUNNING;
  asioXRun = false;

 unlock:
  stopThreadCalled = false;

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
3254
// Stop the stream, letting any pending output drain first: for an output/
// duplex stream the drain counter is set and this thread blocks on the
// handle's event until callbackEvent() signals that draining is complete,
// then ASIOStop() is called. Warns if already stopped; raises SYSTEM_ERROR
// if ASIOStop() fails.
void RtApiAsio :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // drainCounter == 0 means no drain is in progress yet; start one and
    // wait for the callback to finish flushing output.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }
  }

  stream_.state = STREAM_STOPPED;

  ASIOError result = ASIOStop();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
    errorText_ = errorStream_.str();
  }

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
3283
// Abort the stream immediately. For ASIO this is deliberately identical to
// stopStream() — see the note below about device buffers needing zeroing.
void RtApiAsio :: abortStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // The following lines were commented-out because some behavior was
  // noted where the device buffers need to be zeroed to avoid
  // continuing sound, even when the device buffers are completely
  // disposed.  So now, calling abort is the same as calling stop.
  // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  // handle->drainCounter = 2;
  stopStream();
}
3301
3302// This function will be called by a spawned thread when the user
3303// callback function signals that the stream should be stopped or
3304// aborted. It is necessary to handle it this way because the
3305// callbackEvent() function must return before the ASIOStop()
3306// function will return.
3307static unsigned __stdcall asioStopStream( void *ptr )
3308{
3309 CallbackInfo *info = (CallbackInfo *) ptr;
3310 RtApiAsio *object = (RtApiAsio *) info->object;
3311
3312 object->stopStream();
3313 _endthreadex( 0 );
3314 return 0;
3315}
3316
// Per-buffer-switch worker invoked from the ASIO bufferSwitch() callback.
// Handles the drain protocol (drainCounter), invokes the user callback to
// produce/consume audio, and copies/converts data between the user buffers
// and the driver's per-channel ASIO buffers for the given half-buffer
// 'bufferIndex'. Returns SUCCESS normally; FAILURE only if called on a
// closed stream.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // An external stopStream() is blocked on this event — wake it.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return 2 = abort now (stop via spawned thread); 1 = stop
    // after draining pending output.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert (format/interleaving) into the device buffer, then
      // de-interleave into the per-channel ASIO output buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3470
3471static void sampleRateChanged( ASIOSampleRate sRate )
3472{
3473 // The ASIO documentation says that this usually only happens during
3474 // external sync. Audio processing is not stopped by the driver,
3475 // actual sample rate might not have even changed, maybe only the
3476 // sample rate status of an AES/EBU or S/PDIF digital input at the
3477 // audio device.
3478
3479 RtApi *object = (RtApi *) asioCallbackInfo->object;
3480 try {
3481 object->stopStream();
3482 }
3483 catch ( RtAudioError &exception ) {
3484 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3485 return;
3486 }
3487
3488 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3489}
3490
3491static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3492{
3493 long ret = 0;
3494
3495 switch( selector ) {
3496 case kAsioSelectorSupported:
3497 if ( value == kAsioResetRequest
3498 || value == kAsioEngineVersion
3499 || value == kAsioResyncRequest
3500 || value == kAsioLatenciesChanged
3501 // The following three were added for ASIO 2.0, you don't
3502 // necessarily have to support them.
3503 || value == kAsioSupportsTimeInfo
3504 || value == kAsioSupportsTimeCode
3505 || value == kAsioSupportsInputMonitor)
3506 ret = 1L;
3507 break;
3508 case kAsioResetRequest:
3509 // Defer the task and perform the reset of the driver during the
3510 // next "safe" situation. You cannot reset the driver right now,
3511 // as this code is called from the driver. Reset the driver is
3512 // done by completely destruct is. I.e. ASIOStop(),
3513 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3514 // driver again.
3515 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3516 ret = 1L;
3517 break;
3518 case kAsioResyncRequest:
3519 // This informs the application that the driver encountered some
3520 // non-fatal data loss. It is used for synchronization purposes
3521 // of different media. Added mainly to work around the Win16Mutex
3522 // problems in Windows 95/98 with the Windows Multimedia system,
3523 // which could lose data because the Mutex was held too long by
3524 // another thread. However a driver can issue it in other
3525 // situations, too.
3526 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3527 asioXRun = true;
3528 ret = 1L;
3529 break;
3530 case kAsioLatenciesChanged:
3531 // This will inform the host application that the drivers were
3532 // latencies changed. Beware, it this does not mean that the
3533 // buffer sizes have changed! You might need to update internal
3534 // delay data.
3535 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3536 ret = 1L;
3537 break;
3538 case kAsioEngineVersion:
3539 // Return the supported ASIO version of the host application. If
3540 // a host application does not implement this selector, ASIO 1.0
3541 // is assumed by the driver.
3542 ret = 2L;
3543 break;
3544 case kAsioSupportsTimeInfo:
3545 // Informs the driver whether the
3546 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3547 // For compatibility with ASIO 1.0 drivers the host application
3548 // should always support the "old" bufferSwitch method, too.
3549 ret = 0;
3550 break;
3551 case kAsioSupportsTimeCode:
3552 // Informs the driver whether application is interested in time
3553 // code info. If an application does not need to know about time
3554 // code, the driver has less work to do.
3555 ret = 0;
3556 break;
3557 }
3558 return ret;
3559}
3560
3561static const char* getAsioErrorString( ASIOError result )
3562{
3563 struct Messages
3564 {
3565 ASIOError value;
3566 const char*message;
3567 };
3568
3569 static const Messages m[] =
3570 {
3571 { ASE_NotPresent, "Hardware input or output is not present or available." },
3572 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3573 { ASE_InvalidParameter, "Invalid input parameter." },
3574 { ASE_InvalidMode, "Invalid mode." },
3575 { ASE_SPNotAdvancing, "Sample position not advancing." },
3576 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3577 { ASE_NoMemory, "Not enough memory to complete the request." }
3578 };
3579
3580 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3581 if ( m[i].value == result ) return m[i].message;
3582
3583 return "Unknown error.";
3584}
3585
3586//******************** End of __WINDOWS_ASIO__ *********************//
3587#endif
3588
3589
3590#if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3591
3592// Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3593// - Introduces support for the Windows WASAPI API
3594// - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3595// - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3596// - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3597
3598#ifndef INITGUID
3599 #define INITGUID
3600#endif
3601#include <audioclient.h>
3602#include <avrt.h>
3603#include <mmdeviceapi.h>
3604#include <functiondiscoverykeys_devpkey.h>
3605
3606//=============================================================================
3607
// Release a COM interface pointer and reset it to NULL; a no-op when the
// pointer is already NULL.  (No comments inside the macro body -- they would
// break the line continuations.)
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}

// Function-pointer type matching AvSetMmThreadCharacteristicsW (avrt.dll,
// included above).  NOTE(review): presumably resolved at runtime rather
// than linked directly -- confirm against the code that loads it.
typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3616
3617//-----------------------------------------------------------------------------
3618
3619// WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3620// Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3621// requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3622// provide intermediate storage for read / write synchronization.
3623class WasapiBuffer
3624{
3625public:
3626 WasapiBuffer()
3627 : buffer_( NULL ),
3628 bufferSize_( 0 ),
3629 inIndex_( 0 ),
3630 outIndex_( 0 ) {}
3631
3632 ~WasapiBuffer() {
3633 delete buffer_;
3634 }
3635
3636 // sets the length of the internal ring buffer
3637 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3638 delete buffer_;
3639
3640 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3641
3642 bufferSize_ = bufferSize;
3643 inIndex_ = 0;
3644 outIndex_ = 0;
3645 }
3646
3647 // attempt to push a buffer into the ring buffer at the current "in" index
3648 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3649 {
3650 if ( !buffer || // incoming buffer is NULL
3651 bufferSize == 0 || // incoming buffer has no data
3652 bufferSize > bufferSize_ ) // incoming buffer too large
3653 {
3654 return false;
3655 }
3656
3657 unsigned int relOutIndex = outIndex_;
3658 unsigned int inIndexEnd = inIndex_ + bufferSize;
3659 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3660 relOutIndex += bufferSize_;
3661 }
3662
3663 // "in" index can end on the "out" index but cannot begin at it
3664 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3665 return false; // not enough space between "in" index and "out" index
3666 }
3667
3668 // copy buffer from external to internal
3669 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3670 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3671 int fromInSize = bufferSize - fromZeroSize;
3672
3673 switch( format )
3674 {
3675 case RTAUDIO_SINT8:
3676 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3677 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3678 break;
3679 case RTAUDIO_SINT16:
3680 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3681 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3682 break;
3683 case RTAUDIO_SINT24:
3684 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3685 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3686 break;
3687 case RTAUDIO_SINT32:
3688 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3689 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3690 break;
3691 case RTAUDIO_FLOAT32:
3692 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3693 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3694 break;
3695 case RTAUDIO_FLOAT64:
3696 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3697 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3698 break;
3699 }
3700
3701 // update "in" index
3702 inIndex_ += bufferSize;
3703 inIndex_ %= bufferSize_;
3704
3705 return true;
3706 }
3707
3708 // attempt to pull a buffer from the ring buffer from the current "out" index
3709 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3710 {
3711 if ( !buffer || // incoming buffer is NULL
3712 bufferSize == 0 || // incoming buffer has no data
3713 bufferSize > bufferSize_ ) // incoming buffer too large
3714 {
3715 return false;
3716 }
3717
3718 unsigned int relInIndex = inIndex_;
3719 unsigned int outIndexEnd = outIndex_ + bufferSize;
3720 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3721 relInIndex += bufferSize_;
3722 }
3723
3724 // "out" index can begin at and end on the "in" index
3725 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3726 return false; // not enough space between "out" index and "in" index
3727 }
3728
3729 // copy buffer from internal to external
3730 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3731 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3732 int fromOutSize = bufferSize - fromZeroSize;
3733
3734 switch( format )
3735 {
3736 case RTAUDIO_SINT8:
3737 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3738 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3739 break;
3740 case RTAUDIO_SINT16:
3741 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3742 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3743 break;
3744 case RTAUDIO_SINT24:
3745 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3746 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3747 break;
3748 case RTAUDIO_SINT32:
3749 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3750 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3751 break;
3752 case RTAUDIO_FLOAT32:
3753 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3754 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3755 break;
3756 case RTAUDIO_FLOAT64:
3757 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3758 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3759 break;
3760 }
3761
3762 // update "out" index
3763 outIndex_ += bufferSize;
3764 outIndex_ %= bufferSize_;
3765
3766 return true;
3767 }
3768
3769private:
3770 char* buffer_;
3771 unsigned int bufferSize_;
3772 unsigned int inIndex_;
3773 unsigned int outIndex_;
3774};
3775
3776//-----------------------------------------------------------------------------
3777
3778// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3779// between HW and the user. The convertBufferWasapi function is used to perform this conversion
3780// between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3781// This sample rate converter favors speed over quality, and works best with conversions between
3782// one rate and its multiple.
3783void convertBufferWasapi( char* outBuffer,
3784 const char* inBuffer,
3785 const unsigned int& channelCount,
3786 const unsigned int& inSampleRate,
3787 const unsigned int& outSampleRate,
3788 const unsigned int& inSampleCount,
3789 unsigned int& outSampleCount,
3790 const RtAudioFormat& format )
3791{
3792 // calculate the new outSampleCount and relative sampleStep
3793 float sampleRatio = ( float ) outSampleRate / inSampleRate;
3794 float sampleStep = 1.0f / sampleRatio;
3795 float inSampleFraction = 0.0f;
3796
3797 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
3798
3799 // frame-by-frame, copy each relative input sample into it's corresponding output sample
3800 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
3801 {
3802 unsigned int inSample = ( unsigned int ) inSampleFraction;
3803
3804 switch ( format )
3805 {
3806 case RTAUDIO_SINT8:
3807 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
3808 break;
3809 case RTAUDIO_SINT16:
3810 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
3811 break;
3812 case RTAUDIO_SINT24:
3813 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
3814 break;
3815 case RTAUDIO_SINT32:
3816 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
3817 break;
3818 case RTAUDIO_FLOAT32:
3819 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
3820 break;
3821 case RTAUDIO_FLOAT64:
3822 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
3823 break;
3824 }
3825
3826 // jump to next in sample
3827 inSampleFraction += sampleStep;
3828 }
3829}
3830
3831//-----------------------------------------------------------------------------
3832
// A structure to hold various information related to the WASAPI implementation.
struct WasapiHandle
{
  IAudioClient* captureAudioClient;   // audio client for the capture endpoint (NULL when no input)
  IAudioClient* renderAudioClient;    // audio client for the render endpoint (NULL when no output)
  IAudioCaptureClient* captureClient; // capture service interface (NULL until stream setup)
  IAudioRenderClient* renderClient;   // render service interface (NULL until stream setup)
  HANDLE captureEvent;                // event handle for capture-side notifications -- presumably
                                      // registered via IAudioClient::SetEventHandle; confirm at setup
  HANDLE renderEvent;                 // event handle for render-side notifications (see above)

  // Default-construct with every interface pointer and handle NULL so that
  // SAFE_RELEASE / CloseHandle-style cleanup is safe before initialization.
  WasapiHandle()
  : captureAudioClient( NULL ),
    renderAudioClient( NULL ),
    captureClient( NULL ),
    renderClient( NULL ),
    captureEvent( NULL ),
    renderEvent( NULL ) {}
};
3851
3852//=============================================================================
3853
3854RtApiWasapi::RtApiWasapi()
3855 : coInitialized_( false ), deviceEnumerator_( NULL )
3856{
3857 // WASAPI can run either apartment or multi-threaded
3858 HRESULT hr = CoInitialize( NULL );
3859 if ( !FAILED( hr ) )
3860 coInitialized_ = true;
3861
3862 // Instantiate device enumerator
3863 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3864 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3865 ( void** ) &deviceEnumerator_ );
3866
3867 if ( FAILED( hr ) ) {
3868 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3869 error( RtAudioError::DRIVER_ERROR );
3870 }
3871}
3872
3873//-----------------------------------------------------------------------------
3874
RtApiWasapi::~RtApiWasapi()
{
  // Close any open stream before releasing COM resources.
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize()
  if ( coInitialized_ )
    CoUninitialize();
}
3886
3887//=============================================================================
3888
3889unsigned int RtApiWasapi::getDeviceCount( void )
3890{
3891 unsigned int captureDeviceCount = 0;
3892 unsigned int renderDeviceCount = 0;
3893
3894 IMMDeviceCollection* captureDevices = NULL;
3895 IMMDeviceCollection* renderDevices = NULL;
3896
3897 // Count capture devices
3898 errorText_.clear();
3899 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3900 if ( FAILED( hr ) ) {
3901 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
3902 goto Exit;
3903 }
3904
3905 hr = captureDevices->GetCount( &captureDeviceCount );
3906 if ( FAILED( hr ) ) {
3907 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
3908 goto Exit;
3909 }
3910
3911 // Count render devices
3912 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
3913 if ( FAILED( hr ) ) {
3914 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
3915 goto Exit;
3916 }
3917
3918 hr = renderDevices->GetCount( &renderDeviceCount );
3919 if ( FAILED( hr ) ) {
3920 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
3921 goto Exit;
3922 }
3923
3924Exit:
3925 // release all references
3926 SAFE_RELEASE( captureDevices );
3927 SAFE_RELEASE( renderDevices );
3928
3929 if ( errorText_.empty() )
3930 return captureDeviceCount + renderDeviceCount;
3931
3932 error( RtAudioError::DRIVER_ERROR );
3933 return 0;
3934}
3935
3936//-----------------------------------------------------------------------------
3937
3938RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
3939{
3940 RtAudio::DeviceInfo info;
3941 unsigned int captureDeviceCount = 0;
3942 unsigned int renderDeviceCount = 0;
3943 std::wstring deviceName;
3944 std::string defaultDeviceName;
3945 bool isCaptureDevice = false;
3946
3947 PROPVARIANT deviceNameProp;
3948 PROPVARIANT defaultDeviceNameProp;
3949
3950 IMMDeviceCollection* captureDevices = NULL;
3951 IMMDeviceCollection* renderDevices = NULL;
3952 IMMDevice* devicePtr = NULL;
3953 IMMDevice* defaultDevicePtr = NULL;
3954 IAudioClient* audioClient = NULL;
3955 IPropertyStore* devicePropStore = NULL;
3956 IPropertyStore* defaultDevicePropStore = NULL;
3957
3958 WAVEFORMATEX* deviceFormat = NULL;
3959 WAVEFORMATEX* closestMatchFormat = NULL;
3960
3961 // probed
3962 info.probed = false;
3963
3964 // Count capture devices
3965 errorText_.clear();
3966 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
3967 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3968 if ( FAILED( hr ) ) {
3969 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
3970 goto Exit;
3971 }
3972
3973 hr = captureDevices->GetCount( &captureDeviceCount );
3974 if ( FAILED( hr ) ) {
3975 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
3976 goto Exit;
3977 }
3978
3979 // Count render devices
3980 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
3981 if ( FAILED( hr ) ) {
3982 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
3983 goto Exit;
3984 }
3985
3986 hr = renderDevices->GetCount( &renderDeviceCount );
3987 if ( FAILED( hr ) ) {
3988 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
3989 goto Exit;
3990 }
3991
3992 // validate device index
3993 if ( device >= captureDeviceCount + renderDeviceCount ) {
3994 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
3995 errorType = RtAudioError::INVALID_USE;
3996 goto Exit;
3997 }
3998
3999 // determine whether index falls within capture or render devices
4000 if ( device >= renderDeviceCount ) {
4001 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4002 if ( FAILED( hr ) ) {
4003 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4004 goto Exit;
4005 }
4006 isCaptureDevice = true;
4007 }
4008 else {
4009 hr = renderDevices->Item( device, &devicePtr );
4010 if ( FAILED( hr ) ) {
4011 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4012 goto Exit;
4013 }
4014 isCaptureDevice = false;
4015 }
4016
4017 // get default device name
4018 if ( isCaptureDevice ) {
4019 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4020 if ( FAILED( hr ) ) {
4021 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4022 goto Exit;
4023 }
4024 }
4025 else {
4026 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4027 if ( FAILED( hr ) ) {
4028 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4029 goto Exit;
4030 }
4031 }
4032
4033 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4034 if ( FAILED( hr ) ) {
4035 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4036 goto Exit;
4037 }
4038 PropVariantInit( &defaultDeviceNameProp );
4039
4040 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4041 if ( FAILED( hr ) ) {
4042 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4043 goto Exit;
4044 }
4045
4046 deviceName = defaultDeviceNameProp.pwszVal;
4047 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
4048
4049 // name
4050 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4051 if ( FAILED( hr ) ) {
4052 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4053 goto Exit;
4054 }
4055
4056 PropVariantInit( &deviceNameProp );
4057
4058 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4059 if ( FAILED( hr ) ) {
4060 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4061 goto Exit;
4062 }
4063
4064 deviceName = deviceNameProp.pwszVal;
4065 info.name = std::string( deviceName.begin(), deviceName.end() );
4066
4067 // is default
4068 if ( isCaptureDevice ) {
4069 info.isDefaultInput = info.name == defaultDeviceName;
4070 info.isDefaultOutput = false;
4071 }
4072 else {
4073 info.isDefaultInput = false;
4074 info.isDefaultOutput = info.name == defaultDeviceName;
4075 }
4076
4077 // channel count
4078 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4079 if ( FAILED( hr ) ) {
4080 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4081 goto Exit;
4082 }
4083
4084 hr = audioClient->GetMixFormat( &deviceFormat );
4085 if ( FAILED( hr ) ) {
4086 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4087 goto Exit;
4088 }
4089
4090 if ( isCaptureDevice ) {
4091 info.inputChannels = deviceFormat->nChannels;
4092 info.outputChannels = 0;
4093 info.duplexChannels = 0;
4094 }
4095 else {
4096 info.inputChannels = 0;
4097 info.outputChannels = deviceFormat->nChannels;
4098 info.duplexChannels = 0;
4099 }
4100
4101 // sample rates
4102 info.sampleRates.clear();
4103
4104 // allow support for all sample rates as we have a built-in sample rate converter
4105 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4106 info.sampleRates.push_back( SAMPLE_RATES[i] );
4107 }
4108
4109 // native format
4110 info.nativeFormats = 0;
4111
4112 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4113 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4114 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4115 {
4116 if ( deviceFormat->wBitsPerSample == 32 ) {
4117 info.nativeFormats |= RTAUDIO_FLOAT32;
4118 }
4119 else if ( deviceFormat->wBitsPerSample == 64 ) {
4120 info.nativeFormats |= RTAUDIO_FLOAT64;
4121 }
4122 }
4123 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4124 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4125 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4126 {
4127 if ( deviceFormat->wBitsPerSample == 8 ) {
4128 info.nativeFormats |= RTAUDIO_SINT8;
4129 }
4130 else if ( deviceFormat->wBitsPerSample == 16 ) {
4131 info.nativeFormats |= RTAUDIO_SINT16;
4132 }
4133 else if ( deviceFormat->wBitsPerSample == 24 ) {
4134 info.nativeFormats |= RTAUDIO_SINT24;
4135 }
4136 else if ( deviceFormat->wBitsPerSample == 32 ) {
4137 info.nativeFormats |= RTAUDIO_SINT32;
4138 }
4139 }
4140
4141 // probed
4142 info.probed = true;
4143
4144Exit:
4145 // release all references
4146 PropVariantClear( &deviceNameProp );
4147 PropVariantClear( &defaultDeviceNameProp );
4148
4149 SAFE_RELEASE( captureDevices );
4150 SAFE_RELEASE( renderDevices );
4151 SAFE_RELEASE( devicePtr );
4152 SAFE_RELEASE( defaultDevicePtr );
4153 SAFE_RELEASE( audioClient );
4154 SAFE_RELEASE( devicePropStore );
4155 SAFE_RELEASE( defaultDevicePropStore );
4156
4157 CoTaskMemFree( deviceFormat );
4158 CoTaskMemFree( closestMatchFormat );
4159
4160 if ( !errorText_.empty() )
4161 error( errorType );
4162 return info;
4163}
4164
4165//-----------------------------------------------------------------------------
4166
4167unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4168{
4169 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4170 if ( getDeviceInfo( i ).isDefaultOutput ) {
4171 return i;
4172 }
4173 }
4174
4175 return 0;
4176}
4177
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches

to all changes: