Merge lp:~marcustomlinson/dspatchables/add_basic_components into lp:dspatchables
- add_basic_components
- Merge into trunk
Proposed by
Marcus Tomlinson
Status: | Merged |
---|---|
Merged at revision: | 2 |
Proposed branch: | lp:~marcustomlinson/dspatchables/add_basic_components |
Merge into: | lp:dspatchables |
Diff against target: |
16367 lines (+15835/-331) 35 files modified
CMakeLists.txt (+7/-0) DspAdder/CMakeLists.txt (+17/-0) DspAdder/DspAdder.cpp (+76/-0) DspAdder/DspAdder.h (+80/-0) DspAudioDevice/CMakeLists.txt (+61/-0) DspAudioDevice/DspAudioDevice.cpp (+427/-0) DspAudioDevice/DspAudioDevice.h (+109/-0) DspAudioDevice/rtaudio/RtAudio.cpp (+10136/-0) DspAudioDevice/rtaudio/RtAudio.h (+1162/-0) DspGain/CMakeLists.txt (+17/-0) DspGain/DspGain.cpp (+86/-0) DspGain/DspGain.h (+65/-0) DspOscillator/CMakeLists.txt (+1/-3) DspOscillator/DspOscillator.cpp (+222/-223) DspOscillator/DspOscillator.h (+105/-105) DspWaveStreamer/CMakeLists.txt (+17/-0) DspWaveStreamer/DspWaveStreamer.cpp (+279/-0) DspWaveStreamer/DspWaveStreamer.h (+103/-0) include/DSPatch.h (+632/-0) include/dspatch/DspCircuit.h (+321/-0) include/dspatch/DspCircuitThread.h (+89/-0) include/dspatch/DspComponent.h (+282/-0) include/dspatch/DspComponentThread.h (+72/-0) include/dspatch/DspParameter.h (+111/-0) include/dspatch/DspPlugin.h (+95/-0) include/dspatch/DspPluginLoader.h (+75/-0) include/dspatch/DspRunType.h (+204/-0) include/dspatch/DspSignal.h (+130/-0) include/dspatch/DspSignalBus.h (+192/-0) include/dspatch/DspThread.h (+43/-0) include/dspatch/DspThreadNull.h (+135/-0) include/dspatch/DspThreadUnix.h (+177/-0) include/dspatch/DspThreadWin.h (+178/-0) include/dspatch/DspWire.h (+58/-0) include/dspatch/DspWireBus.h (+71/-0) |
To merge this branch: | bzr merge lp:~marcustomlinson/dspatchables/add_basic_components |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Marcus Tomlinson | Pending | ||
Review via email: mp+255246@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'CMakeLists.txt' | |||
2 | --- CMakeLists.txt 2014-12-23 19:18:01 +0000 | |||
3 | +++ CMakeLists.txt 2015-04-05 22:23:36 +0000 | |||
4 | @@ -4,4 +4,11 @@ | |||
5 | 4 | 4 | ||
6 | 5 | project(DSPatchables) | 5 | project(DSPatchables) |
7 | 6 | 6 | ||
8 | 7 | include_directories(${CMAKE_SOURCE_DIR}/include) | ||
9 | 8 | link_directories(${CMAKE_SOURCE_DIR}/link) | ||
10 | 9 | |||
11 | 10 | add_subdirectory(DspAdder) | ||
12 | 11 | add_subdirectory(DspAudioDevice) | ||
13 | 12 | add_subdirectory(DspGain) | ||
14 | 7 | add_subdirectory(DspOscillator) | 13 | add_subdirectory(DspOscillator) |
15 | 14 | add_subdirectory(DspWaveStreamer) | ||
16 | 8 | 15 | ||
17 | === added directory 'DspAdder' | |||
18 | === added file 'DspAdder/CMakeLists.txt' | |||
19 | --- DspAdder/CMakeLists.txt 1970-01-01 00:00:00 +0000 | |||
20 | +++ DspAdder/CMakeLists.txt 2015-04-05 22:23:36 +0000 | |||
21 | @@ -0,0 +1,17 @@ | |||
22 | 1 | project(DspAdder) | ||
23 | 2 | |||
24 | 3 | file(GLOB srcs *.cpp) | ||
25 | 4 | file(GLOB hdrs *.h) | ||
26 | 5 | |||
27 | 6 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}) | ||
28 | 7 | |||
29 | 8 | add_library( | ||
30 | 9 | ${PROJECT_NAME} SHARED | ||
31 | 10 | ${srcs} | ||
32 | 11 | ${hdrs} | ||
33 | 12 | ) | ||
34 | 13 | |||
35 | 14 | target_link_libraries( | ||
36 | 15 | ${PROJECT_NAME} | ||
37 | 16 | DSPatch | ||
38 | 17 | ) | ||
39 | 0 | 18 | ||
40 | === added file 'DspAdder/DspAdder.cpp' | |||
41 | --- DspAdder/DspAdder.cpp 1970-01-01 00:00:00 +0000 | |||
42 | +++ DspAdder/DspAdder.cpp 2015-04-05 22:23:36 +0000 | |||
43 | @@ -0,0 +1,76 @@ | |||
44 | 1 | /************************************************************************ | ||
45 | 2 | DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library | ||
46 | 3 | Copyright (c) 2012-2015 Marcus Tomlinson | ||
47 | 4 | |||
48 | 5 | This file is part of DSPatch. | ||
49 | 6 | |||
50 | 7 | GNU Lesser General Public License Usage | ||
51 | 8 | This file may be used under the terms of the GNU Lesser General Public | ||
52 | 9 | License version 3.0 as published by the Free Software Foundation and | ||
53 | 10 | appearing in the file LGPLv3.txt included in the packaging of this | ||
54 | 11 | file. Please review the following information to ensure the GNU Lesser | ||
55 | 12 | General Public License version 3.0 requirements will be met: | ||
56 | 13 | http://www.gnu.org/copyleft/lgpl.html. | ||
57 | 14 | |||
58 | 15 | Other Usage | ||
59 | 16 | Alternatively, this file may be used in accordance with the terms and | ||
60 | 17 | conditions contained in a signed written agreement between you and | ||
61 | 18 | Marcus Tomlinson. | ||
62 | 19 | |||
63 | 20 | DSPatch is distributed in the hope that it will be useful, | ||
64 | 21 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
65 | 22 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
66 | 23 | ************************************************************************/ | ||
67 | 24 | |||
68 | 25 | #include <DspAdder.h> | ||
69 | 26 | |||
70 | 27 | //================================================================================================= | ||
71 | 28 | |||
72 | 29 | DspAdder::DspAdder() | ||
73 | 30 | { | ||
74 | 31 | // add 2 inputs | ||
75 | 32 | AddInput_("Input1"); | ||
76 | 33 | AddInput_("Input2"); | ||
77 | 34 | |||
78 | 35 | // add 1 output | ||
79 | 36 | AddOutput_("Output1"); | ||
80 | 37 | } | ||
81 | 38 | |||
82 | 39 | //------------------------------------------------------------------------------------------------- | ||
83 | 40 | |||
84 | 41 | DspAdder::~DspAdder() | ||
85 | 42 | { | ||
86 | 43 | } | ||
87 | 44 | |||
88 | 45 | //================================================================================================= | ||
89 | 46 | |||
90 | 47 | void DspAdder::Process_(DspSignalBus& inputs, DspSignalBus& outputs) | ||
91 | 48 | { | ||
92 | 49 | // get input values from inputs bus (GetValue() returns true if successful) | ||
93 | 50 | if (!inputs.GetValue(0, _stream1)) | ||
94 | 51 | { | ||
95 | 52 | _stream1.assign(_stream1.size(), 0); // clear buffer if no input received | ||
96 | 53 | } | ||
97 | 54 | // do the same to the 2nd input buffer | ||
98 | 55 | if (!inputs.GetValue(1, _stream2)) | ||
99 | 56 | { | ||
100 | 57 | _stream2.assign(_stream2.size(), 0); | ||
101 | 58 | } | ||
102 | 59 | |||
103 | 60 | // ensure that the 2 input buffer sizes match | ||
104 | 61 | if (_stream1.size() == _stream2.size()) | ||
105 | 62 | { | ||
106 | 63 | for (size_t i = 0; i < _stream1.size(); i++) | ||
107 | 64 | { | ||
108 | 65 | _stream1[i] += _stream2[i]; // perform addition element-by-element | ||
109 | 66 | } | ||
110 | 67 | outputs.SetValue(0, _stream1); // set output 1 | ||
111 | 68 | } | ||
112 | 69 | // if input sizes don't match | ||
113 | 70 | else | ||
114 | 71 | { | ||
115 | 72 | outputs.ClearValue(0); // clear the output | ||
116 | 73 | } | ||
117 | 74 | } | ||
118 | 75 | |||
119 | 76 | //================================================================================================= | ||
120 | 0 | 77 | ||
121 | === added file 'DspAdder/DspAdder.h' | |||
122 | --- DspAdder/DspAdder.h 1970-01-01 00:00:00 +0000 | |||
123 | +++ DspAdder/DspAdder.h 2015-04-05 22:23:36 +0000 | |||
124 | @@ -0,0 +1,80 @@ | |||
125 | 1 | /************************************************************************ | ||
126 | 2 | DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library | ||
127 | 3 | Copyright (c) 2012-2015 Marcus Tomlinson | ||
128 | 4 | |||
129 | 5 | This file is part of DSPatch. | ||
130 | 6 | |||
131 | 7 | GNU Lesser General Public License Usage | ||
132 | 8 | This file may be used under the terms of the GNU Lesser General Public | ||
133 | 9 | License version 3.0 as published by the Free Software Foundation and | ||
134 | 10 | appearing in the file LGPLv3.txt included in the packaging of this | ||
135 | 11 | file. Please review the following information to ensure the GNU Lesser | ||
136 | 12 | General Public License version 3.0 requirements will be met: | ||
137 | 13 | http://www.gnu.org/copyleft/lgpl.html. | ||
138 | 14 | |||
139 | 15 | Other Usage | ||
140 | 16 | Alternatively, this file may be used in accordance with the terms and | ||
141 | 17 | conditions contained in a signed written agreement between you and | ||
142 | 18 | Marcus Tomlinson. | ||
143 | 19 | |||
144 | 20 | DSPatch is distributed in the hope that it will be useful, | ||
145 | 21 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
146 | 22 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
147 | 23 | ************************************************************************/ | ||
148 | 24 | |||
149 | 25 | #ifndef DSPADDER_H | ||
150 | 26 | #define DSPADDER_H | ||
151 | 27 | |||
152 | 28 | #include <DSPatch.h> | ||
153 | 29 | |||
154 | 30 | //================================================================================================= | ||
155 | 31 | /// Example DspComponent: Adder | ||
156 | 32 | |||
157 | 33 | /** This component has 2 inputs and 1 output. The component receives 2 floating-point buffers into | ||
158 | 34 | it's 2 inputs, adds each buffer element of the 1st buffer to the corresponding element of the 2nd | ||
159 | 35 | buffer, then stores the resultant buffer into a 3rd buffer. This resultant buffer is then passed to | ||
160 | 36 | output 1 of the component output bus. */ | ||
161 | 37 | |||
162 | 38 | class DspAdder : public DspComponent | ||
163 | 39 | { | ||
164 | 40 | public: | ||
165 | 41 | //! Component constructor | ||
166 | 42 | /*! When a component is constructed, it's input and output buses must be configured. This is | ||
167 | 43 | achieved by making calls to the base class protected methods: "AddInput_()" and "AddOutput_(). | ||
168 | 44 | These methods must be called once per input / output required. IO signal names are optional | ||
169 | 45 | (Component IO can be referenced by either string ID or index) and can be assigned to each | ||
170 | 46 | input / output by supplying the desired string ID as an argument to the respective AddInput_() | ||
171 | 47 | / AddOutput_() method call.*/ | ||
172 | 48 | |||
173 | 49 | DspAdder(); | ||
174 | 50 | ~DspAdder(); | ||
175 | 51 | |||
176 | 52 | protected: | ||
177 | 53 | //! Virtual process method inherited from DspComponent | ||
178 | 54 | /*! The Process_() method is called from the DSPatch engine when a new set of component input | ||
179 | 55 | signals are ready for processing. The Process() method has 2 arguments: the input bus and the | ||
180 | 56 | output bus. This method's purpose is to pull its required inputs out of the input bus, process | ||
181 | 57 | these inputs, and populate the output bus with the results (see DspSignalBus). */ | ||
182 | 58 | |||
183 | 59 | virtual void Process_(DspSignalBus& inputs, DspSignalBus& outputs); | ||
184 | 60 | |||
185 | 61 | private: | ||
186 | 62 | std::vector<float> _stream1; | ||
187 | 63 | std::vector<float> _stream2; | ||
188 | 64 | }; | ||
189 | 65 | |||
190 | 66 | //================================================================================================= | ||
191 | 67 | |||
192 | 68 | class DspAdderPlugin : public DspPlugin | ||
193 | 69 | { | ||
194 | 70 | DspComponent* Create(std::map<std::string, DspParameter>&) const | ||
195 | 71 | { | ||
196 | 72 | return new DspAdder(); | ||
197 | 73 | } | ||
198 | 74 | }; | ||
199 | 75 | |||
200 | 76 | EXPORT_DSPPLUGIN(DspAdderPlugin) | ||
201 | 77 | |||
202 | 78 | //================================================================================================= | ||
203 | 79 | |||
204 | 80 | #endif // DSPADDER_H | ||
205 | 0 | 81 | ||
206 | === added directory 'DspAudioDevice' | |||
207 | === added file 'DspAudioDevice/CMakeLists.txt' | |||
208 | --- DspAudioDevice/CMakeLists.txt 1970-01-01 00:00:00 +0000 | |||
209 | +++ DspAudioDevice/CMakeLists.txt 2015-04-05 22:23:36 +0000 | |||
210 | @@ -0,0 +1,61 @@ | |||
211 | 1 | project(DspAudioDevice) | ||
212 | 2 | |||
213 | 3 | file(GLOB srcs *.cpp rtaudio/*.cpp) | ||
214 | 4 | file(GLOB hdrs *.h rtaudio/*.h) | ||
215 | 5 | |||
216 | 6 | include_directories( | ||
217 | 7 | ${CMAKE_CURRENT_SOURCE_DIR} | ||
218 | 8 | ${CMAKE_CURRENT_SOURCE_DIR}/rtaudio | ||
219 | 9 | ) | ||
220 | 10 | |||
221 | 11 | add_library( | ||
222 | 12 | ${PROJECT_NAME} SHARED | ||
223 | 13 | ${srcs} | ||
224 | 14 | ${hdrs} | ||
225 | 15 | ) | ||
226 | 16 | |||
227 | 17 | target_link_libraries( | ||
228 | 18 | ${PROJECT_NAME} | ||
229 | 19 | DSPatch | ||
230 | 20 | ) | ||
231 | 21 | |||
232 | 22 | # Definition for RtAudio Windows, using direct sound | ||
233 | 23 | if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") | ||
234 | 24 | add_definitions(-DEXAMPLE_PLUGIN_FILE="${CMAKE_CURRENT_BINARY_DIR}/oscillator-plugin/$<CONFIGURATION>/DspOscillator.dll") | ||
235 | 25 | add_definitions(-D__WINDOWS_WASAPI__) | ||
236 | 26 | |||
237 | 27 | add_custom_command( | ||
238 | 28 | TARGET ${PROJECT_NAME} POST_BUILD | ||
239 | 29 | COMMAND ${CMAKE_COMMAND} -E copy_if_different | ||
240 | 30 | ${CMAKE_BINARY_DIR}/$<CONFIGURATION>/DSPatch.dll | ||
241 | 31 | ${CMAKE_CURRENT_BINARY_DIR}/$<CONFIGURATION> | ||
242 | 32 | ) | ||
243 | 33 | endif(${CMAKE_SYSTEM_NAME} MATCHES "Windows") | ||
244 | 34 | |||
245 | 35 | # Definition for RtAudio Linux, using ALSA | ||
246 | 36 | if(${CMAKE_SYSTEM_NAME} MATCHES "Linux") | ||
247 | 37 | add_definitions(-DEXAMPLE_PLUGIN_FILE="${CMAKE_CURRENT_BINARY_DIR}/oscillator-plugin/libDspOscillator.so") | ||
248 | 38 | add_definitions(-D__LINUX_ALSA__) | ||
249 | 39 | |||
250 | 40 | find_library(ASOUND asound) | ||
251 | 41 | if(NOT ASOUND) | ||
252 | 42 | message(FATAL_ERROR "ALSA not found (Ensure that libasound2-dev is installed)") | ||
253 | 43 | endif() | ||
254 | 44 | |||
255 | 45 | target_link_libraries( | ||
256 | 46 | ${PROJECT_NAME} | ||
257 | 47 | ${ASOUND} | ||
258 | 48 | ) | ||
259 | 49 | endif(${CMAKE_SYSTEM_NAME} MATCHES "Linux") | ||
260 | 50 | |||
261 | 51 | # Definition for RtAudio Mac OSX, using Core Audio | ||
262 | 52 | if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") | ||
263 | 53 | add_definitions(-DEXAMPLE_PLUGIN_FILE="${CMAKE_CURRENT_BINARY_DIR}/oscillator-plugin/$<CONFIGURATION>/libDspOscillator.dylib") | ||
264 | 54 | add_definitions(-D__MACOSX_CORE__) | ||
265 | 55 | |||
266 | 56 | target_link_libraries( | ||
267 | 57 | ${PROJECT_NAME} | ||
268 | 58 | "-framework CoreAudio" | ||
269 | 59 | "-framework CoreFoundation" | ||
270 | 60 | ) | ||
271 | 61 | endif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") | ||
272 | 0 | 62 | ||
273 | === added file 'DspAudioDevice/DspAudioDevice.cpp' | |||
274 | --- DspAudioDevice/DspAudioDevice.cpp 1970-01-01 00:00:00 +0000 | |||
275 | +++ DspAudioDevice/DspAudioDevice.cpp 2015-04-05 22:23:36 +0000 | |||
276 | @@ -0,0 +1,427 @@ | |||
277 | 1 | /************************************************************************ | ||
278 | 2 | DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library | ||
279 | 3 | Copyright (c) 2012-2015 Marcus Tomlinson | ||
280 | 4 | |||
281 | 5 | This file is part of DSPatch. | ||
282 | 6 | |||
283 | 7 | GNU Lesser General Public License Usage | ||
284 | 8 | This file may be used under the terms of the GNU Lesser General Public | ||
285 | 9 | License version 3.0 as published by the Free Software Foundation and | ||
286 | 10 | appearing in the file LGPLv3.txt included in the packaging of this | ||
287 | 11 | file. Please review the following information to ensure the GNU Lesser | ||
288 | 12 | General Public License version 3.0 requirements will be met: | ||
289 | 13 | http://www.gnu.org/copyleft/lgpl.html. | ||
290 | 14 | |||
291 | 15 | Other Usage | ||
292 | 16 | Alternatively, this file may be used in accordance with the terms and | ||
293 | 17 | conditions contained in a signed written agreement between you and | ||
294 | 18 | Marcus Tomlinson. | ||
295 | 19 | |||
296 | 20 | DSPatch is distributed in the hope that it will be useful, | ||
297 | 21 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
298 | 22 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
299 | 23 | ************************************************************************/ | ||
300 | 24 | |||
301 | 25 | #include <DspAudioDevice.h> | ||
302 | 26 | |||
303 | 27 | #include <RtAudio.h> | ||
304 | 28 | |||
305 | 29 | #include <iostream> | ||
306 | 30 | #include <string.h> | ||
307 | 31 | #include <cstdlib> | ||
308 | 32 | |||
309 | 33 | //================================================================================================= | ||
310 | 34 | |||
311 | 35 | struct RtAudioMembers | ||
312 | 36 | { | ||
313 | 37 | std::vector<RtAudio::DeviceInfo> deviceList; | ||
314 | 38 | |||
315 | 39 | RtAudio audioStream; | ||
316 | 40 | RtAudio::StreamParameters outputParams; | ||
317 | 41 | RtAudio::StreamParameters inputParams; | ||
318 | 42 | }; | ||
319 | 43 | |||
320 | 44 | //================================================================================================= | ||
321 | 45 | |||
322 | 46 | DspAudioDevice::DspAudioDevice() | ||
323 | 47 | : _rtAudio(new RtAudioMembers()) | ||
324 | 48 | , _gotWaitReady(false) | ||
325 | 49 | , _gotSyncReady(true) | ||
326 | 50 | { | ||
327 | 51 | _outputChannels.resize(8); | ||
328 | 52 | for (int i = 0; i < 8; i++) | ||
329 | 53 | { | ||
330 | 54 | AddInput_(); | ||
331 | 55 | } | ||
332 | 56 | |||
333 | 57 | AddInput_("Sample Rate"); | ||
334 | 58 | |||
335 | 59 | _inputChannels.resize(8); | ||
336 | 60 | for (int i = 0; i < 8; i++) | ||
337 | 61 | { | ||
338 | 62 | AddOutput_(); | ||
339 | 63 | } | ||
340 | 64 | |||
341 | 65 | std::vector<std::string> deviceNameList; | ||
342 | 66 | |||
343 | 67 | for (unsigned int i = 0; i < _rtAudio->audioStream.getDeviceCount(); i++) | ||
344 | 68 | { | ||
345 | 69 | _rtAudio->deviceList.push_back(_rtAudio->audioStream.getDeviceInfo(i)); | ||
346 | 70 | deviceNameList.push_back(_rtAudio->audioStream.getDeviceInfo(i).name); | ||
347 | 71 | } | ||
348 | 72 | |||
349 | 73 | pDeviceList = AddParameter_("deviceList", DspParameter(DspParameter::List, deviceNameList)); | ||
350 | 74 | pIsStreaming = AddParameter_("isStreaming", DspParameter(DspParameter::Bool, false)); | ||
351 | 75 | pBufferSize = AddParameter_("bufferSize", DspParameter(DspParameter::Int, 256)); | ||
352 | 76 | pSampleRate = AddParameter_("sampleRate", DspParameter(DspParameter::Int, 44100)); | ||
353 | 77 | |||
354 | 78 | SetDevice(_rtAudio->audioStream.getDefaultOutputDevice()); | ||
355 | 79 | SetBufferSize(GetBufferSize()); | ||
356 | 80 | SetSampleRate(GetSampleRate()); | ||
357 | 81 | } | ||
358 | 82 | |||
359 | 83 | //------------------------------------------------------------------------------------------------- | ||
360 | 84 | |||
361 | 85 | DspAudioDevice::~DspAudioDevice() | ||
362 | 86 | { | ||
363 | 87 | _StopStream(); | ||
364 | 88 | |||
365 | 89 | delete _rtAudio; | ||
366 | 90 | } | ||
367 | 91 | |||
368 | 92 | //------------------------------------------------------------------------------------------------- | ||
369 | 93 | |||
370 | 94 | bool DspAudioDevice::SetDevice(int deviceIndex) | ||
371 | 95 | { | ||
372 | 96 | if (deviceIndex >= 0 && deviceIndex < GetDeviceCount()) | ||
373 | 97 | { | ||
374 | 98 | _StopStream(); | ||
375 | 99 | |||
376 | 100 | SetParameter_(pDeviceList, DspParameter(DspParameter::Int, deviceIndex)); | ||
377 | 101 | |||
378 | 102 | _rtAudio->inputParams.nChannels = _rtAudio->deviceList[deviceIndex].inputChannels; | ||
379 | 103 | _rtAudio->inputParams.deviceId = deviceIndex; | ||
380 | 104 | |||
381 | 105 | _rtAudio->outputParams.nChannels = _rtAudio->deviceList[deviceIndex].outputChannels; | ||
382 | 106 | _rtAudio->outputParams.deviceId = deviceIndex; | ||
383 | 107 | |||
384 | 108 | _StartStream(); | ||
385 | 109 | |||
386 | 110 | return true; | ||
387 | 111 | } | ||
388 | 112 | |||
389 | 113 | return false; | ||
390 | 114 | } | ||
391 | 115 | |||
392 | 116 | //------------------------------------------------------------------------------------------------- | ||
393 | 117 | |||
394 | 118 | std::string DspAudioDevice::GetDeviceName(int deviceIndex) const | ||
395 | 119 | { | ||
396 | 120 | if (deviceIndex >= 0 && deviceIndex < GetDeviceCount()) | ||
397 | 121 | { | ||
398 | 122 | return _rtAudio->deviceList[deviceIndex].name; | ||
399 | 123 | } | ||
400 | 124 | |||
401 | 125 | return ""; | ||
402 | 126 | } | ||
403 | 127 | |||
404 | 128 | //------------------------------------------------------------------------------------------------- | ||
405 | 129 | |||
406 | 130 | int DspAudioDevice::GetDeviceInputCount(int deviceIndex) const | ||
407 | 131 | { | ||
408 | 132 | return _rtAudio->deviceList[deviceIndex].inputChannels; | ||
409 | 133 | } | ||
410 | 134 | |||
411 | 135 | //------------------------------------------------------------------------------------------------- | ||
412 | 136 | |||
413 | 137 | int DspAudioDevice::GetDeviceOutputCount(int deviceIndex) const | ||
414 | 138 | { | ||
415 | 139 | return _rtAudio->deviceList[deviceIndex].outputChannels; | ||
416 | 140 | } | ||
417 | 141 | |||
418 | 142 | //------------------------------------------------------------------------------------------------- | ||
419 | 143 | |||
420 | 144 | int DspAudioDevice::GetCurrentDevice() const | ||
421 | 145 | { | ||
422 | 146 | return *GetParameter_(pDeviceList)->GetInt(); | ||
423 | 147 | } | ||
424 | 148 | |||
425 | 149 | //------------------------------------------------------------------------------------------------- | ||
426 | 150 | |||
427 | 151 | int DspAudioDevice::GetDeviceCount() const | ||
428 | 152 | { | ||
429 | 153 | return GetParameter_(pDeviceList)->GetList()->size(); | ||
430 | 154 | } | ||
431 | 155 | |||
432 | 156 | //------------------------------------------------------------------------------------------------- | ||
433 | 157 | |||
434 | 158 | void DspAudioDevice::SetBufferSize(int bufferSize) | ||
435 | 159 | { | ||
436 | 160 | _StopStream(); | ||
437 | 161 | |||
438 | 162 | SetParameter_(pBufferSize, DspParameter(DspParameter::Int, bufferSize)); | ||
439 | 163 | for (size_t i = 0; i < _inputChannels.size(); i++) | ||
440 | 164 | { | ||
441 | 165 | _inputChannels[i].resize(bufferSize); | ||
442 | 166 | } | ||
443 | 167 | |||
444 | 168 | _StartStream(); | ||
445 | 169 | } | ||
446 | 170 | |||
447 | 171 | //------------------------------------------------------------------------------------------------- | ||
448 | 172 | |||
449 | 173 | void DspAudioDevice::SetSampleRate(int sampleRate) | ||
450 | 174 | { | ||
451 | 175 | _StopStream(); | ||
452 | 176 | SetParameter_(pSampleRate, DspParameter(DspParameter::Int, sampleRate)); | ||
453 | 177 | _StartStream(); | ||
454 | 178 | } | ||
455 | 179 | |||
456 | 180 | //------------------------------------------------------------------------------------------------- | ||
457 | 181 | |||
458 | 182 | bool DspAudioDevice::IsStreaming() const | ||
459 | 183 | { | ||
460 | 184 | return *GetParameter_(pIsStreaming)->GetBool(); | ||
461 | 185 | } | ||
462 | 186 | |||
463 | 187 | //------------------------------------------------------------------------------------------------- | ||
464 | 188 | |||
465 | 189 | int DspAudioDevice::GetBufferSize() const | ||
466 | 190 | { | ||
467 | 191 | return *GetParameter_(pBufferSize)->GetInt(); | ||
468 | 192 | } | ||
469 | 193 | |||
470 | 194 | //------------------------------------------------------------------------------------------------- | ||
471 | 195 | |||
472 | 196 | int DspAudioDevice::GetSampleRate() const | ||
473 | 197 | { | ||
474 | 198 | return *GetParameter_(pSampleRate)->GetInt(); | ||
475 | 199 | } | ||
476 | 200 | |||
477 | 201 | //================================================================================================= | ||
478 | 202 | |||
479 | 203 | void DspAudioDevice::Process_(DspSignalBus& inputs, DspSignalBus& outputs) | ||
480 | 204 | { | ||
481 | 205 | // Wait until the sound card is ready for the next set of buffers | ||
482 | 206 | // ============================================================== | ||
483 | 207 | _syncMutex.Lock(); | ||
484 | 208 | if (!_gotSyncReady) // if haven't already got the release | ||
485 | 209 | { | ||
486 | 210 | _syncCondt.Wait(_syncMutex); // wait for sync | ||
487 | 211 | } | ||
488 | 212 | _gotSyncReady = false; // reset the release flag | ||
489 | 213 | _syncMutex.Unlock(); | ||
490 | 214 | |||
491 | 215 | // Synchronise sample rate with the "Sample Rate" input feed | ||
492 | 216 | // ========================================================= | ||
493 | 217 | int sampleRate; | ||
494 | 218 | if (inputs.GetValue("Sample Rate", sampleRate)) | ||
495 | 219 | { | ||
496 | 220 | if (sampleRate != GetSampleRate()) | ||
497 | 221 | { | ||
498 | 222 | SetSampleRate(sampleRate); | ||
499 | 223 | } | ||
500 | 224 | } | ||
501 | 225 | |||
502 | 226 | // Synchronise buffer size with the size of incoming buffers | ||
503 | 227 | // ========================================================= | ||
504 | 228 | if (inputs.GetValue(0, _outputChannels[0])) | ||
505 | 229 | { | ||
506 | 230 | if (GetBufferSize() != (int)_outputChannels[0].size() && _outputChannels[0].size() != 0) | ||
507 | 231 | { | ||
508 | 232 | SetBufferSize(_outputChannels[0].size()); | ||
509 | 233 | } | ||
510 | 234 | } | ||
511 | 235 | |||
512 | 236 | // Retrieve incoming component buffers for the sound card to output | ||
513 | 237 | // ================================================================ | ||
514 | 238 | for (size_t i = 0; i < _outputChannels.size(); i++) | ||
515 | 239 | { | ||
516 | 240 | if (!inputs.GetValue(i, _outputChannels[i])) | ||
517 | 241 | { | ||
518 | 242 | _outputChannels[i].assign(_outputChannels[i].size(), 0); | ||
519 | 243 | } | ||
520 | 244 | } | ||
521 | 245 | |||
522 | 246 | // Retrieve incoming sound card buffers for the component to output | ||
523 | 247 | // ================================================================ | ||
524 | 248 | for (size_t i = 0; i < _inputChannels.size(); i++) | ||
525 | 249 | { | ||
526 | 250 | outputs.SetValue(i, _inputChannels[i]); | ||
527 | 251 | } | ||
528 | 252 | |||
529 | 253 | // Inform the sound card that buffers are now ready | ||
530 | 254 | // ================================================ | ||
531 | 255 | _buffersMutex.Lock(); | ||
532 | 256 | _gotWaitReady = true; // set release flag | ||
533 | 257 | _waitCondt.WakeAll(); // release sync | ||
534 | 258 | _buffersMutex.Unlock(); | ||
535 | 259 | } | ||
536 | 260 | |||
537 | 261 | //------------------------------------------------------------------------------------------------- | ||
538 | 262 | |||
539 | 263 | bool DspAudioDevice::ParameterUpdating_(int index, DspParameter const& param) | ||
540 | 264 | { | ||
541 | 265 | if (index == pDeviceList) | ||
542 | 266 | { | ||
543 | 267 | return SetDevice(*param.GetInt()); | ||
544 | 268 | } | ||
545 | 269 | else if (index == pBufferSize) | ||
546 | 270 | { | ||
547 | 271 | SetBufferSize(*param.GetInt()); | ||
548 | 272 | return true; | ||
549 | 273 | } | ||
550 | 274 | else if (index == pSampleRate) | ||
551 | 275 | { | ||
552 | 276 | SetSampleRate(*param.GetInt()); | ||
553 | 277 | return true; | ||
554 | 278 | } | ||
555 | 279 | |||
556 | 280 | return false; | ||
557 | 281 | } | ||
558 | 282 | |||
559 | 283 | //================================================================================================= | ||
560 | 284 | |||
561 | 285 | void DspAudioDevice::_SetIsStreaming(bool isStreaming) | ||
562 | 286 | { | ||
563 | 287 | SetParameter_(pIsStreaming, DspParameter(DspParameter::Bool, isStreaming)); | ||
564 | 288 | } | ||
565 | 289 | |||
566 | 290 | //------------------------------------------------------------------------------------------------- | ||
567 | 291 | |||
568 | 292 | void DspAudioDevice::_WaitForBuffer() | ||
569 | 293 | { | ||
570 | 294 | _buffersMutex.Lock(); | ||
571 | 295 | if (!_gotWaitReady) // if haven't already got the release | ||
572 | 296 | { | ||
573 | 297 | _waitCondt.Wait(_buffersMutex); // wait for sync | ||
574 | 298 | } | ||
575 | 299 | _gotWaitReady = false; // reset the release flag | ||
576 | 300 | _buffersMutex.Unlock(); | ||
577 | 301 | } | ||
578 | 302 | |||
579 | 303 | //------------------------------------------------------------------------------------------------- | ||
580 | 304 | |||
581 | 305 | void DspAudioDevice::_SyncBuffer() | ||
582 | 306 | { | ||
583 | 307 | _syncMutex.Lock(); | ||
584 | 308 | _gotSyncReady = true; // set release flag | ||
585 | 309 | _syncCondt.WakeAll(); // release sync | ||
586 | 310 | _syncMutex.Unlock(); | ||
587 | 311 | } | ||
588 | 312 | |||
589 | 313 | //------------------------------------------------------------------------------------------------- | ||
590 | 314 | |||
591 | 315 | void DspAudioDevice::_StopStream() | ||
592 | 316 | { | ||
593 | 317 | _SetIsStreaming(false); | ||
594 | 318 | |||
595 | 319 | _buffersMutex.Lock(); | ||
596 | 320 | _gotWaitReady = true; // set release flag | ||
597 | 321 | _waitCondt.WakeAll(); // release sync | ||
598 | 322 | _buffersMutex.Unlock(); | ||
599 | 323 | |||
600 | 324 | if (_rtAudio->audioStream.isStreamOpen()) | ||
601 | 325 | { | ||
602 | 326 | _rtAudio->audioStream.closeStream(); | ||
603 | 327 | } | ||
604 | 328 | } | ||
605 | 329 | |||
606 | 330 | //------------------------------------------------------------------------------------------------- | ||
607 | 331 | |||
608 | 332 | void DspAudioDevice::_StartStream() | ||
609 | 333 | { | ||
610 | 334 | RtAudio::StreamParameters* inputParams = NULL; | ||
611 | 335 | RtAudio::StreamParameters* outputParams = NULL; | ||
612 | 336 | |||
613 | 337 | if (_rtAudio->inputParams.nChannels != 0) | ||
614 | 338 | { | ||
615 | 339 | inputParams = &_rtAudio->inputParams; | ||
616 | 340 | } | ||
617 | 341 | |||
618 | 342 | if (_rtAudio->outputParams.nChannels != 0) | ||
619 | 343 | { | ||
620 | 344 | outputParams = &_rtAudio->outputParams; | ||
621 | 345 | } | ||
622 | 346 | |||
623 | 347 | RtAudio::StreamOptions options; | ||
624 | 348 | options.flags |= RTAUDIO_SCHEDULE_REALTIME; | ||
625 | 349 | options.flags |= RTAUDIO_NONINTERLEAVED; | ||
626 | 350 | |||
627 | 351 | _rtAudio->audioStream.openStream(outputParams, | ||
628 | 352 | inputParams, | ||
629 | 353 | RTAUDIO_FLOAT32, | ||
630 | 354 | GetSampleRate(), | ||
631 | 355 | (unsigned int*)const_cast<int*>(GetParameter_(pBufferSize)->GetInt()), | ||
632 | 356 | &_StaticCallback, | ||
633 | 357 | this, | ||
634 | 358 | &options); | ||
635 | 359 | |||
636 | 360 | _rtAudio->audioStream.startStream(); | ||
637 | 361 | |||
638 | 362 | while (!_rtAudio->audioStream.isStreamOpen()) | ||
639 | 363 | { | ||
640 | 364 | DspThread::MsSleep(10); | ||
641 | 365 | } | ||
642 | 366 | |||
643 | 367 | _SetIsStreaming(true); | ||
644 | 368 | } | ||
645 | 369 | |||
646 | 370 | //------------------------------------------------------------------------------------------------- | ||
647 | 371 | |||
648 | 372 | int DspAudioDevice::_StaticCallback( | ||
649 | 373 | void* outputBuffer, void* inputBuffer, unsigned int, double, unsigned int, void* userData) | ||
650 | 374 | { | ||
651 | 375 | return (reinterpret_cast<DspAudioDevice*>(userData))->_DynamicCallback(inputBuffer, outputBuffer); | ||
652 | 376 | } | ||
653 | 377 | |||
654 | 378 | //------------------------------------------------------------------------------------------------- | ||
655 | 379 | |||
656 | 380 | int DspAudioDevice::_DynamicCallback(void* inputBuffer, void* outputBuffer) | ||
657 | 381 | { | ||
658 | 382 | _WaitForBuffer(); | ||
659 | 383 | |||
660 | 384 | if (IsStreaming()) | ||
661 | 385 | { | ||
662 | 386 | float* floatOutput = (float*)outputBuffer; | ||
663 | 387 | float* floatInput = (float*)inputBuffer; | ||
664 | 388 | |||
665 | 389 | if (outputBuffer != NULL) | ||
666 | 390 | { | ||
667 | 391 | for (size_t i = 0; i < _outputChannels.size(); i++) | ||
668 | 392 | { | ||
669 | 393 | if (_rtAudio->deviceList[GetCurrentDevice()].outputChannels >= (i + 1)) | ||
670 | 394 | { | ||
671 | 395 | for (size_t j = 0; j < _outputChannels[i].size(); j++) | ||
672 | 396 | { | ||
673 | 397 | *floatOutput++ = _outputChannels[i][j]; | ||
674 | 398 | } | ||
675 | 399 | } | ||
676 | 400 | } | ||
677 | 401 | } | ||
678 | 402 | |||
679 | 403 | if (inputBuffer != NULL) | ||
680 | 404 | { | ||
681 | 405 | for (size_t i = 0; i < _inputChannels.size(); i++) | ||
682 | 406 | { | ||
683 | 407 | if (_rtAudio->deviceList[GetCurrentDevice()].inputChannels >= (i + 1)) | ||
684 | 408 | { | ||
685 | 409 | for (size_t j = 0; j < _inputChannels[i].size(); j++) | ||
686 | 410 | { | ||
687 | 411 | _inputChannels[i][j] = *floatInput++; | ||
688 | 412 | } | ||
689 | 413 | } | ||
690 | 414 | } | ||
691 | 415 | } | ||
692 | 416 | } | ||
693 | 417 | else | ||
694 | 418 | { | ||
695 | 419 | _SyncBuffer(); | ||
696 | 420 | return 1; | ||
697 | 421 | } | ||
698 | 422 | |||
699 | 423 | _SyncBuffer(); | ||
700 | 424 | return 0; | ||
701 | 425 | } | ||
702 | 426 | |||
703 | 427 | //================================================================================================= | ||
704 | 0 | 428 | ||
705 | === added file 'DspAudioDevice/DspAudioDevice.h' | |||
706 | --- DspAudioDevice/DspAudioDevice.h 1970-01-01 00:00:00 +0000 | |||
707 | +++ DspAudioDevice/DspAudioDevice.h 2015-04-05 22:23:36 +0000 | |||
708 | @@ -0,0 +1,109 @@ | |||
709 | 1 | /************************************************************************ | ||
710 | 2 | DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library | ||
711 | 3 | Copyright (c) 2012-2015 Marcus Tomlinson | ||
712 | 4 | |||
713 | 5 | This file is part of DSPatch. | ||
714 | 6 | |||
715 | 7 | GNU Lesser General Public License Usage | ||
716 | 8 | This file may be used under the terms of the GNU Lesser General Public | ||
717 | 9 | License version 3.0 as published by the Free Software Foundation and | ||
718 | 10 | appearing in the file LGPLv3.txt included in the packaging of this | ||
719 | 11 | file. Please review the following information to ensure the GNU Lesser | ||
720 | 12 | General Public License version 3.0 requirements will be met: | ||
721 | 13 | http://www.gnu.org/copyleft/lgpl.html. | ||
722 | 14 | |||
723 | 15 | Other Usage | ||
724 | 16 | Alternatively, this file may be used in accordance with the terms and | ||
725 | 17 | conditions contained in a signed written agreement between you and | ||
726 | 18 | Marcus Tomlinson. | ||
727 | 19 | |||
728 | 20 | DSPatch is distributed in the hope that it will be useful, | ||
729 | 21 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
730 | 22 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
731 | 23 | ************************************************************************/ | ||
732 | 24 | |||
733 | 25 | #ifndef DSPAUDIODEVICE_H | ||
734 | 26 | #define DSPAUDIODEVICE_H | ||
735 | 27 | |||
736 | 28 | #include <DSPatch.h> | ||
737 | 29 | |||
738 | 30 | struct RtAudioMembers; | ||
739 | 31 | |||
740 | 32 | //================================================================================================= | ||
741 | 33 | |||
// DSPatch component wrapping a hardware audio device (via RtAudio).
// Exposes the device list, streaming state, buffer size and sample rate as
// component parameters, and shuttles audio between DSPatch signal buses and
// the device's input/output channels.
class DspAudioDevice : public DspComponent
{
public:
    // Component parameter indices; the trailing comment on each notes the
    // DspParameter value type it carries.
    int pDeviceList; // List
    int pIsStreaming; // Bool
    int pBufferSize; // Int
    int pSampleRate; // Int

    DspAudioDevice();
    ~DspAudioDevice();

    // Select the audio device to stream with; returns false on failure.
    bool SetDevice(int deviceIndex);

    // Device enumeration queries.
    std::string GetDeviceName(int deviceIndex) const;
    int GetDeviceInputCount(int deviceIndex) const;
    int GetDeviceOutputCount(int deviceIndex) const;
    int GetCurrentDevice() const;
    int GetDeviceCount() const;

    // Stream configuration.
    void SetBufferSize(int bufferSize);
    void SetSampleRate(int sampleRate);

    // Stream state queries.
    bool IsStreaming() const;
    int GetBufferSize() const;
    int GetSampleRate() const;

protected:
    // DspComponent overrides: per-tick audio transfer and parameter-change
    // validation.
    virtual void Process_(DspSignalBus& inputs, DspSignalBus& outputs);
    virtual bool ParameterUpdating_(int index, DspParameter const& param);

private:
    // Per-channel sample buffers exchanged with the RtAudio callback
    // (one inner vector per hardware channel).
    std::vector< std::vector<float> > _outputChannels;
    std::vector< std::vector<float> > _inputChannels;

    // Opaque holder for RtAudio state (forward-declared above), keeping
    // RtAudio headers out of this public header.
    RtAudioMembers* _rtAudio;

    // Synchronization between the DSPatch processing thread and the RtAudio
    // callback thread.
    DspMutex _buffersMutex;
    DspMutex _syncMutex;
    DspWaitCondition _waitCondt;
    DspWaitCondition _syncCondt;
    bool _gotWaitReady;
    bool _gotSyncReady;

    void _SetIsStreaming(bool isStreaming);

    // Callback-side rendezvous with the processing thread.
    void _WaitForBuffer();
    void _SyncBuffer();

    void _StopStream();
    void _StartStream();

    // RtAudio C-style callback trampoline; 'userData' is the DspAudioDevice
    // instance that opened the stream.
    static int _StaticCallback(void* outputBuffer,
                               void* inputBuffer,
                               unsigned int nBufferFrames,
                               double streamTime,
                               unsigned int status,
                               void* userData);

    int _DynamicCallback(void* inputBuffer, void* outputBuffer);
};
802 | 94 | |||
803 | 95 | //================================================================================================= | ||
804 | 96 | |||
// Plugin factory: lets the DSPatch plugin loader instantiate a
// DspAudioDevice from this shared library.
class DspAudioDevicePlugin : public DspPlugin
{
    // Construction parameters are not used by this component; the map
    // argument is ignored. (Presumably invoked through the DspPlugin base
    // interface — confirm against DspPlugin.h.)
    DspComponent* Create(std::map<std::string, DspParameter>&) const
    {
        return new DspAudioDevice();
    }
};

// Export the factory symbol expected by the DSPatch plugin loader.
EXPORT_DSPPLUGIN(DspAudioDevicePlugin)
814 | 106 | |||
815 | 107 | //================================================================================================= | ||
816 | 108 | |||
817 | 109 | #endif // DSPAUDIODEVICE_H | ||
818 | 0 | 110 | ||
819 | === added directory 'DspAudioDevice/rtaudio' | |||
820 | === added file 'DspAudioDevice/rtaudio/RtAudio.cpp' | |||
821 | --- DspAudioDevice/rtaudio/RtAudio.cpp 1970-01-01 00:00:00 +0000 | |||
822 | +++ DspAudioDevice/rtaudio/RtAudio.cpp 2015-04-05 22:23:36 +0000 | |||
823 | @@ -0,0 +1,10136 @@ | |||
824 | 1 | /************************************************************************/ | ||
825 | 2 | /*! \class RtAudio | ||
826 | 3 | \brief Realtime audio i/o C++ classes. | ||
827 | 4 | |||
828 | 5 | RtAudio provides a common API (Application Programming Interface) | ||
829 | 6 | for realtime audio input/output across Linux (native ALSA, Jack, | ||
830 | 7 | and OSS), Macintosh OS X (CoreAudio and Jack), and Windows | ||
831 | 8 | (DirectSound, ASIO and WASAPI) operating systems. | ||
832 | 9 | |||
833 | 10 | RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/ | ||
834 | 11 | |||
835 | 12 | RtAudio: realtime audio i/o C++ classes | ||
836 | 13 | Copyright (c) 2001-2014 Gary P. Scavone | ||
837 | 14 | |||
838 | 15 | Permission is hereby granted, free of charge, to any person | ||
839 | 16 | obtaining a copy of this software and associated documentation files | ||
840 | 17 | (the "Software"), to deal in the Software without restriction, | ||
841 | 18 | including without limitation the rights to use, copy, modify, merge, | ||
842 | 19 | publish, distribute, sublicense, and/or sell copies of the Software, | ||
843 | 20 | and to permit persons to whom the Software is furnished to do so, | ||
844 | 21 | subject to the following conditions: | ||
845 | 22 | |||
846 | 23 | The above copyright notice and this permission notice shall be | ||
847 | 24 | included in all copies or substantial portions of the Software. | ||
848 | 25 | |||
849 | 26 | Any person wishing to distribute modifications to the Software is | ||
850 | 27 | asked to send the modifications to the original developer so that | ||
851 | 28 | they can be incorporated into the canonical version. This is, | ||
852 | 29 | however, not a binding provision of this license. | ||
853 | 30 | |||
854 | 31 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
855 | 32 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
856 | 33 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
857 | 34 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR | ||
858 | 35 | ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||
859 | 36 | CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
860 | 37 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
861 | 38 | */ | ||
862 | 39 | /************************************************************************/ | ||
863 | 40 | |||
864 | 41 | // RtAudio: Version 4.1.1 | ||
865 | 42 | |||
866 | 43 | #include "RtAudio.h" | ||
867 | 44 | #include <iostream> | ||
868 | 45 | #include <cstdlib> | ||
869 | 46 | #include <cstring> | ||
870 | 47 | #include <climits> | ||
871 | 48 | |||
// Static variable definitions.
// Candidate sample rates probed when querying device capabilities; the array
// holds exactly MAX_SAMPLE_RATES (14) entries.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};

// Map the generic MUTEX_* macros onto the platform's native primitives:
// Windows critical sections or POSIX pthread mutexes.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  // NOTE(review): this fallback defines only INITIALIZE/DESTROY — MUTEX_LOCK
  // and MUTEX_UNLOCK are left undefined. Presumably dummy builds never lock;
  // confirm before relying on this branch.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
894 | 71 | |||
895 | 72 | // *************************************************** // | ||
896 | 73 | // | ||
897 | 74 | // RtAudio definitions. | ||
898 | 75 | // | ||
899 | 76 | // *************************************************** // | ||
900 | 77 | |||
901 | 78 | std::string RtAudio :: getVersion( void ) throw() | ||
902 | 79 | { | ||
903 | 80 | return RTAUDIO_VERSION; | ||
904 | 81 | } | ||
905 | 82 | |||
// Fill 'apis' with the audio backends compiled into this build. Any previous
// contents of the vector are discarded.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
{
  apis.clear();

  // The order here will control the order of RtAudio's API search in
  // the constructor.
#if defined(__UNIX_JACK__)
  apis.push_back( UNIX_JACK );
#endif
#if defined(__LINUX_ALSA__)
  apis.push_back( LINUX_ALSA );
#endif
#if defined(__LINUX_PULSE__)
  apis.push_back( LINUX_PULSE );
#endif
#if defined(__LINUX_OSS__)
  apis.push_back( LINUX_OSS );
#endif
#if defined(__WINDOWS_ASIO__)
  apis.push_back( WINDOWS_ASIO );
#endif
#if defined(__WINDOWS_WASAPI__)
  apis.push_back( WINDOWS_WASAPI );
#endif
#if defined(__WINDOWS_DS__)
  apis.push_back( WINDOWS_DS );
#endif
#if defined(__MACOSX_CORE__)
  apis.push_back( MACOSX_CORE );
#endif
#if defined(__RTAUDIO_DUMMY__)
  apis.push_back( RTAUDIO_DUMMY );
#endif
}
940 | 117 | |||
// Replace the current backend implementation with one for 'api'. Deletes any
// existing backend first. Leaves rtapi_ null when the requested API was not
// compiled into this build — callers must check rtapi_ afterwards.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
984 | 161 | |||
985 | 162 | RtAudio :: RtAudio( RtAudio::Api api ) | ||
986 | 163 | { | ||
987 | 164 | rtapi_ = 0; | ||
988 | 165 | |||
989 | 166 | if ( api != UNSPECIFIED ) { | ||
990 | 167 | // Attempt to open the specified API. | ||
991 | 168 | openRtApi( api ); | ||
992 | 169 | if ( rtapi_ ) return; | ||
993 | 170 | |||
994 | 171 | // No compiled support for specified API value. Issue a debug | ||
995 | 172 | // warning and continue as if no API was specified. | ||
996 | 173 | std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl; | ||
997 | 174 | } | ||
998 | 175 | |||
999 | 176 | // Iterate through the compiled APIs and return as soon as we find | ||
1000 | 177 | // one with at least one device or we reach the end of the list. | ||
1001 | 178 | std::vector< RtAudio::Api > apis; | ||
1002 | 179 | getCompiledApi( apis ); | ||
1003 | 180 | for ( unsigned int i=0; i<apis.size(); i++ ) { | ||
1004 | 181 | openRtApi( apis[i] ); | ||
1005 | 182 | if ( rtapi_->getDeviceCount() ) break; | ||
1006 | 183 | } | ||
1007 | 184 | |||
1008 | 185 | if ( rtapi_ ) return; | ||
1009 | 186 | |||
1010 | 187 | // It should not be possible to get here because the preprocessor | ||
1011 | 188 | // definition __RTAUDIO_DUMMY__ is automatically defined if no | ||
1012 | 189 | // API-specific definitions are passed to the compiler. But just in | ||
1013 | 190 | // case something weird happens, we'll thow an error. | ||
1014 | 191 | std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n"; | ||
1015 | 192 | throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) ); | ||
1016 | 193 | } | ||
1017 | 194 | |||
1018 | 195 | RtAudio :: ~RtAudio() throw() | ||
1019 | 196 | { | ||
1020 | 197 | if ( rtapi_ ) | ||
1021 | 198 | delete rtapi_; | ||
1022 | 199 | } | ||
1023 | 200 | |||
1024 | 201 | void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters, | ||
1025 | 202 | RtAudio::StreamParameters *inputParameters, | ||
1026 | 203 | RtAudioFormat format, unsigned int sampleRate, | ||
1027 | 204 | unsigned int *bufferFrames, | ||
1028 | 205 | RtAudioCallback callback, void *userData, | ||
1029 | 206 | RtAudio::StreamOptions *options, | ||
1030 | 207 | RtAudioErrorCallback errorCallback ) | ||
1031 | 208 | { | ||
1032 | 209 | return rtapi_->openStream( outputParameters, inputParameters, format, | ||
1033 | 210 | sampleRate, bufferFrames, callback, | ||
1034 | 211 | userData, options, errorCallback ); | ||
1035 | 212 | } | ||
1036 | 213 | |||
1037 | 214 | // *************************************************** // | ||
1038 | 215 | // | ||
1039 | 216 | // Public RtApi definitions (see end of file for | ||
1040 | 217 | // private or protected utility functions). | ||
1041 | 218 | // | ||
1042 | 219 | // *************************************************** // | ||
1043 | 220 | |||
// Base-class constructor: put the stream record into a known-closed state
// (no buffers, no API handle) and create the stream mutex.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
}
1055 | 232 | |||
// Base-class destructor: destroy the stream mutex created in the constructor.
// Subclasses are responsible for closing any open stream before this runs.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
1060 | 237 | |||
// Validate the open request, probe/open the requested device(s) via the
// backend's probeDeviceOpen(), and record the callback info. On any failure
// the error is reported through error() and the function returns without
// opening a stream. On success the stream is left in the STOPPED state.
// The order of the checks (and hence of the reported errors) is part of the
// observable behaviour and must not be changed.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Validate device indices against the backend's device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Output side is probed/opened first; the backend override does the work.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo the already-opened output side before reporting failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // Stash the user callback/context for the audio thread.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the number of buffers actually chosen by the backend.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
1153 | 330 | |||
// Base-class fallback: backends that cannot report a default input device
// select device index 0.
unsigned int RtApi :: getDefaultInputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
1159 | 336 | |||
// Base-class fallback: backends that cannot report a default output device
// select device index 0.
unsigned int RtApi :: getDefaultOutputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}
1165 | 342 | |||
// Base-class stub; concrete backends override this to tear down the stream.
void RtApi :: closeStream( void )
{
  // MUST be implemented in subclasses!
  return;
}
1171 | 348 | |||
// Base-class stub: always reports FAILURE. Concrete backends override this to
// probe the device and allocate stream resources.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
1180 | 357 | |||
// Advance the stream clock by one buffer's duration.
void RtApi :: tickStreamTime( void )
{
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Remember when this tick happened so getStreamTime() can interpolate.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
1193 | 370 | |||
1194 | 371 | long RtApi :: getStreamLatency( void ) | ||
1195 | 372 | { | ||
1196 | 373 | verifyStream(); | ||
1197 | 374 | |||
1198 | 375 | long totalLatency = 0; | ||
1199 | 376 | if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) | ||
1200 | 377 | totalLatency = stream_.latency[0]; | ||
1201 | 378 | if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) | ||
1202 | 379 | totalLatency += stream_.latency[1]; | ||
1203 | 380 | |||
1204 | 381 | return totalLatency; | ||
1205 | 382 | } | ||
1206 | 383 | |||
// Return the current stream time in seconds. When gettimeofday() is
// available, the coarse per-buffer clock is refined with the wall-clock time
// elapsed since the last tickStreamTime() call.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // No interpolation unless the stream is running and has ticked at least once.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
1229 | 406 | |||
// Set the stream time (seconds) to an explicit value. Negative values — and
// NaN, which fails the >= comparison — are silently ignored.
void RtApi :: setStreamTime( double time )
{
  verifyStream();

  if ( time >= 0.0 )
    stream_.streamTime = time;
}
1237 | 414 | |||
// Return the sample rate of the currently open stream (verifyStream()
// reports an error if no stream is open).
unsigned int RtApi :: getStreamSampleRate( void )
{
  verifyStream();

  return stream_.sampleRate;
}
1244 | 421 | |||
1245 | 422 | |||
1246 | 423 | // *************************************************** // | ||
1247 | 424 | // | ||
1248 | 425 | // OS/API-specific methods. | ||
1249 | 426 | // | ||
1250 | 427 | // *************************************************** // | ||
1251 | 428 | |||
1252 | 429 | #if defined(__MACOSX_CORE__) | ||
1253 | 430 | |||
1254 | 431 | // The OS X CoreAudio API is designed to use a separate callback | ||
1255 | 432 | // procedure for each of its audio devices. A single RtAudio duplex | ||
1256 | 433 | // stream using two different devices is supported here, though it | ||
1257 | 434 | // cannot be guaranteed to always behave correctly because we cannot | ||
1258 | 435 | // synchronize these two callbacks. | ||
1259 | 436 | // | ||
1260 | 437 | // A property listener is installed for over/underrun information. | ||
1261 | 438 | // However, no functionality is currently provided to allow property | ||
1262 | 439 | // listeners to trigger user handlers because it is unclear what could | ||
1263 | 440 | // be done if a critical stream parameter (buffer size, sample rate, | ||
1264 | 441 | // device disconnect) notification arrived. The listeners entail | ||
1265 | 442 | // quite a bit of extra code and most likely, a user program wouldn't | ||
1266 | 443 | // be prepared for the result anyway. However, we do provide a flag | ||
1267 | 444 | // to the client callback function to inform of an over/underrun. | ||
1268 | 445 | |||
1269 | 446 | // A structure to hold various information related to the CoreAudio API | ||
1270 | 447 | // implementation. | ||
// Per-stream bookkeeping for the CoreAudio backend; stored in
// stream_.apiHandle. Two-element arrays index output ([0]) and input ([1]).
struct CoreHandle {
  AudioDeviceID id[2];    // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
#endif
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  bool xrun[2];           // over/underrun flags reported to the user callback
  char *deviceBuffer;
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.

  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
1287 | 464 | |||
// CoreAudio backend constructor: installs the hardware run-loop property
// required on OS X 10.6+ for device property queries/updates to work.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
1306 | 483 | |||
// CoreAudio backend destructor: closes any open stream while the derived
// class is still alive (the base destructor runs afterwards).
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
1314 | 491 | |||
// Count the audio devices known to CoreAudio by querying the size of the
// system object's device-list property; returns 0 (with a warning) on error.
unsigned int RtApiCore :: getDeviceCount( void )
{
  // Find out how many audio devices there are, if any.
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  // The property is an array of AudioDeviceIDs.
  return dataSize / sizeof( AudioDeviceID );
}
1329 | 506 | |||
1330 | 507 | unsigned int RtApiCore :: getDefaultInputDevice( void ) | ||
1331 | 508 | { | ||
1332 | 509 | unsigned int nDevices = getDeviceCount(); | ||
1333 | 510 | if ( nDevices <= 1 ) return 0; | ||
1334 | 511 | |||
1335 | 512 | AudioDeviceID id; | ||
1336 | 513 | UInt32 dataSize = sizeof( AudioDeviceID ); | ||
1337 | 514 | AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; | ||
1338 | 515 | OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id ); | ||
1339 | 516 | if ( result != noErr ) { | ||
1340 | 517 | errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device."; | ||
1341 | 518 | error( RtAudioError::WARNING ); | ||
1342 | 519 | return 0; | ||
1343 | 520 | } | ||
1344 | 521 | |||
1345 | 522 | dataSize *= nDevices; | ||
1346 | 523 | AudioDeviceID deviceList[ nDevices ]; | ||
1347 | 524 | property.mSelector = kAudioHardwarePropertyDevices; | ||
1348 | 525 | result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList ); | ||
1349 | 526 | if ( result != noErr ) { | ||
1350 | 527 | errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs."; | ||
1351 | 528 | error( RtAudioError::WARNING ); | ||
1352 | 529 | return 0; | ||
1353 | 530 | } | ||
1354 | 531 | |||
1355 | 532 | for ( unsigned int i=0; i<nDevices; i++ ) | ||
1356 | 533 | if ( id == deviceList[i] ) return i; | ||
1357 | 534 | |||
1358 | 535 | errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!"; | ||
1359 | 536 | error( RtAudioError::WARNING ); | ||
1360 | 537 | return 0; | ||
1361 | 538 | } | ||
1362 | 539 | |||
1363 | 540 | unsigned int RtApiCore :: getDefaultOutputDevice( void ) | ||
1364 | 541 | { | ||
1365 | 542 | unsigned int nDevices = getDeviceCount(); | ||
1366 | 543 | if ( nDevices <= 1 ) return 0; | ||
1367 | 544 | |||
1368 | 545 | AudioDeviceID id; | ||
1369 | 546 | UInt32 dataSize = sizeof( AudioDeviceID ); | ||
1370 | 547 | AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; | ||
1371 | 548 | OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id ); | ||
1372 | 549 | if ( result != noErr ) { | ||
1373 | 550 | errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device."; | ||
1374 | 551 | error( RtAudioError::WARNING ); | ||
1375 | 552 | return 0; | ||
1376 | 553 | } | ||
1377 | 554 | |||
1378 | 555 | dataSize = sizeof( AudioDeviceID ) * nDevices; | ||
1379 | 556 | AudioDeviceID deviceList[ nDevices ]; | ||
1380 | 557 | property.mSelector = kAudioHardwarePropertyDevices; | ||
1381 | 558 | result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList ); | ||
1382 | 559 | if ( result != noErr ) { | ||
1383 | 560 | errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs."; | ||
1384 | 561 | error( RtAudioError::WARNING ); | ||
1385 | 562 | return 0; | ||
1386 | 563 | } | ||
1387 | 564 | |||
1388 | 565 | for ( unsigned int i=0; i<nDevices; i++ ) | ||
1389 | 566 | if ( id == deviceList[i] ) return i; | ||
1390 | 567 | |||
1391 | 568 | errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!"; | ||
1392 | 569 | error( RtAudioError::WARNING ); | ||
1393 | 570 | return 0; | ||
1394 | 571 | } | ||
1395 | 572 | |||
// Probe one CoreAudio device (addressed by enumeration index, not by
// AudioDeviceID) and fill in an RtAudio::DeviceInfo: combined
// "manufacturer: name" string, output/input/duplex channel counts,
// supported sample rates, and native format. On any failure a WARNING
// (or INVALID_USE for bad arguments) is raised and the partially filled
// info is returned with info.probed still false.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // NOTE(review): variable-length array — a GCC/Clang extension, not
  // standard C++. Works with the toolchains this file targets.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  // Translate the enumeration index into the CoreAudio device ID used by
  // all the per-device queries below.
  AudioDeviceID id = deviceList[ device ];

  // Get the device name.
  // NOTE(review): the error strings below say "probeDeviceInfo" — a legacy
  // name inconsistent with this function; left as-is here (strings are
  // runtime behavior).
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // Convert the CFString to a C string. length*3+1 bytes is used as the
  // conversion buffer size — presumably a worst-case-bytes-per-character
  // bound for the chosen encoding; TODO confirm it covers 4-byte UTF-8
  // sequences.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  // Append the device name after the "manufacturer: " prefix.
  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".
  // The property size is queried first so the AudioBufferList (which has a
  // variable number of buffers) can be allocated to the exact size.
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information.
  // Total output channels = sum of channels over all output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration".
  // Same two-step query/allocate/read dance as above, but on the input scope
  // (mSelector is still kAudioDevicePropertyStreamConfiguration).
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  // Duplex channel count is the smaller of the two directions.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  // Rates are queried on the output scope when the device has outputs,
  // otherwise on the (already selected) input scope.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // NOTE(review): another variable-length array (non-standard C++).
  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery. It
  // seems that it can either return individual rates or a range of
  // rates. I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
      info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
    else {
      haveValueRange = true;
      // Intersect all reported ranges: keep the largest minimum and the
      // smallest maximum (the most conservative combined range).
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  // When a true range was reported, expose the standard rates from
  // SAMPLE_RATES that fall inside the intersected range.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
        info.sampleRates.push_back( SAMPLE_RATES[k] );
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Flag default input/output status by comparing against the indices
  // reported by getDefaultOutputDevice()/getDefaultInputDevice().
  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  // All queries succeeded; mark the info as valid.
  info.probed = true;
  return info;
}
1627 | 804 | |||
1628 | 805 | static OSStatus callbackHandler( AudioDeviceID inDevice, | ||
1629 | 806 | const AudioTimeStamp* /*inNow*/, | ||
1630 | 807 | const AudioBufferList* inInputData, | ||
1631 | 808 | const AudioTimeStamp* /*inInputTime*/, | ||
1632 | 809 | AudioBufferList* outOutputData, | ||
1633 | 810 | const AudioTimeStamp* /*inOutputTime*/, | ||
1634 | 811 | void* infoPointer ) | ||
1635 | 812 | { | ||
1636 | 813 | CallbackInfo *info = (CallbackInfo *) infoPointer; | ||
1637 | 814 | |||
1638 | 815 | RtApiCore *object = (RtApiCore *) info->object; | ||
1639 | 816 | if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false ) | ||
1640 | 817 | return kAudioHardwareUnspecifiedError; | ||
1641 | 818 | else | ||
1642 | 819 | return kAudioHardwareNoError; | ||
1643 | 820 | } | ||
1644 | 821 | |||
1645 | 822 | static OSStatus xrunListener( AudioObjectID /*inDevice*/, | ||
1646 | 823 | UInt32 nAddresses, | ||
1647 | 824 | const AudioObjectPropertyAddress properties[], | ||
1648 | 825 | void* handlePointer ) | ||
1649 | 826 | { | ||
1650 | 827 | CoreHandle *handle = (CoreHandle *) handlePointer; | ||
1651 | 828 | for ( UInt32 i=0; i<nAddresses; i++ ) { | ||
1652 | 829 | if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) { | ||
1653 | 830 | if ( properties[i].mScope == kAudioDevicePropertyScopeInput ) | ||
1654 | 831 | handle->xrun[1] = true; | ||
1655 | 832 | else | ||
1656 | 833 | handle->xrun[0] = true; | ||
1657 | 834 | } | ||
1658 | 835 | } | ||
1659 | 836 | |||
1660 | 837 | return kAudioHardwareNoError; | ||
1661 | 838 | } | ||
1662 | 839 | |||
1663 | 840 | static OSStatus rateListener( AudioObjectID inDevice, | ||
1664 | 841 | UInt32 /*nAddresses*/, | ||
1665 | 842 | const AudioObjectPropertyAddress /*properties*/[], | ||
1666 | 843 | void* ratePointer ) | ||
1667 | 844 | { | ||
1668 | 845 | Float64 *rate = (Float64 *) ratePointer; | ||
1669 | 846 | UInt32 dataSize = sizeof( Float64 ); | ||
1670 | 847 | AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate, | ||
1671 | 848 | kAudioObjectPropertyScopeGlobal, | ||
1672 | 849 | kAudioObjectPropertyElementMaster }; | ||
1673 | 850 | AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate ); | ||
1674 | 851 | return kAudioHardwareNoError; | ||
1675 | 852 | } | ||
1676 | 853 | |||
1677 | 854 | bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels, | ||
1678 | 855 | unsigned int firstChannel, unsigned int sampleRate, | ||
1679 | 856 | RtAudioFormat format, unsigned int *bufferSize, | ||
1680 | 857 | RtAudio::StreamOptions *options ) | ||
1681 | 858 | { | ||
1682 | 859 | // Get device ID | ||
1683 | 860 | unsigned int nDevices = getDeviceCount(); | ||
1684 | 861 | if ( nDevices == 0 ) { | ||
1685 | 862 | // This should not happen because a check is made before this function is called. | ||
1686 | 863 | errorText_ = "RtApiCore::probeDeviceOpen: no devices found!"; | ||
1687 | 864 | return FAILURE; | ||
1688 | 865 | } | ||
1689 | 866 | |||
1690 | 867 | if ( device >= nDevices ) { | ||
1691 | 868 | // This should not happen because a check is made before this function is called. | ||
1692 | 869 | errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!"; | ||
1693 | 870 | return FAILURE; | ||
1694 | 871 | } | ||
1695 | 872 | |||
1696 | 873 | AudioDeviceID deviceList[ nDevices ]; | ||
1697 | 874 | UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices; | ||
1698 | 875 | AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices, | ||
1699 | 876 | kAudioObjectPropertyScopeGlobal, | ||
1700 | 877 | kAudioObjectPropertyElementMaster }; | ||
1701 | 878 | OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, | ||
1702 | 879 | 0, NULL, &dataSize, (void *) &deviceList ); | ||
1703 | 880 | if ( result != noErr ) { | ||
1704 | 881 | errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs."; | ||
1705 | 882 | return FAILURE; | ||
1706 | 883 | } | ||
1707 | 884 | |||
1708 | 885 | AudioDeviceID id = deviceList[ device ]; | ||
1709 | 886 | |||
1710 | 887 | // Setup for stream mode. | ||
1711 | 888 | bool isInput = false; | ||
1712 | 889 | if ( mode == INPUT ) { | ||
1713 | 890 | isInput = true; | ||
1714 | 891 | property.mScope = kAudioDevicePropertyScopeInput; | ||
1715 | 892 | } | ||
1716 | 893 | else | ||
1717 | 894 | property.mScope = kAudioDevicePropertyScopeOutput; | ||
1718 | 895 | |||
1719 | 896 | // Get the stream "configuration". | ||
1720 | 897 | AudioBufferList *bufferList = nil; | ||
1721 | 898 | dataSize = 0; | ||
1722 | 899 | property.mSelector = kAudioDevicePropertyStreamConfiguration; | ||
1723 | 900 | result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize ); | ||
1724 | 901 | if ( result != noErr || dataSize == 0 ) { | ||
1725 | 902 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ")."; | ||
1726 | 903 | errorText_ = errorStream_.str(); | ||
1727 | 904 | return FAILURE; | ||
1728 | 905 | } | ||
1729 | 906 | |||
1730 | 907 | // Allocate the AudioBufferList. | ||
1731 | 908 | bufferList = (AudioBufferList *) malloc( dataSize ); | ||
1732 | 909 | if ( bufferList == NULL ) { | ||
1733 | 910 | errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList."; | ||
1734 | 911 | return FAILURE; | ||
1735 | 912 | } | ||
1736 | 913 | |||
1737 | 914 | result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList ); | ||
1738 | 915 | if (result != noErr || dataSize == 0) { | ||
1739 | 916 | free( bufferList ); | ||
1740 | 917 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ")."; | ||
1741 | 918 | errorText_ = errorStream_.str(); | ||
1742 | 919 | return FAILURE; | ||
1743 | 920 | } | ||
1744 | 921 | |||
1745 | 922 | // Search for one or more streams that contain the desired number of | ||
1746 | 923 | // channels. CoreAudio devices can have an arbitrary number of | ||
1747 | 924 | // streams and each stream can have an arbitrary number of channels. | ||
1748 | 925 | // For each stream, a single buffer of interleaved samples is | ||
1749 | 926 | // provided. RtAudio prefers the use of one stream of interleaved | ||
1750 | 927 | // data or multiple consecutive single-channel streams. However, we | ||
1751 | 928 | // now support multiple consecutive multi-channel streams of | ||
1752 | 929 | // interleaved data as well. | ||
1753 | 930 | UInt32 iStream, offsetCounter = firstChannel; | ||
1754 | 931 | UInt32 nStreams = bufferList->mNumberBuffers; | ||
1755 | 932 | bool monoMode = false; | ||
1756 | 933 | bool foundStream = false; | ||
1757 | 934 | |||
1758 | 935 | // First check that the device supports the requested number of | ||
1759 | 936 | // channels. | ||
1760 | 937 | UInt32 deviceChannels = 0; | ||
1761 | 938 | for ( iStream=0; iStream<nStreams; iStream++ ) | ||
1762 | 939 | deviceChannels += bufferList->mBuffers[iStream].mNumberChannels; | ||
1763 | 940 | |||
1764 | 941 | if ( deviceChannels < ( channels + firstChannel ) ) { | ||
1765 | 942 | free( bufferList ); | ||
1766 | 943 | errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count."; | ||
1767 | 944 | errorText_ = errorStream_.str(); | ||
1768 | 945 | return FAILURE; | ||
1769 | 946 | } | ||
1770 | 947 | |||
1771 | 948 | // Look for a single stream meeting our needs. | ||
1772 | 949 | UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0; | ||
1773 | 950 | for ( iStream=0; iStream<nStreams; iStream++ ) { | ||
1774 | 951 | streamChannels = bufferList->mBuffers[iStream].mNumberChannels; | ||
1775 | 952 | if ( streamChannels >= channels + offsetCounter ) { | ||
1776 | 953 | firstStream = iStream; | ||
1777 | 954 | channelOffset = offsetCounter; | ||
1778 | 955 | foundStream = true; | ||
1779 | 956 | break; | ||
1780 | 957 | } | ||
1781 | 958 | if ( streamChannels > offsetCounter ) break; | ||
1782 | 959 | offsetCounter -= streamChannels; | ||
1783 | 960 | } | ||
1784 | 961 | |||
1785 | 962 | // If we didn't find a single stream above, then we should be able | ||
1786 | 963 | // to meet the channel specification with multiple streams. | ||
1787 | 964 | if ( foundStream == false ) { | ||
1788 | 965 | monoMode = true; | ||
1789 | 966 | offsetCounter = firstChannel; | ||
1790 | 967 | for ( iStream=0; iStream<nStreams; iStream++ ) { | ||
1791 | 968 | streamChannels = bufferList->mBuffers[iStream].mNumberChannels; | ||
1792 | 969 | if ( streamChannels > offsetCounter ) break; | ||
1793 | 970 | offsetCounter -= streamChannels; | ||
1794 | 971 | } | ||
1795 | 972 | |||
1796 | 973 | firstStream = iStream; | ||
1797 | 974 | channelOffset = offsetCounter; | ||
1798 | 975 | Int32 channelCounter = channels + offsetCounter - streamChannels; | ||
1799 | 976 | |||
1800 | 977 | if ( streamChannels > 1 ) monoMode = false; | ||
1801 | 978 | while ( channelCounter > 0 ) { | ||
1802 | 979 | streamChannels = bufferList->mBuffers[++iStream].mNumberChannels; | ||
1803 | 980 | if ( streamChannels > 1 ) monoMode = false; | ||
1804 | 981 | channelCounter -= streamChannels; | ||
1805 | 982 | streamCount++; | ||
1806 | 983 | } | ||
1807 | 984 | } | ||
1808 | 985 | |||
1809 | 986 | free( bufferList ); | ||
1810 | 987 | |||
1811 | 988 | // Determine the buffer size. | ||
1812 | 989 | AudioValueRange bufferRange; | ||
1813 | 990 | dataSize = sizeof( AudioValueRange ); | ||
1814 | 991 | property.mSelector = kAudioDevicePropertyBufferFrameSizeRange; | ||
1815 | 992 | result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange ); | ||
1816 | 993 | |||
1817 | 994 | if ( result != noErr ) { | ||
1818 | 995 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ")."; | ||
1819 | 996 | errorText_ = errorStream_.str(); | ||
1820 | 997 | return FAILURE; | ||
1821 | 998 | } | ||
1822 | 999 | |||
1823 | 1000 | if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum; | ||
1824 | 1001 | else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum; | ||
1825 | 1002 | if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum; | ||
1826 | 1003 | |||
1827 | 1004 | // Set the buffer size. For multiple streams, I'm assuming we only | ||
1828 | 1005 | // need to make this setting for the master channel. | ||
1829 | 1006 | UInt32 theSize = (UInt32) *bufferSize; | ||
1830 | 1007 | dataSize = sizeof( UInt32 ); | ||
1831 | 1008 | property.mSelector = kAudioDevicePropertyBufferFrameSize; | ||
1832 | 1009 | result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize ); | ||
1833 | 1010 | |||
1834 | 1011 | if ( result != noErr ) { | ||
1835 | 1012 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ")."; | ||
1836 | 1013 | errorText_ = errorStream_.str(); | ||
1837 | 1014 | return FAILURE; | ||
1838 | 1015 | } | ||
1839 | 1016 | |||
1840 | 1017 | // If attempting to setup a duplex stream, the bufferSize parameter | ||
1841 | 1018 | // MUST be the same in both directions! | ||
1842 | 1019 | *bufferSize = theSize; | ||
1843 | 1020 | if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) { | ||
1844 | 1021 | errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ")."; | ||
1845 | 1022 | errorText_ = errorStream_.str(); | ||
1846 | 1023 | return FAILURE; | ||
1847 | 1024 | } | ||
1848 | 1025 | |||
1849 | 1026 | stream_.bufferSize = *bufferSize; | ||
1850 | 1027 | stream_.nBuffers = 1; | ||
1851 | 1028 | |||
1852 | 1029 | // Try to set "hog" mode ... it's not clear to me this is working. | ||
1853 | 1030 | if ( options && options->flags & RTAUDIO_HOG_DEVICE ) { | ||
1854 | 1031 | pid_t hog_pid; | ||
1855 | 1032 | dataSize = sizeof( hog_pid ); | ||
1856 | 1033 | property.mSelector = kAudioDevicePropertyHogMode; | ||
1857 | 1034 | result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid ); | ||
1858 | 1035 | if ( result != noErr ) { | ||
1859 | 1036 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!"; | ||
1860 | 1037 | errorText_ = errorStream_.str(); | ||
1861 | 1038 | return FAILURE; | ||
1862 | 1039 | } | ||
1863 | 1040 | |||
1864 | 1041 | if ( hog_pid != getpid() ) { | ||
1865 | 1042 | hog_pid = getpid(); | ||
1866 | 1043 | result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid ); | ||
1867 | 1044 | if ( result != noErr ) { | ||
1868 | 1045 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!"; | ||
1869 | 1046 | errorText_ = errorStream_.str(); | ||
1870 | 1047 | return FAILURE; | ||
1871 | 1048 | } | ||
1872 | 1049 | } | ||
1873 | 1050 | } | ||
1874 | 1051 | |||
1875 | 1052 | // Check and if necessary, change the sample rate for the device. | ||
1876 | 1053 | Float64 nominalRate; | ||
1877 | 1054 | dataSize = sizeof( Float64 ); | ||
1878 | 1055 | property.mSelector = kAudioDevicePropertyNominalSampleRate; | ||
1879 | 1056 | result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate ); | ||
1880 | 1057 | if ( result != noErr ) { | ||
1881 | 1058 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate."; | ||
1882 | 1059 | errorText_ = errorStream_.str(); | ||
1883 | 1060 | return FAILURE; | ||
1884 | 1061 | } | ||
1885 | 1062 | |||
1886 | 1063 | // Only change the sample rate if off by more than 1 Hz. | ||
1887 | 1064 | if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) { | ||
1888 | 1065 | |||
1889 | 1066 | // Set a property listener for the sample rate change | ||
1890 | 1067 | Float64 reportedRate = 0.0; | ||
1891 | 1068 | AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; | ||
1892 | 1069 | result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate ); | ||
1893 | 1070 | if ( result != noErr ) { | ||
1894 | 1071 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ")."; | ||
1895 | 1072 | errorText_ = errorStream_.str(); | ||
1896 | 1073 | return FAILURE; | ||
1897 | 1074 | } | ||
1898 | 1075 | |||
1899 | 1076 | nominalRate = (Float64) sampleRate; | ||
1900 | 1077 | result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate ); | ||
1901 | 1078 | if ( result != noErr ) { | ||
1902 | 1079 | AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate ); | ||
1903 | 1080 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ")."; | ||
1904 | 1081 | errorText_ = errorStream_.str(); | ||
1905 | 1082 | return FAILURE; | ||
1906 | 1083 | } | ||
1907 | 1084 | |||
1908 | 1085 | // Now wait until the reported nominal rate is what we just set. | ||
1909 | 1086 | UInt32 microCounter = 0; | ||
1910 | 1087 | while ( reportedRate != nominalRate ) { | ||
1911 | 1088 | microCounter += 5000; | ||
1912 | 1089 | if ( microCounter > 5000000 ) break; | ||
1913 | 1090 | usleep( 5000 ); | ||
1914 | 1091 | } | ||
1915 | 1092 | |||
1916 | 1093 | // Remove the property listener. | ||
1917 | 1094 | AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate ); | ||
1918 | 1095 | |||
1919 | 1096 | if ( microCounter > 5000000 ) { | ||
1920 | 1097 | errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ")."; | ||
1921 | 1098 | errorText_ = errorStream_.str(); | ||
1922 | 1099 | return FAILURE; | ||
1923 | 1100 | } | ||
1924 | 1101 | } | ||
1925 | 1102 | |||
1926 | 1103 | // Now set the stream format for all streams. Also, check the | ||
1927 | 1104 | // physical format of the device and change that if necessary. | ||
1928 | 1105 | AudioStreamBasicDescription description; | ||
1929 | 1106 | dataSize = sizeof( AudioStreamBasicDescription ); | ||
1930 | 1107 | property.mSelector = kAudioStreamPropertyVirtualFormat; | ||
1931 | 1108 | result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description ); | ||
1932 | 1109 | if ( result != noErr ) { | ||
1933 | 1110 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ")."; | ||
1934 | 1111 | errorText_ = errorStream_.str(); | ||
1935 | 1112 | return FAILURE; | ||
1936 | 1113 | } | ||
1937 | 1114 | |||
1938 | 1115 | // Set the sample rate and data format id. However, only make the | ||
1939 | 1116 | // change if the sample rate is not within 1.0 of the desired | ||
1940 | 1117 | // rate and the format is not linear pcm. | ||
1941 | 1118 | bool updateFormat = false; | ||
1942 | 1119 | if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) { | ||
1943 | 1120 | description.mSampleRate = (Float64) sampleRate; | ||
1944 | 1121 | updateFormat = true; | ||
1945 | 1122 | } | ||
1946 | 1123 | |||
1947 | 1124 | if ( description.mFormatID != kAudioFormatLinearPCM ) { | ||
1948 | 1125 | description.mFormatID = kAudioFormatLinearPCM; | ||
1949 | 1126 | updateFormat = true; | ||
1950 | 1127 | } | ||
1951 | 1128 | |||
1952 | 1129 | if ( updateFormat ) { | ||
1953 | 1130 | result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description ); | ||
1954 | 1131 | if ( result != noErr ) { | ||
1955 | 1132 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ")."; | ||
1956 | 1133 | errorText_ = errorStream_.str(); | ||
1957 | 1134 | return FAILURE; | ||
1958 | 1135 | } | ||
1959 | 1136 | } | ||
1960 | 1137 | |||
1961 | 1138 | // Now check the physical format. | ||
1962 | 1139 | property.mSelector = kAudioStreamPropertyPhysicalFormat; | ||
1963 | 1140 | result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description ); | ||
1964 | 1141 | if ( result != noErr ) { | ||
1965 | 1142 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ")."; | ||
1966 | 1143 | errorText_ = errorStream_.str(); | ||
1967 | 1144 | return FAILURE; | ||
1968 | 1145 | } | ||
1969 | 1146 | |||
1970 | 1147 | //std::cout << "Current physical stream format:" << std::endl; | ||
1971 | 1148 | //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl; | ||
1972 | 1149 | //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl; | ||
1973 | 1150 | //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl; | ||
1974 | 1151 | //std::cout << " sample rate = " << description.mSampleRate << std::endl; | ||
1975 | 1152 | |||
1976 | 1153 | if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) { | ||
1977 | 1154 | description.mFormatID = kAudioFormatLinearPCM; | ||
1978 | 1155 | //description.mSampleRate = (Float64) sampleRate; | ||
1979 | 1156 | AudioStreamBasicDescription testDescription = description; | ||
1980 | 1157 | UInt32 formatFlags; | ||
1981 | 1158 | |||
1982 | 1159 | // We'll try higher bit rates first and then work our way down. | ||
1983 | 1160 | std::vector< std::pair<UInt32, UInt32> > physicalFormats; | ||
1984 | 1161 | formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger; | ||
1985 | 1162 | physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) ); | ||
1986 | 1163 | formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat; | ||
1987 | 1164 | physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) ); | ||
1988 | 1165 | physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed | ||
1989 | 1166 | formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh ); | ||
1990 | 1167 | physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low | ||
1991 | 1168 | formatFlags |= kAudioFormatFlagIsAlignedHigh; | ||
1992 | 1169 | physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high | ||
1993 | 1170 | formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat; | ||
1994 | 1171 | physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) ); | ||
1995 | 1172 | physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) ); | ||
1996 | 1173 | |||
1997 | 1174 | bool setPhysicalFormat = false; | ||
1998 | 1175 | for( unsigned int i=0; i<physicalFormats.size(); i++ ) { | ||
1999 | 1176 | testDescription = description; | ||
2000 | 1177 | testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first; | ||
2001 | 1178 | testDescription.mFormatFlags = physicalFormats[i].second; | ||
2002 | 1179 | if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) ) | ||
2003 | 1180 | testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame; | ||
2004 | 1181 | else | ||
2005 | 1182 | testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame; | ||
2006 | 1183 | testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket; | ||
2007 | 1184 | result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription ); | ||
2008 | 1185 | if ( result == noErr ) { | ||
2009 | 1186 | setPhysicalFormat = true; | ||
2010 | 1187 | //std::cout << "Updated physical stream format:" << std::endl; | ||
2011 | 1188 | //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl; | ||
2012 | 1189 | //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl; | ||
2013 | 1190 | //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl; | ||
2014 | 1191 | //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl; | ||
2015 | 1192 | break; | ||
2016 | 1193 | } | ||
2017 | 1194 | } | ||
2018 | 1195 | |||
2019 | 1196 | if ( !setPhysicalFormat ) { | ||
2020 | 1197 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ")."; | ||
2021 | 1198 | errorText_ = errorStream_.str(); | ||
2022 | 1199 | return FAILURE; | ||
2023 | 1200 | } | ||
2024 | 1201 | } // done setting virtual/physical formats. | ||
2025 | 1202 | |||
2026 | 1203 | // Get the stream / device latency. | ||
2027 | 1204 | UInt32 latency; | ||
2028 | 1205 | dataSize = sizeof( UInt32 ); | ||
2029 | 1206 | property.mSelector = kAudioDevicePropertyLatency; | ||
2030 | 1207 | if ( AudioObjectHasProperty( id, &property ) == true ) { | ||
2031 | 1208 | result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency ); | ||
2032 | 1209 | if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency; | ||
2033 | 1210 | else { | ||
2034 | 1211 | errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ")."; | ||
2035 | 1212 | errorText_ = errorStream_.str(); | ||
2036 | 1213 | error( RtAudioError::WARNING ); | ||
2037 | 1214 | } | ||
2038 | 1215 | } | ||
2039 | 1216 | |||
2040 | 1217 | // Byte-swapping: According to AudioHardware.h, the stream data will | ||
2041 | 1218 | // always be presented in native-endian format, so we should never | ||
2042 | 1219 | // need to byte swap. | ||
2043 | 1220 | stream_.doByteSwap[mode] = false; | ||
2044 | 1221 | |||
2045 | 1222 | // From the CoreAudio documentation, PCM data must be supplied as | ||
2046 | 1223 | // 32-bit floats. | ||
2047 | 1224 | stream_.userFormat = format; | ||
2048 | 1225 | stream_.deviceFormat[mode] = RTAUDIO_FLOAT32; | ||
2049 | 1226 | |||
2050 | 1227 | if ( streamCount == 1 ) | ||
2051 | 1228 | stream_.nDeviceChannels[mode] = description.mChannelsPerFrame; | ||
2052 | 1229 | else // multiple streams | ||
2053 | 1230 | stream_.nDeviceChannels[mode] = channels; | ||
2054 | 1231 | stream_.nUserChannels[mode] = channels; | ||
2055 | 1232 | stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream | ||
2056 | 1233 | if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false; | ||
2057 | 1234 | else stream_.userInterleaved = true; | ||
2058 | 1235 | stream_.deviceInterleaved[mode] = true; | ||
2059 | 1236 | if ( monoMode == true ) stream_.deviceInterleaved[mode] = false; | ||
2060 | 1237 | |||
2061 | 1238 | // Set flags for buffer conversion. | ||
2062 | 1239 | stream_.doConvertBuffer[mode] = false; | ||
2063 | 1240 | if ( stream_.userFormat != stream_.deviceFormat[mode] ) | ||
2064 | 1241 | stream_.doConvertBuffer[mode] = true; | ||
2065 | 1242 | if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] ) | ||
2066 | 1243 | stream_.doConvertBuffer[mode] = true; | ||
2067 | 1244 | if ( streamCount == 1 ) { | ||
2068 | 1245 | if ( stream_.nUserChannels[mode] > 1 && | ||
2069 | 1246 | stream_.userInterleaved != stream_.deviceInterleaved[mode] ) | ||
2070 | 1247 | stream_.doConvertBuffer[mode] = true; | ||
2071 | 1248 | } | ||
2072 | 1249 | else if ( monoMode && stream_.userInterleaved ) | ||
2073 | 1250 | stream_.doConvertBuffer[mode] = true; | ||
2074 | 1251 | |||
2075 | 1252 | // Allocate our CoreHandle structure for the stream. | ||
2076 | 1253 | CoreHandle *handle = 0; | ||
2077 | 1254 | if ( stream_.apiHandle == 0 ) { | ||
2078 | 1255 | try { | ||
2079 | 1256 | handle = new CoreHandle; | ||
2080 | 1257 | } | ||
2081 | 1258 | catch ( std::bad_alloc& ) { | ||
2082 | 1259 | errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory."; | ||
2083 | 1260 | goto error; | ||
2084 | 1261 | } | ||
2085 | 1262 | |||
2086 | 1263 | if ( pthread_cond_init( &handle->condition, NULL ) ) { | ||
2087 | 1264 | errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable."; | ||
2088 | 1265 | goto error; | ||
2089 | 1266 | } | ||
2090 | 1267 | stream_.apiHandle = (void *) handle; | ||
2091 | 1268 | } | ||
2092 | 1269 | else | ||
2093 | 1270 | handle = (CoreHandle *) stream_.apiHandle; | ||
2094 | 1271 | handle->iStream[mode] = firstStream; | ||
2095 | 1272 | handle->nStreams[mode] = streamCount; | ||
2096 | 1273 | handle->id[mode] = id; | ||
2097 | 1274 | |||
2098 | 1275 | // Allocate necessary internal buffers. | ||
2099 | 1276 | unsigned long bufferBytes; | ||
2100 | 1277 | bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat ); | ||
2101 | 1278 | // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); | ||
2102 | 1279 | stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) ); | ||
2103 | 1280 | memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) ); | ||
2104 | 1281 | if ( stream_.userBuffer[mode] == NULL ) { | ||
2105 | 1282 | errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory."; | ||
2106 | 1283 | goto error; | ||
2107 | 1284 | } | ||
2108 | 1285 | |||
2109 | 1286 | // If possible, we will make use of the CoreAudio stream buffers as | ||
2110 | 1287 | // "device buffers". However, we can't do this if using multiple | ||
2111 | 1288 | // streams. | ||
2112 | 1289 | if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) { | ||
2113 | 1290 | |||
2114 | 1291 | bool makeBuffer = true; | ||
2115 | 1292 | bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] ); | ||
2116 | 1293 | if ( mode == INPUT ) { | ||
2117 | 1294 | if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { | ||
2118 | 1295 | unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); | ||
2119 | 1296 | if ( bufferBytes <= bytesOut ) makeBuffer = false; | ||
2120 | 1297 | } | ||
2121 | 1298 | } | ||
2122 | 1299 | |||
2123 | 1300 | if ( makeBuffer ) { | ||
2124 | 1301 | bufferBytes *= *bufferSize; | ||
2125 | 1302 | if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); | ||
2126 | 1303 | stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); | ||
2127 | 1304 | if ( stream_.deviceBuffer == NULL ) { | ||
2128 | 1305 | errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory."; | ||
2129 | 1306 | goto error; | ||
2130 | 1307 | } | ||
2131 | 1308 | } | ||
2132 | 1309 | } | ||
2133 | 1310 | |||
2134 | 1311 | stream_.sampleRate = sampleRate; | ||
2135 | 1312 | stream_.device[mode] = device; | ||
2136 | 1313 | stream_.state = STREAM_STOPPED; | ||
2137 | 1314 | stream_.callbackInfo.object = (void *) this; | ||
2138 | 1315 | |||
2139 | 1316 | // Setup the buffer conversion information structure. | ||
2140 | 1317 | if ( stream_.doConvertBuffer[mode] ) { | ||
2141 | 1318 | if ( streamCount > 1 ) setConvertInfo( mode, 0 ); | ||
2142 | 1319 | else setConvertInfo( mode, channelOffset ); | ||
2143 | 1320 | } | ||
2144 | 1321 | |||
2145 | 1322 | if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device ) | ||
2146 | 1323 | // Only one callback procedure per device. | ||
2147 | 1324 | stream_.mode = DUPLEX; | ||
2148 | 1325 | else { | ||
2149 | 1326 | #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 ) | ||
2150 | 1327 | result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] ); | ||
2151 | 1328 | #else | ||
2152 | 1329 | // deprecated in favor of AudioDeviceCreateIOProcID() | ||
2153 | 1330 | result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo ); | ||
2154 | 1331 | #endif | ||
2155 | 1332 | if ( result != noErr ) { | ||
2156 | 1333 | errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ")."; | ||
2157 | 1334 | errorText_ = errorStream_.str(); | ||
2158 | 1335 | goto error; | ||
2159 | 1336 | } | ||
2160 | 1337 | if ( stream_.mode == OUTPUT && mode == INPUT ) | ||
2161 | 1338 | stream_.mode = DUPLEX; | ||
2162 | 1339 | else | ||
2163 | 1340 | stream_.mode = mode; | ||
2164 | 1341 | } | ||
2165 | 1342 | |||
2166 | 1343 | // Setup the device property listener for over/underload. | ||
2167 | 1344 | property.mSelector = kAudioDeviceProcessorOverload; | ||
2168 | 1345 | property.mScope = kAudioObjectPropertyScopeGlobal; | ||
2169 | 1346 | result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle ); | ||
2170 | 1347 | |||
2171 | 1348 | return SUCCESS; | ||
2172 | 1349 | |||
2173 | 1350 | error: | ||
2174 | 1351 | if ( handle ) { | ||
2175 | 1352 | pthread_cond_destroy( &handle->condition ); | ||
2176 | 1353 | delete handle; | ||
2177 | 1354 | stream_.apiHandle = 0; | ||
2178 | 1355 | } | ||
2179 | 1356 | |||
2180 | 1357 | for ( int i=0; i<2; i++ ) { | ||
2181 | 1358 | if ( stream_.userBuffer[i] ) { | ||
2182 | 1359 | free( stream_.userBuffer[i] ); | ||
2183 | 1360 | stream_.userBuffer[i] = 0; | ||
2184 | 1361 | } | ||
2185 | 1362 | } | ||
2186 | 1363 | |||
2187 | 1364 | if ( stream_.deviceBuffer ) { | ||
2188 | 1365 | free( stream_.deviceBuffer ); | ||
2189 | 1366 | stream_.deviceBuffer = 0; | ||
2190 | 1367 | } | ||
2191 | 1368 | |||
2192 | 1369 | stream_.state = STREAM_CLOSED; | ||
2193 | 1370 | return FAILURE; | ||
2194 | 1371 | } | ||
2195 | 1372 | |||
2196 | 1373 | void RtApiCore :: closeStream( void ) | ||
2197 | 1374 | { | ||
2198 | 1375 | if ( stream_.state == STREAM_CLOSED ) { | ||
2199 | 1376 | errorText_ = "RtApiCore::closeStream(): no open stream to close!"; | ||
2200 | 1377 | error( RtAudioError::WARNING ); | ||
2201 | 1378 | return; | ||
2202 | 1379 | } | ||
2203 | 1380 | |||
2204 | 1381 | CoreHandle *handle = (CoreHandle *) stream_.apiHandle; | ||
2205 | 1382 | if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { | ||
2206 | 1383 | if ( stream_.state == STREAM_RUNNING ) | ||
2207 | 1384 | AudioDeviceStop( handle->id[0], callbackHandler ); | ||
2208 | 1385 | #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 ) | ||
2209 | 1386 | AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] ); | ||
2210 | 1387 | #else | ||
2211 | 1388 | // deprecated in favor of AudioDeviceDestroyIOProcID() | ||
2212 | 1389 | AudioDeviceRemoveIOProc( handle->id[0], callbackHandler ); | ||
2213 | 1390 | #endif | ||
2214 | 1391 | } | ||
2215 | 1392 | |||
2216 | 1393 | if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) { | ||
2217 | 1394 | if ( stream_.state == STREAM_RUNNING ) | ||
2218 | 1395 | AudioDeviceStop( handle->id[1], callbackHandler ); | ||
2219 | 1396 | #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 ) | ||
2220 | 1397 | AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] ); | ||
2221 | 1398 | #else | ||
2222 | 1399 | // deprecated in favor of AudioDeviceDestroyIOProcID() | ||
2223 | 1400 | AudioDeviceRemoveIOProc( handle->id[1], callbackHandler ); | ||
2224 | 1401 | #endif | ||
2225 | 1402 | } | ||
2226 | 1403 | |||
2227 | 1404 | for ( int i=0; i<2; i++ ) { | ||
2228 | 1405 | if ( stream_.userBuffer[i] ) { | ||
2229 | 1406 | free( stream_.userBuffer[i] ); | ||
2230 | 1407 | stream_.userBuffer[i] = 0; | ||
2231 | 1408 | } | ||
2232 | 1409 | } | ||
2233 | 1410 | |||
2234 | 1411 | if ( stream_.deviceBuffer ) { | ||
2235 | 1412 | free( stream_.deviceBuffer ); | ||
2236 | 1413 | stream_.deviceBuffer = 0; | ||
2237 | 1414 | } | ||
2238 | 1415 | |||
2239 | 1416 | // Destroy pthread condition variable. | ||
2240 | 1417 | pthread_cond_destroy( &handle->condition ); | ||
2241 | 1418 | delete handle; | ||
2242 | 1419 | stream_.apiHandle = 0; | ||
2243 | 1420 | |||
2244 | 1421 | stream_.mode = UNINITIALIZED; | ||
2245 | 1422 | stream_.state = STREAM_CLOSED; | ||
2246 | 1423 | } | ||
2247 | 1424 | |||
2248 | 1425 | void RtApiCore :: startStream( void ) | ||
2249 | 1426 | { | ||
2250 | 1427 | verifyStream(); | ||
2251 | 1428 | if ( stream_.state == STREAM_RUNNING ) { | ||
2252 | 1429 | errorText_ = "RtApiCore::startStream(): the stream is already running!"; | ||
2253 | 1430 | error( RtAudioError::WARNING ); | ||
2254 | 1431 | return; | ||
2255 | 1432 | } | ||
2256 | 1433 | |||
2257 | 1434 | OSStatus result = noErr; | ||
2258 | 1435 | CoreHandle *handle = (CoreHandle *) stream_.apiHandle; | ||
2259 | 1436 | if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { | ||
2260 | 1437 | |||
2261 | 1438 | result = AudioDeviceStart( handle->id[0], callbackHandler ); | ||
2262 | 1439 | if ( result != noErr ) { | ||
2263 | 1440 | errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ")."; | ||
2264 | 1441 | errorText_ = errorStream_.str(); | ||
2265 | 1442 | goto unlock; | ||
2266 | 1443 | } | ||
2267 | 1444 | } | ||
2268 | 1445 | |||
2269 | 1446 | if ( stream_.mode == INPUT || | ||
2270 | 1447 | ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) { | ||
2271 | 1448 | |||
2272 | 1449 | result = AudioDeviceStart( handle->id[1], callbackHandler ); | ||
2273 | 1450 | if ( result != noErr ) { | ||
2274 | 1451 | errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ")."; | ||
2275 | 1452 | errorText_ = errorStream_.str(); | ||
2276 | 1453 | goto unlock; | ||
2277 | 1454 | } | ||
2278 | 1455 | } | ||
2279 | 1456 | |||
2280 | 1457 | handle->drainCounter = 0; | ||
2281 | 1458 | handle->internalDrain = false; | ||
2282 | 1459 | stream_.state = STREAM_RUNNING; | ||
2283 | 1460 | |||
2284 | 1461 | unlock: | ||
2285 | 1462 | if ( result == noErr ) return; | ||
2286 | 1463 | error( RtAudioError::SYSTEM_ERROR ); | ||
2287 | 1464 | } | ||
2288 | 1465 | |||
2289 | 1466 | void RtApiCore :: stopStream( void ) | ||
2290 | 1467 | { | ||
2291 | 1468 | verifyStream(); | ||
2292 | 1469 | if ( stream_.state == STREAM_STOPPED ) { | ||
2293 | 1470 | errorText_ = "RtApiCore::stopStream(): the stream is already stopped!"; | ||
2294 | 1471 | error( RtAudioError::WARNING ); | ||
2295 | 1472 | return; | ||
2296 | 1473 | } | ||
2297 | 1474 | |||
2298 | 1475 | OSStatus result = noErr; | ||
2299 | 1476 | CoreHandle *handle = (CoreHandle *) stream_.apiHandle; | ||
2300 | 1477 | if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { | ||
2301 | 1478 | |||
2302 | 1479 | if ( handle->drainCounter == 0 ) { | ||
2303 | 1480 | handle->drainCounter = 2; | ||
2304 | 1481 | pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled | ||
2305 | 1482 | } | ||
2306 | 1483 | |||
2307 | 1484 | result = AudioDeviceStop( handle->id[0], callbackHandler ); | ||
2308 | 1485 | if ( result != noErr ) { | ||
2309 | 1486 | errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ")."; | ||
2310 | 1487 | errorText_ = errorStream_.str(); | ||
2311 | 1488 | goto unlock; | ||
2312 | 1489 | } | ||
2313 | 1490 | } | ||
2314 | 1491 | |||
2315 | 1492 | if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) { | ||
2316 | 1493 | |||
2317 | 1494 | result = AudioDeviceStop( handle->id[1], callbackHandler ); | ||
2318 | 1495 | if ( result != noErr ) { | ||
2319 | 1496 | errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ")."; | ||
2320 | 1497 | errorText_ = errorStream_.str(); | ||
2321 | 1498 | goto unlock; | ||
2322 | 1499 | } | ||
2323 | 1500 | } | ||
2324 | 1501 | |||
2325 | 1502 | stream_.state = STREAM_STOPPED; | ||
2326 | 1503 | |||
2327 | 1504 | unlock: | ||
2328 | 1505 | if ( result == noErr ) return; | ||
2329 | 1506 | error( RtAudioError::SYSTEM_ERROR ); | ||
2330 | 1507 | } | ||
2331 | 1508 | |||
2332 | 1509 | void RtApiCore :: abortStream( void ) | ||
2333 | 1510 | { | ||
2334 | 1511 | verifyStream(); | ||
2335 | 1512 | if ( stream_.state == STREAM_STOPPED ) { | ||
2336 | 1513 | errorText_ = "RtApiCore::abortStream(): the stream is already stopped!"; | ||
2337 | 1514 | error( RtAudioError::WARNING ); | ||
2338 | 1515 | return; | ||
2339 | 1516 | } | ||
2340 | 1517 | |||
2341 | 1518 | CoreHandle *handle = (CoreHandle *) stream_.apiHandle; | ||
2342 | 1519 | handle->drainCounter = 2; | ||
2343 | 1520 | |||
2344 | 1521 | stopStream(); | ||
2345 | 1522 | } | ||
2346 | 1523 | |||
2347 | 1524 | // This function will be called by a spawned thread when the user | ||
2348 | 1525 | // callback function signals that the stream should be stopped or | ||
2349 | 1526 | // aborted. It is better to handle it this way because the | ||
2350 | 1527 | // callbackEvent() function probably should return before the AudioDeviceStop() | ||
2351 | 1528 | // function is called. | ||
2352 | 1529 | static void *coreStopStream( void *ptr ) | ||
2353 | 1530 | { | ||
2354 | 1531 | CallbackInfo *info = (CallbackInfo *) ptr; | ||
2355 | 1532 | RtApiCore *object = (RtApiCore *) info->object; | ||
2356 | 1533 | |||
2357 | 1534 | object->stopStream(); | ||
2358 | 1535 | pthread_exit( NULL ); | ||
2359 | 1536 | } | ||
2360 | 1537 | |||
2361 | 1538 | bool RtApiCore :: callbackEvent( AudioDeviceID deviceId, | ||
2362 | 1539 | const AudioBufferList *inBufferList, | ||
2363 | 1540 | const AudioBufferList *outBufferList ) | ||
2364 | 1541 | { | ||
2365 | 1542 | if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS; | ||
2366 | 1543 | if ( stream_.state == STREAM_CLOSED ) { | ||
2367 | 1544 | errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!"; | ||
2368 | 1545 | error( RtAudioError::WARNING ); | ||
2369 | 1546 | return FAILURE; | ||
2370 | 1547 | } | ||
2371 | 1548 | |||
2372 | 1549 | CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo; | ||
2373 | 1550 | CoreHandle *handle = (CoreHandle *) stream_.apiHandle; | ||
2374 | 1551 | |||
2375 | 1552 | // Check if we were draining the stream and signal is finished. | ||
2376 | 1553 | if ( handle->drainCounter > 3 ) { | ||
2377 | 1554 | ThreadHandle threadId; | ||
2378 | 1555 | |||
2379 | 1556 | stream_.state = STREAM_STOPPING; | ||
2380 | 1557 | if ( handle->internalDrain == true ) | ||
2381 | 1558 | pthread_create( &threadId, NULL, coreStopStream, info ); | ||
2382 | 1559 | else // external call to stopStream() | ||
2383 | 1560 | pthread_cond_signal( &handle->condition ); | ||
2384 | 1561 | return SUCCESS; | ||
2385 | 1562 | } | ||
2386 | 1563 | |||
2387 | 1564 | AudioDeviceID outputDevice = handle->id[0]; | ||
2388 | 1565 | |||
2389 | 1566 | // Invoke user callback to get fresh output data UNLESS we are | ||
2390 | 1567 | // draining stream or duplex mode AND the input/output devices are | ||
2391 | 1568 | // different AND this function is called for the input device. | ||
2392 | 1569 | if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) { | ||
2393 | 1570 | RtAudioCallback callback = (RtAudioCallback) info->callback; | ||
2394 | 1571 | double streamTime = getStreamTime(); | ||
2395 | 1572 | RtAudioStreamStatus status = 0; | ||
2396 | 1573 | if ( stream_.mode != INPUT && handle->xrun[0] == true ) { | ||
2397 | 1574 | status |= RTAUDIO_OUTPUT_UNDERFLOW; | ||
2398 | 1575 | handle->xrun[0] = false; | ||
2399 | 1576 | } | ||
2400 | 1577 | if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) { | ||
2401 | 1578 | status |= RTAUDIO_INPUT_OVERFLOW; | ||
2402 | 1579 | handle->xrun[1] = false; | ||
2403 | 1580 | } | ||
2404 | 1581 | |||
2405 | 1582 | int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1], | ||
2406 | 1583 | stream_.bufferSize, streamTime, status, info->userData ); | ||
2407 | 1584 | if ( cbReturnValue == 2 ) { | ||
2408 | 1585 | stream_.state = STREAM_STOPPING; | ||
2409 | 1586 | handle->drainCounter = 2; | ||
2410 | 1587 | abortStream(); | ||
2411 | 1588 | return SUCCESS; | ||
2412 | 1589 | } | ||
2413 | 1590 | else if ( cbReturnValue == 1 ) { | ||
2414 | 1591 | handle->drainCounter = 1; | ||
2415 | 1592 | handle->internalDrain = true; | ||
2416 | 1593 | } | ||
2417 | 1594 | } | ||
2418 | 1595 | |||
2419 | 1596 | if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) { | ||
2420 | 1597 | |||
2421 | 1598 | if ( handle->drainCounter > 1 ) { // write zeros to the output stream | ||
2422 | 1599 | |||
2423 | 1600 | if ( handle->nStreams[0] == 1 ) { | ||
2424 | 1601 | memset( outBufferList->mBuffers[handle->iStream[0]].mData, | ||
2425 | 1602 | 0, | ||
2426 | 1603 | outBufferList->mBuffers[handle->iStream[0]].mDataByteSize ); | ||
2427 | 1604 | } | ||
2428 | 1605 | else { // fill multiple streams with zeros | ||
2429 | 1606 | for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) { | ||
2430 | 1607 | memset( outBufferList->mBuffers[handle->iStream[0]+i].mData, | ||
2431 | 1608 | 0, | ||
2432 | 1609 | outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize ); | ||
2433 | 1610 | } | ||
2434 | 1611 | } | ||
2435 | 1612 | } | ||
2436 | 1613 | else if ( handle->nStreams[0] == 1 ) { | ||
2437 | 1614 | if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer | ||
2438 | 1615 | convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData, | ||
2439 | 1616 | stream_.userBuffer[0], stream_.convertInfo[0] ); | ||
2440 | 1617 | } | ||
2441 | 1618 | else { // copy from user buffer | ||
2442 | 1619 | memcpy( outBufferList->mBuffers[handle->iStream[0]].mData, | ||
2443 | 1620 | stream_.userBuffer[0], | ||
2444 | 1621 | outBufferList->mBuffers[handle->iStream[0]].mDataByteSize ); | ||
2445 | 1622 | } | ||
2446 | 1623 | } | ||
2447 | 1624 | else { // fill multiple streams | ||
2448 | 1625 | Float32 *inBuffer = (Float32 *) stream_.userBuffer[0]; | ||
2449 | 1626 | if ( stream_.doConvertBuffer[0] ) { | ||
2450 | 1627 | convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] ); | ||
2451 | 1628 | inBuffer = (Float32 *) stream_.deviceBuffer; | ||
2452 | 1629 | } | ||
2453 | 1630 | |||
2454 | 1631 | if ( stream_.deviceInterleaved[0] == false ) { // mono mode | ||
2455 | 1632 | UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize; | ||
2456 | 1633 | for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) { | ||
2457 | 1634 | memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData, | ||
2458 | 1635 | (void *)&inBuffer[i*stream_.bufferSize], bufferBytes ); | ||
2459 | 1636 | } | ||
2460 | 1637 | } | ||
2461 | 1638 | else { // fill multiple multi-channel streams with interleaved data | ||
2462 | 1639 | UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset; | ||
2463 | 1640 | Float32 *out, *in; | ||
2464 | 1641 | |||
2465 | 1642 | bool inInterleaved = ( stream_.userInterleaved ) ? true : false; | ||
2466 | 1643 | UInt32 inChannels = stream_.nUserChannels[0]; | ||
2467 | 1644 | if ( stream_.doConvertBuffer[0] ) { | ||
2468 | 1645 | inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode | ||
2469 | 1646 | inChannels = stream_.nDeviceChannels[0]; | ||
2470 | 1647 | } | ||
2471 | 1648 | |||
2472 | 1649 | if ( inInterleaved ) inOffset = 1; | ||
2473 | 1650 | else inOffset = stream_.bufferSize; | ||
2474 | 1651 | |||
2475 | 1652 | channelsLeft = inChannels; | ||
2476 | 1653 | for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) { | ||
2477 | 1654 | in = inBuffer; | ||
2478 | 1655 | out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData; | ||
2479 | 1656 | streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels; | ||
2480 | 1657 | |||
2481 | 1658 | outJump = 0; | ||
2482 | 1659 | // Account for possible channel offset in first stream | ||
2483 | 1660 | if ( i == 0 && stream_.channelOffset[0] > 0 ) { | ||
2484 | 1661 | streamChannels -= stream_.channelOffset[0]; | ||
2485 | 1662 | outJump = stream_.channelOffset[0]; | ||
2486 | 1663 | out += outJump; | ||
2487 | 1664 | } | ||
2488 | 1665 | |||
2489 | 1666 | // Account for possible unfilled channels at end of the last stream | ||
2490 | 1667 | if ( streamChannels > channelsLeft ) { | ||
2491 | 1668 | outJump = streamChannels - channelsLeft; | ||
2492 | 1669 | streamChannels = channelsLeft; | ||
2493 | 1670 | } | ||
2494 | 1671 | |||
2495 | 1672 | // Determine input buffer offsets and skips | ||
2496 | 1673 | if ( inInterleaved ) { | ||
2497 | 1674 | inJump = inChannels; | ||
2498 | 1675 | in += inChannels - channelsLeft; | ||
2499 | 1676 | } | ||
2500 | 1677 | else { | ||
2501 | 1678 | inJump = 1; | ||
2502 | 1679 | in += (inChannels - channelsLeft) * inOffset; | ||
2503 | 1680 | } | ||
2504 | 1681 | |||
2505 | 1682 | for ( unsigned int i=0; i<stream_.bufferSize; i++ ) { | ||
2506 | 1683 | for ( unsigned int j=0; j<streamChannels; j++ ) { | ||
2507 | 1684 | *out++ = in[j*inOffset]; | ||
2508 | 1685 | } | ||
2509 | 1686 | out += outJump; | ||
2510 | 1687 | in += inJump; | ||
2511 | 1688 | } | ||
2512 | 1689 | channelsLeft -= streamChannels; | ||
2513 | 1690 | } | ||
2514 | 1691 | } | ||
2515 | 1692 | } | ||
2516 | 1693 | } | ||
2517 | 1694 | |||
2518 | 1695 | // Don't bother draining input | ||
2519 | 1696 | if ( handle->drainCounter ) { | ||
2520 | 1697 | handle->drainCounter++; | ||
2521 | 1698 | goto unlock; | ||
2522 | 1699 | } | ||
2523 | 1700 | |||
2524 | 1701 | AudioDeviceID inputDevice; | ||
2525 | 1702 | inputDevice = handle->id[1]; | ||
2526 | 1703 | if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) { | ||
2527 | 1704 | |||
2528 | 1705 | if ( handle->nStreams[1] == 1 ) { | ||
2529 | 1706 | if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer | ||
2530 | 1707 | convertBuffer( stream_.userBuffer[1], | ||
2531 | 1708 | (char *) inBufferList->mBuffers[handle->iStream[1]].mData, | ||
2532 | 1709 | stream_.convertInfo[1] ); | ||
2533 | 1710 | } | ||
2534 | 1711 | else { // copy to user buffer | ||
2535 | 1712 | memcpy( stream_.userBuffer[1], | ||
2536 | 1713 | inBufferList->mBuffers[handle->iStream[1]].mData, | ||
2537 | 1714 | inBufferList->mBuffers[handle->iStream[1]].mDataByteSize ); | ||
2538 | 1715 | } | ||
2539 | 1716 | } | ||
2540 | 1717 | else { // read from multiple streams | ||
2541 | 1718 | Float32 *outBuffer = (Float32 *) stream_.userBuffer[1]; | ||
2542 | 1719 | if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer; | ||
2543 | 1720 | |||
2544 | 1721 | if ( stream_.deviceInterleaved[1] == false ) { // mono mode | ||
2545 | 1722 | UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize; | ||
2546 | 1723 | for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) { | ||
2547 | 1724 | memcpy( (void *)&outBuffer[i*stream_.bufferSize], | ||
2548 | 1725 | inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes ); | ||
2549 | 1726 | } | ||
2550 | 1727 | } | ||
2551 | 1728 | else { // read from multiple multi-channel streams | ||
2552 | 1729 | UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset; | ||
2553 | 1730 | Float32 *out, *in; | ||
2554 | 1731 | |||
2555 | 1732 | bool outInterleaved = ( stream_.userInterleaved ) ? true : false; | ||
2556 | 1733 | UInt32 outChannels = stream_.nUserChannels[1]; | ||
2557 | 1734 | if ( stream_.doConvertBuffer[1] ) { | ||
2558 | 1735 | outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode | ||
2559 | 1736 | outChannels = stream_.nDeviceChannels[1]; | ||
2560 | 1737 | } | ||
2561 | 1738 | |||
2562 | 1739 | if ( outInterleaved ) outOffset = 1; | ||
2563 | 1740 | else outOffset = stream_.bufferSize; | ||
2564 | 1741 | |||
2565 | 1742 | channelsLeft = outChannels; | ||
2566 | 1743 | for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) { | ||
2567 | 1744 | out = outBuffer; | ||
2568 | 1745 | in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData; | ||
2569 | 1746 | streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels; | ||
2570 | 1747 | |||
2571 | 1748 | inJump = 0; | ||
2572 | 1749 | // Account for possible channel offset in first stream | ||
2573 | 1750 | if ( i == 0 && stream_.channelOffset[1] > 0 ) { | ||
2574 | 1751 | streamChannels -= stream_.channelOffset[1]; | ||
2575 | 1752 | inJump = stream_.channelOffset[1]; | ||
2576 | 1753 | in += inJump; | ||
2577 | 1754 | } | ||
2578 | 1755 | |||
2579 | 1756 | // Account for possible unread channels at end of the last stream | ||
2580 | 1757 | if ( streamChannels > channelsLeft ) { | ||
2581 | 1758 | inJump = streamChannels - channelsLeft; | ||
2582 | 1759 | streamChannels = channelsLeft; | ||
2583 | 1760 | } | ||
2584 | 1761 | |||
2585 | 1762 | // Determine output buffer offsets and skips | ||
2586 | 1763 | if ( outInterleaved ) { | ||
2587 | 1764 | outJump = outChannels; | ||
2588 | 1765 | out += outChannels - channelsLeft; | ||
2589 | 1766 | } | ||
2590 | 1767 | else { | ||
2591 | 1768 | outJump = 1; | ||
2592 | 1769 | out += (outChannels - channelsLeft) * outOffset; | ||
2593 | 1770 | } | ||
2594 | 1771 | |||
2595 | 1772 | for ( unsigned int i=0; i<stream_.bufferSize; i++ ) { | ||
2596 | 1773 | for ( unsigned int j=0; j<streamChannels; j++ ) { | ||
2597 | 1774 | out[j*outOffset] = *in++; | ||
2598 | 1775 | } | ||
2599 | 1776 | out += outJump; | ||
2600 | 1777 | in += inJump; | ||
2601 | 1778 | } | ||
2602 | 1779 | channelsLeft -= streamChannels; | ||
2603 | 1780 | } | ||
2604 | 1781 | } | ||
2605 | 1782 | |||
2606 | 1783 | if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer | ||
2607 | 1784 | convertBuffer( stream_.userBuffer[1], | ||
2608 | 1785 | stream_.deviceBuffer, | ||
2609 | 1786 | stream_.convertInfo[1] ); | ||
2610 | 1787 | } | ||
2611 | 1788 | } | ||
2612 | 1789 | } | ||
2613 | 1790 | |||
2614 | 1791 | unlock: | ||
2615 | 1792 | //MUTEX_UNLOCK( &stream_.mutex ); | ||
2616 | 1793 | |||
2617 | 1794 | RtApi::tickStreamTime(); | ||
2618 | 1795 | return SUCCESS; | ||
2619 | 1796 | } | ||
2620 | 1797 | |||
2621 | 1798 | const char* RtApiCore :: getErrorCode( OSStatus code ) | ||
2622 | 1799 | { | ||
2623 | 1800 | switch( code ) { | ||
2624 | 1801 | |||
2625 | 1802 | case kAudioHardwareNotRunningError: | ||
2626 | 1803 | return "kAudioHardwareNotRunningError"; | ||
2627 | 1804 | |||
2628 | 1805 | case kAudioHardwareUnspecifiedError: | ||
2629 | 1806 | return "kAudioHardwareUnspecifiedError"; | ||
2630 | 1807 | |||
2631 | 1808 | case kAudioHardwareUnknownPropertyError: | ||
2632 | 1809 | return "kAudioHardwareUnknownPropertyError"; | ||
2633 | 1810 | |||
2634 | 1811 | case kAudioHardwareBadPropertySizeError: | ||
2635 | 1812 | return "kAudioHardwareBadPropertySizeError"; | ||
2636 | 1813 | |||
2637 | 1814 | case kAudioHardwareIllegalOperationError: | ||
2638 | 1815 | return "kAudioHardwareIllegalOperationError"; | ||
2639 | 1816 | |||
2640 | 1817 | case kAudioHardwareBadObjectError: | ||
2641 | 1818 | return "kAudioHardwareBadObjectError"; | ||
2642 | 1819 | |||
2643 | 1820 | case kAudioHardwareBadDeviceError: | ||
2644 | 1821 | return "kAudioHardwareBadDeviceError"; | ||
2645 | 1822 | |||
2646 | 1823 | case kAudioHardwareBadStreamError: | ||
2647 | 1824 | return "kAudioHardwareBadStreamError"; | ||
2648 | 1825 | |||
2649 | 1826 | case kAudioHardwareUnsupportedOperationError: | ||
2650 | 1827 | return "kAudioHardwareUnsupportedOperationError"; | ||
2651 | 1828 | |||
2652 | 1829 | case kAudioDeviceUnsupportedFormatError: | ||
2653 | 1830 | return "kAudioDeviceUnsupportedFormatError"; | ||
2654 | 1831 | |||
2655 | 1832 | case kAudioDevicePermissionsError: | ||
2656 | 1833 | return "kAudioDevicePermissionsError"; | ||
2657 | 1834 | |||
2658 | 1835 | default: | ||
2659 | 1836 | return "CoreAudio unknown error"; | ||
2660 | 1837 | } | ||
2661 | 1838 | } | ||
2662 | 1839 | |||
2663 | 1840 | //******************** End of __MACOSX_CORE__ *********************// | ||
2664 | 1841 | #endif | ||
2665 | 1842 | |||
2666 | 1843 | #if defined(__UNIX_JACK__) | ||
2667 | 1844 | |||
2668 | 1845 | // JACK is a low-latency audio server, originally written for the | ||
2669 | 1846 | // GNU/Linux operating system and now also ported to OS-X. It can | ||
2670 | 1847 | // connect a number of different applications to an audio device, as | ||
2671 | 1848 | // well as allowing them to share audio between themselves. | ||
2672 | 1849 | // | ||
2673 | 1850 | // When using JACK with RtAudio, "devices" refer to JACK clients that | ||
2674 | 1851 | // have ports connected to the server. The JACK server is typically | ||
2675 | 1852 | // started in a terminal as follows: | ||
2676 | 1853 | // | ||
2677 | 1854 | // .jackd -d alsa -d hw:0 | ||
2678 | 1855 | // | ||
2679 | 1856 | // or through an interface program such as qjackctl. Many of the | ||
2680 | 1857 | // parameters normally set for a stream are fixed by the JACK server | ||
2681 | 1858 | // and can be specified when the JACK server is started. In | ||
2682 | 1859 | // particular, | ||
2683 | 1860 | // | ||
2684 | 1861 | // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4 | ||
2685 | 1862 | // | ||
2686 | 1863 | // specifies a sample rate of 44100 Hz, a buffer size of 512 sample | ||
2687 | 1864 | // frames, and number of buffers = 4. Once the server is running, it | ||
2688 | 1865 | // is not possible to override these values. If the values are not | ||
2689 | 1866 | // specified in the command-line, the JACK server uses default values. | ||
2690 | 1867 | // | ||
2691 | 1868 | // The JACK server does not have to be running when an instance of | ||
2692 | 1869 | // RtApiJack is created, though the function getDeviceCount() will | ||
2693 | 1870 | // report 0 devices found until JACK has been started. When no | ||
2694 | 1871 | // devices are available (i.e., the JACK server is not running), a | ||
2695 | 1872 | // stream cannot be opened. | ||
2696 | 1873 | |||
2697 | 1874 | #include <jack/jack.h> | ||
2698 | 1875 | #include <unistd.h> | ||
2699 | 1876 | #include <cstdio> | ||
2700 | 1877 | |||
2701 | 1878 | // A structure to hold various information related to the Jack API | ||
2702 | 1879 | // implementation. | ||
struct JackHandle {
  jack_client_t *client;      // Jack client shared by both directions; 0 until a stream is opened.
  jack_port_t **ports[2];     // Per-direction arrays of registered ports: [0] used for OUTPUT, [1] for INPUT.
  std::string deviceName[2];  // Jack client (device) name used for each direction.
  bool xrun[2];               // Set from the xrun callback when an over/underrun occurs in that direction.
  pthread_cond_t condition;   // Condition variable used while draining the stream (see drainCounter).
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  // Zero-initialize pointers and flags so cleanup paths can test them safely.
  JackHandle()
    :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
};
2715 | 1892 | |||
// No-op error handler: installed in non-debug builds to suppress Jack's
// default error logging to stderr. The message argument is ignored.
static void jackSilentError( const char * /*message*/ ) {}
2717 | 1894 | |||
// Constructor: in non-debug builds, replace Jack's default error printer
// with a silent handler so library chatter does not reach stderr.
RtApiJack :: RtApiJack()
{
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
2726 | 1903 | |||
2727 | 1904 | RtApiJack :: ~RtApiJack() | ||
2728 | 1905 | { | ||
2729 | 1906 | if ( stream_.state != STREAM_CLOSED ) closeStream(); | ||
2730 | 1907 | } | ||
2731 | 1908 | |||
2732 | 1909 | unsigned int RtApiJack :: getDeviceCount( void ) | ||
2733 | 1910 | { | ||
2734 | 1911 | // See if we can become a jack client. | ||
2735 | 1912 | jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption; | ||
2736 | 1913 | jack_status_t *status = NULL; | ||
2737 | 1914 | jack_client_t *client = jack_client_open( "RtApiJackCount", options, status ); | ||
2738 | 1915 | if ( client == 0 ) return 0; | ||
2739 | 1916 | |||
2740 | 1917 | const char **ports; | ||
2741 | 1918 | std::string port, previousPort; | ||
2742 | 1919 | unsigned int nChannels = 0, nDevices = 0; | ||
2743 | 1920 | ports = jack_get_ports( client, NULL, NULL, 0 ); | ||
2744 | 1921 | if ( ports ) { | ||
2745 | 1922 | // Parse the port names up to the first colon (:). | ||
2746 | 1923 | size_t iColon = 0; | ||
2747 | 1924 | do { | ||
2748 | 1925 | port = (char *) ports[ nChannels ]; | ||
2749 | 1926 | iColon = port.find(":"); | ||
2750 | 1927 | if ( iColon != std::string::npos ) { | ||
2751 | 1928 | port = port.substr( 0, iColon + 1 ); | ||
2752 | 1929 | if ( port != previousPort ) { | ||
2753 | 1930 | nDevices++; | ||
2754 | 1931 | previousPort = port; | ||
2755 | 1932 | } | ||
2756 | 1933 | } | ||
2757 | 1934 | } while ( ports[++nChannels] ); | ||
2758 | 1935 | free( ports ); | ||
2759 | 1936 | } | ||
2760 | 1937 | |||
2761 | 1938 | jack_client_close( client ); | ||
2762 | 1939 | return nDevices; | ||
2763 | 1940 | } | ||
2764 | 1941 | |||
// Probe device "device" (a Jack client) and fill in an RtAudio::DeviceInfo:
// name, channel counts, the server's single sample rate, and native format.
// On any failure info.probed remains false and a WARNING/INVALID_USE error
// is reported via error().
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;  // stays false on every failure path below

  // Become a temporary jack client; never auto-start a server.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:). Each distinct
    // client prefix counts as one device; remember the name of the
    // requested one.
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.
  // (The server runs at one fixed rate, so only that rate is offered.)
  info.sampleRates.clear();
  info.sampleRates.push_back( jack_get_sample_rate( client ) );

  // Count the available ports containing the client name as device
  // channels. Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  // Duplex capability is limited by the smaller of the two counts.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2855 | 2032 | |||
2856 | 2033 | static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer ) | ||
2857 | 2034 | { | ||
2858 | 2035 | CallbackInfo *info = (CallbackInfo *) infoPointer; | ||
2859 | 2036 | |||
2860 | 2037 | RtApiJack *object = (RtApiJack *) info->object; | ||
2861 | 2038 | if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1; | ||
2862 | 2039 | |||
2863 | 2040 | return 0; | ||
2864 | 2041 | } | ||
2865 | 2042 | |||
2866 | 2043 | // This function will be called by a spawned thread when the Jack | ||
2867 | 2044 | // server signals that it is shutting down. It is necessary to handle | ||
2868 | 2045 | // it this way because the jackShutdown() function must return before | ||
2869 | 2046 | // the jack_deactivate() function (in closeStream()) will return. | ||
2870 | 2047 | static void *jackCloseStream( void *ptr ) | ||
2871 | 2048 | { | ||
2872 | 2049 | CallbackInfo *info = (CallbackInfo *) ptr; | ||
2873 | 2050 | RtApiJack *object = (RtApiJack *) info->object; | ||
2874 | 2051 | |||
2875 | 2052 | object->closeStream(); | ||
2876 | 2053 | |||
2877 | 2054 | pthread_exit( NULL ); | ||
2878 | 2055 | } | ||
2879 | 2056 | static void jackShutdown( void *infoPointer ) | ||
2880 | 2057 | { | ||
2881 | 2058 | CallbackInfo *info = (CallbackInfo *) infoPointer; | ||
2882 | 2059 | RtApiJack *object = (RtApiJack *) info->object; | ||
2883 | 2060 | |||
2884 | 2061 | // Check current stream state. If stopped, then we'll assume this | ||
2885 | 2062 | // was called as a result of a call to RtApiJack::stopStream (the | ||
2886 | 2063 | // deactivation of a client handle causes this function to be called). | ||
2887 | 2064 | // If not, we'll assume the Jack server is shutting down or some | ||
2888 | 2065 | // other problem occurred and we should close the stream. | ||
2889 | 2066 | if ( object->isStreamRunning() == false ) return; | ||
2890 | 2067 | |||
2891 | 2068 | ThreadHandle threadId; | ||
2892 | 2069 | pthread_create( &threadId, NULL, jackCloseStream, info ); | ||
2893 | 2070 | std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl; | ||
2894 | 2071 | } | ||
2895 | 2072 | |||
2896 | 2073 | static int jackXrun( void *infoPointer ) | ||
2897 | 2074 | { | ||
2898 | 2075 | JackHandle *handle = (JackHandle *) infoPointer; | ||
2899 | 2076 | |||
2900 | 2077 | if ( handle->ports[0] ) handle->xrun[0] = true; | ||
2901 | 2078 | if ( handle->ports[1] ) handle->xrun[1] = true; | ||
2902 | 2079 | |||
2903 | 2080 | return 0; | ||
2904 | 2081 | } | ||
2905 | 2082 | |||
2906 | 2083 | bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels, | ||
2907 | 2084 | unsigned int firstChannel, unsigned int sampleRate, | ||
2908 | 2085 | RtAudioFormat format, unsigned int *bufferSize, | ||
2909 | 2086 | RtAudio::StreamOptions *options ) | ||
2910 | 2087 | { | ||
2911 | 2088 | JackHandle *handle = (JackHandle *) stream_.apiHandle; | ||
2912 | 2089 | |||
2913 | 2090 | // Look for jack server and try to become a client (only do once per stream). | ||
2914 | 2091 | jack_client_t *client = 0; | ||
2915 | 2092 | if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) { | ||
2916 | 2093 | jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption; | ||
2917 | 2094 | jack_status_t *status = NULL; | ||
2918 | 2095 | if ( options && !options->streamName.empty() ) | ||
2919 | 2096 | client = jack_client_open( options->streamName.c_str(), jackoptions, status ); | ||
2920 | 2097 | else | ||
2921 | 2098 | client = jack_client_open( "RtApiJack", jackoptions, status ); | ||
2922 | 2099 | if ( client == 0 ) { | ||
2923 | 2100 | errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!"; | ||
2924 | 2101 | error( RtAudioError::WARNING ); | ||
2925 | 2102 | return FAILURE; | ||
2926 | 2103 | } | ||
2927 | 2104 | } | ||
2928 | 2105 | else { | ||
2929 | 2106 | // The handle must have been created on an earlier pass. | ||
2930 | 2107 | client = handle->client; | ||
2931 | 2108 | } | ||
2932 | 2109 | |||
2933 | 2110 | const char **ports; | ||
2934 | 2111 | std::string port, previousPort, deviceName; | ||
2935 | 2112 | unsigned int nPorts = 0, nDevices = 0; | ||
2936 | 2113 | ports = jack_get_ports( client, NULL, NULL, 0 ); | ||
2937 | 2114 | if ( ports ) { | ||
2938 | 2115 | // Parse the port names up to the first colon (:). | ||
2939 | 2116 | size_t iColon = 0; | ||
2940 | 2117 | do { | ||
2941 | 2118 | port = (char *) ports[ nPorts ]; | ||
2942 | 2119 | iColon = port.find(":"); | ||
2943 | 2120 | if ( iColon != std::string::npos ) { | ||
2944 | 2121 | port = port.substr( 0, iColon ); | ||
2945 | 2122 | if ( port != previousPort ) { | ||
2946 | 2123 | if ( nDevices == device ) deviceName = port; | ||
2947 | 2124 | nDevices++; | ||
2948 | 2125 | previousPort = port; | ||
2949 | 2126 | } | ||
2950 | 2127 | } | ||
2951 | 2128 | } while ( ports[++nPorts] ); | ||
2952 | 2129 | free( ports ); | ||
2953 | 2130 | } | ||
2954 | 2131 | |||
2955 | 2132 | if ( device >= nDevices ) { | ||
2956 | 2133 | errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!"; | ||
2957 | 2134 | return FAILURE; | ||
2958 | 2135 | } | ||
2959 | 2136 | |||
2960 | 2137 | // Count the available ports containing the client name as device | ||
2961 | 2138 | // channels. Jack "input ports" equal RtAudio output channels. | ||
2962 | 2139 | unsigned int nChannels = 0; | ||
2963 | 2140 | unsigned long flag = JackPortIsInput; | ||
2964 | 2141 | if ( mode == INPUT ) flag = JackPortIsOutput; | ||
2965 | 2142 | ports = jack_get_ports( client, deviceName.c_str(), NULL, flag ); | ||
2966 | 2143 | if ( ports ) { | ||
2967 | 2144 | while ( ports[ nChannels ] ) nChannels++; | ||
2968 | 2145 | free( ports ); | ||
2969 | 2146 | } | ||
2970 | 2147 | |||
2971 | 2148 | // Compare the jack ports for specified client to the requested number of channels. | ||
2972 | 2149 | if ( nChannels < (channels + firstChannel) ) { | ||
2973 | 2150 | errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ")."; | ||
2974 | 2151 | errorText_ = errorStream_.str(); | ||
2975 | 2152 | return FAILURE; | ||
2976 | 2153 | } | ||
2977 | 2154 | |||
2978 | 2155 | // Check the jack server sample rate. | ||
2979 | 2156 | unsigned int jackRate = jack_get_sample_rate( client ); | ||
2980 | 2157 | if ( sampleRate != jackRate ) { | ||
2981 | 2158 | jack_client_close( client ); | ||
2982 | 2159 | errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ")."; | ||
2983 | 2160 | errorText_ = errorStream_.str(); | ||
2984 | 2161 | return FAILURE; | ||
2985 | 2162 | } | ||
2986 | 2163 | stream_.sampleRate = jackRate; | ||
2987 | 2164 | |||
2988 | 2165 | // Get the latency of the JACK port. | ||
2989 | 2166 | ports = jack_get_ports( client, deviceName.c_str(), NULL, flag ); | ||
2990 | 2167 | if ( ports[ firstChannel ] ) { | ||
2991 | 2168 | // Added by Ge Wang | ||
2992 | 2169 | jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency); | ||
2993 | 2170 | // the range (usually the min and max are equal) | ||
2994 | 2171 | jack_latency_range_t latrange; latrange.min = latrange.max = 0; | ||
2995 | 2172 | // get the latency range | ||
2996 | 2173 | jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange ); | ||
2997 | 2174 | // be optimistic, use the min! | ||
2998 | 2175 | stream_.latency[mode] = latrange.min; | ||
2999 | 2176 | //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) ); | ||
3000 | 2177 | } | ||
3001 | 2178 | free( ports ); | ||
3002 | 2179 | |||
3003 | 2180 | // The jack server always uses 32-bit floating-point data. | ||
3004 | 2181 | stream_.deviceFormat[mode] = RTAUDIO_FLOAT32; | ||
3005 | 2182 | stream_.userFormat = format; | ||
3006 | 2183 | |||
3007 | 2184 | if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false; | ||
3008 | 2185 | else stream_.userInterleaved = true; | ||
3009 | 2186 | |||
3010 | 2187 | // Jack always uses non-interleaved buffers. | ||
3011 | 2188 | stream_.deviceInterleaved[mode] = false; | ||
3012 | 2189 | |||
3013 | 2190 | // Jack always provides host byte-ordered data. | ||
3014 | 2191 | stream_.doByteSwap[mode] = false; | ||
3015 | 2192 | |||
3016 | 2193 | // Get the buffer size. The buffer size and number of buffers | ||
3017 | 2194 | // (periods) is set when the jack server is started. | ||
3018 | 2195 | stream_.bufferSize = (int) jack_get_buffer_size( client ); | ||
3019 | 2196 | *bufferSize = stream_.bufferSize; | ||
3020 | 2197 | |||
3021 | 2198 | stream_.nDeviceChannels[mode] = channels; | ||
3022 | 2199 | stream_.nUserChannels[mode] = channels; | ||
3023 | 2200 | |||
3024 | 2201 | // Set flags for buffer conversion. | ||
3025 | 2202 | stream_.doConvertBuffer[mode] = false; | ||
3026 | 2203 | if ( stream_.userFormat != stream_.deviceFormat[mode] ) | ||
3027 | 2204 | stream_.doConvertBuffer[mode] = true; | ||
3028 | 2205 | if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && | ||
3029 | 2206 | stream_.nUserChannels[mode] > 1 ) | ||
3030 | 2207 | stream_.doConvertBuffer[mode] = true; | ||
3031 | 2208 | |||
3032 | 2209 | // Allocate our JackHandle structure for the stream. | ||
3033 | 2210 | if ( handle == 0 ) { | ||
3034 | 2211 | try { | ||
3035 | 2212 | handle = new JackHandle; | ||
3036 | 2213 | } | ||
3037 | 2214 | catch ( std::bad_alloc& ) { | ||
3038 | 2215 | errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory."; | ||
3039 | 2216 | goto error; | ||
3040 | 2217 | } | ||
3041 | 2218 | |||
3042 | 2219 | if ( pthread_cond_init(&handle->condition, NULL) ) { | ||
3043 | 2220 | errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable."; | ||
3044 | 2221 | goto error; | ||
3045 | 2222 | } | ||
3046 | 2223 | stream_.apiHandle = (void *) handle; | ||
3047 | 2224 | handle->client = client; | ||
3048 | 2225 | } | ||
3049 | 2226 | handle->deviceName[mode] = deviceName; | ||
3050 | 2227 | |||
3051 | 2228 | // Allocate necessary internal buffers. | ||
3052 | 2229 | unsigned long bufferBytes; | ||
3053 | 2230 | bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat ); | ||
3054 | 2231 | stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); | ||
3055 | 2232 | if ( stream_.userBuffer[mode] == NULL ) { | ||
3056 | 2233 | errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory."; | ||
3057 | 2234 | goto error; | ||
3058 | 2235 | } | ||
3059 | 2236 | |||
3060 | 2237 | if ( stream_.doConvertBuffer[mode] ) { | ||
3061 | 2238 | |||
3062 | 2239 | bool makeBuffer = true; | ||
3063 | 2240 | if ( mode == OUTPUT ) | ||
3064 | 2241 | bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); | ||
3065 | 2242 | else { // mode == INPUT | ||
3066 | 2243 | bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] ); | ||
3067 | 2244 | if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { | ||
3068 | 2245 | unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]); | ||
3069 | 2246 | if ( bufferBytes < bytesOut ) makeBuffer = false; | ||
3070 | 2247 | } | ||
3071 | 2248 | } | ||
3072 | 2249 | |||
3073 | 2250 | if ( makeBuffer ) { | ||
3074 | 2251 | bufferBytes *= *bufferSize; | ||
3075 | 2252 | if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); | ||
3076 | 2253 | stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); | ||
3077 | 2254 | if ( stream_.deviceBuffer == NULL ) { | ||
3078 | 2255 | errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory."; | ||
3079 | 2256 | goto error; | ||
3080 | 2257 | } | ||
3081 | 2258 | } | ||
3082 | 2259 | } | ||
3083 | 2260 | |||
3084 | 2261 | // Allocate memory for the Jack ports (channels) identifiers. | ||
3085 | 2262 | handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels ); | ||
3086 | 2263 | if ( handle->ports[mode] == NULL ) { | ||
3087 | 2264 | errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory."; | ||
3088 | 2265 | goto error; | ||
3089 | 2266 | } | ||
3090 | 2267 | |||
3091 | 2268 | stream_.device[mode] = device; | ||
3092 | 2269 | stream_.channelOffset[mode] = firstChannel; | ||
3093 | 2270 | stream_.state = STREAM_STOPPED; | ||
3094 | 2271 | stream_.callbackInfo.object = (void *) this; | ||
3095 | 2272 | |||
3096 | 2273 | if ( stream_.mode == OUTPUT && mode == INPUT ) | ||
3097 | 2274 | // We had already set up the stream for output. | ||
3098 | 2275 | stream_.mode = DUPLEX; | ||
3099 | 2276 | else { | ||
3100 | 2277 | stream_.mode = mode; | ||
3101 | 2278 | jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo ); | ||
3102 | 2279 | jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle ); | ||
3103 | 2280 | jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo ); | ||
3104 | 2281 | } | ||
3105 | 2282 | |||
3106 | 2283 | // Register our ports. | ||
3107 | 2284 | char label[64]; | ||
3108 | 2285 | if ( mode == OUTPUT ) { | ||
3109 | 2286 | for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) { | ||
3110 | 2287 | snprintf( label, 64, "outport %d", i ); | ||
3111 | 2288 | handle->ports[0][i] = jack_port_register( handle->client, (const char *)label, | ||
3112 | 2289 | JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 ); | ||
3113 | 2290 | } | ||
3114 | 2291 | } | ||
3115 | 2292 | else { | ||
3116 | 2293 | for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) { | ||
3117 | 2294 | snprintf( label, 64, "inport %d", i ); | ||
3118 | 2295 | handle->ports[1][i] = jack_port_register( handle->client, (const char *)label, | ||
3119 | 2296 | JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 ); | ||
3120 | 2297 | } | ||
3121 | 2298 | } | ||
3122 | 2299 | |||
3123 | 2300 | // Setup the buffer conversion information structure. We don't use | ||
3124 | 2301 | // buffers to do channel offsets, so we override that parameter | ||
3125 | 2302 | // here. | ||
3126 | 2303 | if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 ); | ||
3127 | 2304 | |||
3128 | 2305 | return SUCCESS; | ||
3129 | 2306 | |||
3130 | 2307 | error: | ||
3131 | 2308 | if ( handle ) { | ||
3132 | 2309 | pthread_cond_destroy( &handle->condition ); | ||
3133 | 2310 | jack_client_close( handle->client ); | ||
3134 | 2311 | |||
3135 | 2312 | if ( handle->ports[0] ) free( handle->ports[0] ); | ||
3136 | 2313 | if ( handle->ports[1] ) free( handle->ports[1] ); | ||
3137 | 2314 | |||
3138 | 2315 | delete handle; | ||
3139 | 2316 | stream_.apiHandle = 0; | ||
3140 | 2317 | } | ||
3141 | 2318 | |||
3142 | 2319 | for ( int i=0; i<2; i++ ) { | ||
3143 | 2320 | if ( stream_.userBuffer[i] ) { | ||
3144 | 2321 | free( stream_.userBuffer[i] ); | ||
3145 | 2322 | stream_.userBuffer[i] = 0; | ||
3146 | 2323 | } | ||
3147 | 2324 | } | ||
3148 | 2325 | |||
3149 | 2326 | if ( stream_.deviceBuffer ) { | ||
3150 | 2327 | free( stream_.deviceBuffer ); | ||
3151 | 2328 | stream_.deviceBuffer = 0; | ||
3152 | 2329 | } | ||
3153 | 2330 | |||
3154 | 2331 | return FAILURE; | ||
3155 | 2332 | } | ||
3156 | 2333 | |||
3157 | 2334 | void RtApiJack :: closeStream( void ) | ||
3158 | 2335 | { | ||
3159 | 2336 | if ( stream_.state == STREAM_CLOSED ) { | ||
3160 | 2337 | errorText_ = "RtApiJack::closeStream(): no open stream to close!"; | ||
3161 | 2338 | error( RtAudioError::WARNING ); | ||
3162 | 2339 | return; | ||
3163 | 2340 | } | ||
3164 | 2341 | |||
3165 | 2342 | JackHandle *handle = (JackHandle *) stream_.apiHandle; | ||
3166 | 2343 | if ( handle ) { | ||
3167 | 2344 | |||
3168 | 2345 | if ( stream_.state == STREAM_RUNNING ) | ||
3169 | 2346 | jack_deactivate( handle->client ); | ||
3170 | 2347 | |||
3171 | 2348 | jack_client_close( handle->client ); | ||
3172 | 2349 | } | ||
3173 | 2350 | |||
3174 | 2351 | if ( handle ) { | ||
3175 | 2352 | if ( handle->ports[0] ) free( handle->ports[0] ); | ||
3176 | 2353 | if ( handle->ports[1] ) free( handle->ports[1] ); | ||
3177 | 2354 | pthread_cond_destroy( &handle->condition ); | ||
3178 | 2355 | delete handle; | ||
3179 | 2356 | stream_.apiHandle = 0; | ||
3180 | 2357 | } | ||
3181 | 2358 | |||
3182 | 2359 | for ( int i=0; i<2; i++ ) { | ||
3183 | 2360 | if ( stream_.userBuffer[i] ) { | ||
3184 | 2361 | free( stream_.userBuffer[i] ); | ||
3185 | 2362 | stream_.userBuffer[i] = 0; | ||
3186 | 2363 | } | ||
3187 | 2364 | } | ||
3188 | 2365 | |||
3189 | 2366 | if ( stream_.deviceBuffer ) { | ||
3190 | 2367 | free( stream_.deviceBuffer ); | ||
3191 | 2368 | stream_.deviceBuffer = 0; | ||
3192 | 2369 | } | ||
3193 | 2370 | |||
3194 | 2371 | stream_.mode = UNINITIALIZED; | ||
3195 | 2372 | stream_.state = STREAM_CLOSED; | ||
3196 | 2373 | } | ||
3197 | 2374 | |||
3198 | 2375 | void RtApiJack :: startStream( void ) | ||
3199 | 2376 | { | ||
3200 | 2377 | verifyStream(); | ||
3201 | 2378 | if ( stream_.state == STREAM_RUNNING ) { | ||
3202 | 2379 | errorText_ = "RtApiJack::startStream(): the stream is already running!"; | ||
3203 | 2380 | error( RtAudioError::WARNING ); | ||
3204 | 2381 | return; | ||
3205 | 2382 | } | ||
3206 | 2383 | |||
3207 | 2384 | JackHandle *handle = (JackHandle *) stream_.apiHandle; | ||
3208 | 2385 | int result = jack_activate( handle->client ); | ||
3209 | 2386 | if ( result ) { | ||
3210 | 2387 | errorText_ = "RtApiJack::startStream(): unable to activate JACK client!"; | ||
3211 | 2388 | goto unlock; | ||
3212 | 2389 | } | ||
3213 | 2390 | |||
3214 | 2391 | const char **ports; | ||
3215 | 2392 | |||
3216 | 2393 | // Get the list of available ports. | ||
3217 | 2394 | if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { | ||
3218 | 2395 | result = 1; | ||
3219 | 2396 | ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput); | ||
3220 | 2397 | if ( ports == NULL) { | ||
3221 | 2398 | errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!"; | ||
3222 | 2399 | goto unlock; | ||
3223 | 2400 | } | ||
3224 | 2401 | |||
3225 | 2402 | // Now make the port connections. Since RtAudio wasn't designed to | ||
3226 | 2403 | // allow the user to select particular channels of a device, we'll | ||
3227 | 2404 | // just open the first "nChannels" ports with offset. | ||
3228 | 2405 | for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) { | ||
3229 | 2406 | result = 1; | ||
3230 | 2407 | if ( ports[ stream_.channelOffset[0] + i ] ) | ||
3231 | 2408 | result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] ); | ||
3232 | 2409 | if ( result ) { | ||
3233 | 2410 | free( ports ); | ||
3234 | 2411 | errorText_ = "RtApiJack::startStream(): error connecting output ports!"; | ||
3235 | 2412 | goto unlock; | ||
3236 | 2413 | } | ||
3237 | 2414 | } | ||
3238 | 2415 | free(ports); | ||
3239 | 2416 | } | ||
3240 | 2417 | |||
3241 | 2418 | if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) { | ||
3242 | 2419 | result = 1; | ||
3243 | 2420 | ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput ); | ||
3244 | 2421 | if ( ports == NULL) { | ||
3245 | 2422 | errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!"; | ||
3246 | 2423 | goto unlock; | ||
3247 | 2424 | } | ||
3248 | 2425 | |||
3249 | 2426 | // Now make the port connections. See note above. | ||
3250 | 2427 | for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) { | ||
3251 | 2428 | result = 1; | ||
3252 | 2429 | if ( ports[ stream_.channelOffset[1] + i ] ) | ||
3253 | 2430 | result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) ); | ||
3254 | 2431 | if ( result ) { | ||
3255 | 2432 | free( ports ); | ||
3256 | 2433 | errorText_ = "RtApiJack::startStream(): error connecting input ports!"; | ||
3257 | 2434 | goto unlock; | ||
3258 | 2435 | } | ||
3259 | 2436 | } | ||
3260 | 2437 | free(ports); | ||
3261 | 2438 | } | ||
3262 | 2439 | |||
3263 | 2440 | handle->drainCounter = 0; | ||
3264 | 2441 | handle->internalDrain = false; | ||
3265 | 2442 | stream_.state = STREAM_RUNNING; | ||
3266 | 2443 | |||
3267 | 2444 | unlock: | ||
3268 | 2445 | if ( result == 0 ) return; | ||
3269 | 2446 | error( RtAudioError::SYSTEM_ERROR ); | ||
3270 | 2447 | } | ||
3271 | 2448 | |||
3272 | 2449 | void RtApiJack :: stopStream( void ) | ||
3273 | 2450 | { | ||
3274 | 2451 | verifyStream(); | ||
3275 | 2452 | if ( stream_.state == STREAM_STOPPED ) { | ||
3276 | 2453 | errorText_ = "RtApiJack::stopStream(): the stream is already stopped!"; | ||
3277 | 2454 | error( RtAudioError::WARNING ); | ||
3278 | 2455 | return; | ||
3279 | 2456 | } | ||
3280 | 2457 | |||
3281 | 2458 | JackHandle *handle = (JackHandle *) stream_.apiHandle; | ||
3282 | 2459 | if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { | ||
3283 | 2460 | |||
3284 | 2461 | if ( handle->drainCounter == 0 ) { | ||
3285 | 2462 | handle->drainCounter = 2; | ||
3286 | 2463 | pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled | ||
3287 | 2464 | } | ||
3288 | 2465 | } | ||
3289 | 2466 | |||
3290 | 2467 | jack_deactivate( handle->client ); | ||
3291 | 2468 | stream_.state = STREAM_STOPPED; | ||
3292 | 2469 | } | ||
3293 | 2470 | |||
3294 | 2471 | void RtApiJack :: abortStream( void ) | ||
3295 | 2472 | { | ||
3296 | 2473 | verifyStream(); | ||
3297 | 2474 | if ( stream_.state == STREAM_STOPPED ) { | ||
3298 | 2475 | errorText_ = "RtApiJack::abortStream(): the stream is already stopped!"; | ||
3299 | 2476 | error( RtAudioError::WARNING ); | ||
3300 | 2477 | return; | ||
3301 | 2478 | } | ||
3302 | 2479 | |||
3303 | 2480 | JackHandle *handle = (JackHandle *) stream_.apiHandle; | ||
3304 | 2481 | handle->drainCounter = 2; | ||
3305 | 2482 | |||
3306 | 2483 | stopStream(); | ||
3307 | 2484 | } | ||
3308 | 2485 | |||
3309 | 2486 | // This function will be called by a spawned thread when the user | ||
3310 | 2487 | // callback function signals that the stream should be stopped or | ||
3311 | 2488 | // aborted. It is necessary to handle it this way because the | ||
3312 | 2489 | // callbackEvent() function must return before the jack_deactivate() | ||
3313 | 2490 | // function will return. | ||
3314 | 2491 | static void *jackStopStream( void *ptr ) | ||
3315 | 2492 | { | ||
3316 | 2493 | CallbackInfo *info = (CallbackInfo *) ptr; | ||
3317 | 2494 | RtApiJack *object = (RtApiJack *) info->object; | ||
3318 | 2495 | |||
3319 | 2496 | object->stopStream(); | ||
3320 | 2497 | pthread_exit( NULL ); | ||
3321 | 2498 | } | ||
3322 | 2499 | |||
3323 | 2500 | bool RtApiJack :: callbackEvent( unsigned long nframes ) | ||
3324 | 2501 | { | ||
3325 | 2502 | if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS; | ||
3326 | 2503 | if ( stream_.state == STREAM_CLOSED ) { | ||
3327 | 2504 | errorText_ = "RtApiJack::callbackEvent(): the stream is closed ... this shouldn't happen!"; | ||
3328 | 2505 | error( RtAudioError::WARNING ); | ||
3329 | 2506 | return FAILURE; | ||
3330 | 2507 | } | ||
3331 | 2508 | if ( stream_.bufferSize != nframes ) { | ||
3332 | 2509 | errorText_ = "RtApiJack::callbackEvent(): the JACK buffer size has changed ... cannot process!"; | ||
3333 | 2510 | error( RtAudioError::WARNING ); | ||
3334 | 2511 | return FAILURE; | ||
3335 | 2512 | } | ||
3336 | 2513 | |||
3337 | 2514 | CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo; | ||
3338 | 2515 | JackHandle *handle = (JackHandle *) stream_.apiHandle; | ||
3339 | 2516 | |||
3340 | 2517 | // Check if we were draining the stream and signal is finished. | ||
3341 | 2518 | if ( handle->drainCounter > 3 ) { | ||
3342 | 2519 | ThreadHandle threadId; | ||
3343 | 2520 | |||
3344 | 2521 | stream_.state = STREAM_STOPPING; | ||
3345 | 2522 | if ( handle->internalDrain == true ) | ||
3346 | 2523 | pthread_create( &threadId, NULL, jackStopStream, info ); | ||
3347 | 2524 | else | ||
3348 | 2525 | pthread_cond_signal( &handle->condition ); | ||
3349 | 2526 | return SUCCESS; | ||
3350 | 2527 | } | ||
3351 | 2528 | |||
3352 | 2529 | // Invoke user callback first, to get fresh output data. | ||
3353 | 2530 | if ( handle->drainCounter == 0 ) { | ||
3354 | 2531 | RtAudioCallback callback = (RtAudioCallback) info->callback; | ||
3355 | 2532 | double streamTime = getStreamTime(); | ||
3356 | 2533 | RtAudioStreamStatus status = 0; | ||
3357 | 2534 | if ( stream_.mode != INPUT && handle->xrun[0] == true ) { | ||
3358 | 2535 | status |= RTAUDIO_OUTPUT_UNDERFLOW; | ||
3359 | 2536 | handle->xrun[0] = false; | ||
3360 | 2537 | } | ||
3361 | 2538 | if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) { | ||
3362 | 2539 | status |= RTAUDIO_INPUT_OVERFLOW; | ||
3363 | 2540 | handle->xrun[1] = false; | ||
3364 | 2541 | } | ||
3365 | 2542 | int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1], | ||
3366 | 2543 | stream_.bufferSize, streamTime, status, info->userData ); | ||
3367 | 2544 | if ( cbReturnValue == 2 ) { | ||
3368 | 2545 | stream_.state = STREAM_STOPPING; | ||
3369 | 2546 | handle->drainCounter = 2; | ||
3370 | 2547 | ThreadHandle id; | ||
3371 | 2548 | pthread_create( &id, NULL, jackStopStream, info ); | ||
3372 | 2549 | return SUCCESS; | ||
3373 | 2550 | } | ||
3374 | 2551 | else if ( cbReturnValue == 1 ) { | ||
3375 | 2552 | handle->drainCounter = 1; | ||
3376 | 2553 | handle->internalDrain = true; | ||
3377 | 2554 | } | ||
3378 | 2555 | } | ||
3379 | 2556 | |||
3380 | 2557 | jack_default_audio_sample_t *jackbuffer; | ||
3381 | 2558 | unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t ); | ||
3382 | 2559 | if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { | ||
3383 | 2560 | |||
3384 | 2561 | if ( handle->drainCounter > 1 ) { // write zeros to the output stream | ||
3385 | 2562 | |||
3386 | 2563 | for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) { | ||
3387 | 2564 | jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes ); | ||
3388 | 2565 | memset( jackbuffer, 0, bufferBytes ); | ||
3389 | 2566 | } | ||
3390 | 2567 | |||
3391 | 2568 | } | ||
3392 | 2569 | else if ( stream_.doConvertBuffer[0] ) { | ||
3393 | 2570 | |||
3394 | 2571 | convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] ); | ||
3395 | 2572 | |||
3396 | 2573 | for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) { | ||
3397 | 2574 | jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes ); | ||
3398 | 2575 | memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes ); | ||
3399 | 2576 | } | ||
3400 | 2577 | } | ||
3401 | 2578 | else { // no buffer conversion | ||
3402 | 2579 | for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) { | ||
3403 | 2580 | jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes ); | ||
3404 | 2581 | memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes ); | ||
3405 | 2582 | } | ||
3406 | 2583 | } | ||
3407 | 2584 | } | ||
3408 | 2585 | |||
3409 | 2586 | // Don't bother draining input | ||
3410 | 2587 | if ( handle->drainCounter ) { | ||
3411 | 2588 | handle->drainCounter++; | ||
3412 | 2589 | goto unlock; | ||
3413 | 2590 | } | ||
3414 | 2591 | |||
3415 | 2592 | if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) { | ||
3416 | 2593 | |||
3417 | 2594 | if ( stream_.doConvertBuffer[1] ) { | ||
3418 | 2595 | for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) { | ||
3419 | 2596 | jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes ); | ||
3420 | 2597 | memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes ); | ||
3421 | 2598 | } | ||
3422 | 2599 | convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] ); | ||
3423 | 2600 | } | ||
3424 | 2601 | else { // no buffer conversion | ||
3425 | 2602 | for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) { | ||
3426 | 2603 | jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes ); | ||
3427 | 2604 | memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes ); | ||
3428 | 2605 | } | ||
3429 | 2606 | } | ||
3430 | 2607 | } | ||
3431 | 2608 | |||
3432 | 2609 | unlock: | ||
3433 | 2610 | RtApi::tickStreamTime(); | ||
3434 | 2611 | return SUCCESS; | ||
3435 | 2612 | } | ||
3436 | 2613 | //******************** End of __UNIX_JACK__ *********************// | ||
3437 | 2614 | #endif | ||
3438 | 2615 | |||
3439 | 2616 | #if defined(__WINDOWS_ASIO__) // ASIO API on Windows | ||
3440 | 2617 | |||
3441 | 2618 | // The ASIO API is designed around a callback scheme, so this | ||
3442 | 2619 | // implementation is similar to that used for OS-X CoreAudio and Linux | ||
3443 | 2620 | // Jack. The primary constraint with ASIO is that it only allows | ||
3444 | 2621 | // access to a single driver at a time. Thus, it is not possible to | ||
3445 | 2622 | // have more than one simultaneous RtAudio stream. | ||
3446 | 2623 | // | ||
3447 | 2624 | // This implementation also requires a number of external ASIO files | ||
3448 | 2625 | // and a few global variables. The ASIO callback scheme does not | ||
3449 | 2626 | // allow for the passing of user data, so we must create a global | ||
3450 | 2627 | // pointer to our callbackInfo structure. | ||
3451 | 2628 | // | ||
3452 | 2629 | // On unix systems, we make use of a pthread condition variable. | ||
3453 | 2630 | // Since there is no equivalent in Windows, I hacked something based | ||
3454 | 2631 | // on information found in | ||
3455 | 2632 | // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html. | ||
3456 | 2633 | |||
3457 | 2634 | #include "asiosys.h" | ||
3458 | 2635 | #include "asio.h" | ||
3459 | 2636 | #include "iasiothiscallresolver.h" | ||
3460 | 2637 | #include "asiodrivers.h" | ||
3461 | 2638 | #include <cmath> | ||
3462 | 2639 | |||
3463 | 2640 | static AsioDrivers drivers; | ||
3464 | 2641 | static ASIOCallbacks asioCallbacks; | ||
3465 | 2642 | static ASIODriverInfo driverInfo; | ||
3466 | 2643 | static CallbackInfo *asioCallbackInfo; | ||
3467 | 2644 | static bool asioXRun; | ||
3468 | 2645 | |||
3469 | 2646 | struct AsioHandle { | ||
3470 | 2647 | int drainCounter; // Tracks callback counts when draining | ||
3471 | 2648 | bool internalDrain; // Indicates if stop is initiated from callback or not. | ||
3472 | 2649 | ASIOBufferInfo *bufferInfos; | ||
3473 | 2650 | HANDLE condition; | ||
3474 | 2651 | |||
3475 | 2652 | AsioHandle() | ||
3476 | 2653 | :drainCounter(0), internalDrain(false), bufferInfos(0) {} | ||
3477 | 2654 | }; | ||
3478 | 2655 | |||
3479 | 2656 | // Function declarations (definitions at end of section) | ||
3480 | 2657 | static const char* getAsioErrorString( ASIOError result ); | ||
3481 | 2658 | static void sampleRateChanged( ASIOSampleRate sRate ); | ||
3482 | 2659 | static long asioMessages( long selector, long value, void* message, double* opt ); | ||
3483 | 2660 | |||
3484 | 2661 | RtApiAsio :: RtApiAsio() | ||
3485 | 2662 | { | ||
3486 | 2663 | // ASIO cannot run on a multi-threaded apartment. You can call | ||
3487 | 2664 | // CoInitialize beforehand, but it must be for apartment threading | ||
3488 | 2665 | // (in which case, CoInitialize will return S_FALSE here). | ||
3489 | 2666 | coInitialized_ = false; | ||
3490 | 2667 | HRESULT hr = CoInitialize( NULL ); | ||
3491 | 2668 | if ( FAILED(hr) ) { | ||
3492 | 2669 | errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)"; | ||
3493 | 2670 | error( RtAudioError::WARNING ); | ||
3494 | 2671 | } | ||
3495 | 2672 | coInitialized_ = true; | ||
3496 | 2673 | |||
3497 | 2674 | drivers.removeCurrentDriver(); | ||
3498 | 2675 | driverInfo.asioVersion = 2; | ||
3499 | 2676 | |||
3500 | 2677 | // See note in DirectSound implementation about GetDesktopWindow(). | ||
3501 | 2678 | driverInfo.sysRef = GetForegroundWindow(); | ||
3502 | 2679 | } | ||
3503 | 2680 | |||
3504 | 2681 | RtApiAsio :: ~RtApiAsio() | ||
3505 | 2682 | { | ||
3506 | 2683 | if ( stream_.state != STREAM_CLOSED ) closeStream(); | ||
3507 | 2684 | if ( coInitialized_ ) CoUninitialize(); | ||
3508 | 2685 | } | ||
3509 | 2686 | |||
3510 | 2687 | unsigned int RtApiAsio :: getDeviceCount( void ) | ||
3511 | 2688 | { | ||
3512 | 2689 | return (unsigned int) drivers.asioGetNumDev(); | ||
3513 | 2690 | } | ||
3514 | 2691 | |||
3515 | 2692 | RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device ) | ||
3516 | 2693 | { | ||
3517 | 2694 | RtAudio::DeviceInfo info; | ||
3518 | 2695 | info.probed = false; | ||
3519 | 2696 | |||
3520 | 2697 | // Get device ID | ||
3521 | 2698 | unsigned int nDevices = getDeviceCount(); | ||
3522 | 2699 | if ( nDevices == 0 ) { | ||
3523 | 2700 | errorText_ = "RtApiAsio::getDeviceInfo: no devices found!"; | ||
3524 | 2701 | error( RtAudioError::INVALID_USE ); | ||
3525 | 2702 | return info; | ||
3526 | 2703 | } | ||
3527 | 2704 | |||
3528 | 2705 | if ( device >= nDevices ) { | ||
3529 | 2706 | errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!"; | ||
3530 | 2707 | error( RtAudioError::INVALID_USE ); | ||
3531 | 2708 | return info; | ||
3532 | 2709 | } | ||
3533 | 2710 | |||
3534 | 2711 | // If a stream is already open, we cannot probe other devices. Thus, use the saved results. | ||
3535 | 2712 | if ( stream_.state != STREAM_CLOSED ) { | ||
3536 | 2713 | if ( device >= devices_.size() ) { | ||
3537 | 2714 | errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened."; | ||
3538 | 2715 | error( RtAudioError::WARNING ); | ||
3539 | 2716 | return info; | ||
3540 | 2717 | } | ||
3541 | 2718 | return devices_[ device ]; | ||
3542 | 2719 | } | ||
3543 | 2720 | |||
3544 | 2721 | char driverName[32]; | ||
3545 | 2722 | ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 ); | ||
3546 | 2723 | if ( result != ASE_OK ) { | ||
3547 | 2724 | errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ")."; | ||
3548 | 2725 | errorText_ = errorStream_.str(); | ||
3549 | 2726 | error( RtAudioError::WARNING ); | ||
3550 | 2727 | return info; | ||
3551 | 2728 | } | ||
3552 | 2729 | |||
3553 | 2730 | info.name = driverName; | ||
3554 | 2731 | |||
3555 | 2732 | if ( !drivers.loadDriver( driverName ) ) { | ||
3556 | 2733 | errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ")."; | ||
3557 | 2734 | errorText_ = errorStream_.str(); | ||
3558 | 2735 | error( RtAudioError::WARNING ); | ||
3559 | 2736 | return info; | ||
3560 | 2737 | } | ||
3561 | 2738 | |||
3562 | 2739 | result = ASIOInit( &driverInfo ); | ||
3563 | 2740 | if ( result != ASE_OK ) { | ||
3564 | 2741 | errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ")."; | ||
3565 | 2742 | errorText_ = errorStream_.str(); | ||
3566 | 2743 | error( RtAudioError::WARNING ); | ||
3567 | 2744 | return info; | ||
3568 | 2745 | } | ||
3569 | 2746 | |||
3570 | 2747 | // Determine the device channel information. | ||
3571 | 2748 | long inputChannels, outputChannels; | ||
3572 | 2749 | result = ASIOGetChannels( &inputChannels, &outputChannels ); | ||
3573 | 2750 | if ( result != ASE_OK ) { | ||
3574 | 2751 | drivers.removeCurrentDriver(); | ||
3575 | 2752 | errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ")."; | ||
3576 | 2753 | errorText_ = errorStream_.str(); | ||
3577 | 2754 | error( RtAudioError::WARNING ); | ||
3578 | 2755 | return info; | ||
3579 | 2756 | } | ||
3580 | 2757 | |||
3581 | 2758 | info.outputChannels = outputChannels; | ||
3582 | 2759 | info.inputChannels = inputChannels; | ||
3583 | 2760 | if ( info.outputChannels > 0 && info.inputChannels > 0 ) | ||
3584 | 2761 | info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels; | ||
3585 | 2762 | |||
3586 | 2763 | // Determine the supported sample rates. | ||
3587 | 2764 | info.sampleRates.clear(); | ||
3588 | 2765 | for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) { | ||
3589 | 2766 | result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] ); | ||
3590 | 2767 | if ( result == ASE_OK ) | ||
3591 | 2768 | info.sampleRates.push_back( SAMPLE_RATES[i] ); | ||
3592 | 2769 | } | ||
3593 | 2770 | |||
3594 | 2771 | // Determine supported data types ... just check first channel and assume rest are the same. | ||
3595 | 2772 | ASIOChannelInfo channelInfo; | ||
3596 | 2773 | channelInfo.channel = 0; | ||
3597 | 2774 | channelInfo.isInput = true; | ||
3598 | 2775 | if ( info.inputChannels <= 0 ) channelInfo.isInput = false; | ||
3599 | 2776 | result = ASIOGetChannelInfo( &channelInfo ); | ||
3600 | 2777 | if ( result != ASE_OK ) { | ||
3601 | 2778 | drivers.removeCurrentDriver(); | ||
3602 | 2779 | errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ")."; | ||
3603 | 2780 | errorText_ = errorStream_.str(); | ||
3604 | 2781 | error( RtAudioError::WARNING ); | ||
3605 | 2782 | return info; | ||
3606 | 2783 | } | ||
3607 | 2784 | |||
3608 | 2785 | info.nativeFormats = 0; | ||
3609 | 2786 | if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) | ||
3610 | 2787 | info.nativeFormats |= RTAUDIO_SINT16; | ||
3611 | 2788 | else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) | ||
3612 | 2789 | info.nativeFormats |= RTAUDIO_SINT32; | ||
3613 | 2790 | else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) | ||
3614 | 2791 | info.nativeFormats |= RTAUDIO_FLOAT32; | ||
3615 | 2792 | else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) | ||
3616 | 2793 | info.nativeFormats |= RTAUDIO_FLOAT64; | ||
3617 | 2794 | else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) | ||
3618 | 2795 | info.nativeFormats |= RTAUDIO_SINT24; | ||
3619 | 2796 | |||
3620 | 2797 | if ( info.outputChannels > 0 ) | ||
3621 | 2798 | if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true; | ||
3622 | 2799 | if ( info.inputChannels > 0 ) | ||
3623 | 2800 | if ( getDefaultInputDevice() == device ) info.isDefaultInput = true; | ||
3624 | 2801 | |||
3625 | 2802 | info.probed = true; | ||
3626 | 2803 | drivers.removeCurrentDriver(); | ||
3627 | 2804 | return info; | ||
3628 | 2805 | } | ||
3629 | 2806 | |||
3630 | 2807 | static void bufferSwitch( long index, ASIOBool /*processNow*/ ) | ||
3631 | 2808 | { | ||
3632 | 2809 | RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object; | ||
3633 | 2810 | object->callbackEvent( index ); | ||
3634 | 2811 | } | ||
3635 | 2812 | |||
3636 | 2813 | void RtApiAsio :: saveDeviceInfo( void ) | ||
3637 | 2814 | { | ||
3638 | 2815 | devices_.clear(); | ||
3639 | 2816 | |||
3640 | 2817 | unsigned int nDevices = getDeviceCount(); | ||
3641 | 2818 | devices_.resize( nDevices ); | ||
3642 | 2819 | for ( unsigned int i=0; i<nDevices; i++ ) | ||
3643 | 2820 | devices_[i] = getDeviceInfo( i ); | ||
3644 | 2821 | } | ||
3645 | 2822 | |||
3646 | 2823 | bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels, | ||
3647 | 2824 | unsigned int firstChannel, unsigned int sampleRate, | ||
3648 | 2825 | RtAudioFormat format, unsigned int *bufferSize, | ||
3649 | 2826 | RtAudio::StreamOptions *options ) | ||
3650 | 2827 | { | ||
3651 | 2828 | // For ASIO, a duplex stream MUST use the same driver. | ||
3652 | 2829 | if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) { | ||
3653 | 2830 | errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!"; | ||
3654 | 2831 | return FAILURE; | ||
3655 | 2832 | } | ||
3656 | 2833 | |||
3657 | 2834 | char driverName[32]; | ||
3658 | 2835 | ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 ); | ||
3659 | 2836 | if ( result != ASE_OK ) { | ||
3660 | 2837 | errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ")."; | ||
3661 | 2838 | errorText_ = errorStream_.str(); | ||
3662 | 2839 | return FAILURE; | ||
3663 | 2840 | } | ||
3664 | 2841 | |||
3665 | 2842 | // Only load the driver once for duplex stream. | ||
3666 | 2843 | if ( mode != INPUT || stream_.mode != OUTPUT ) { | ||
3667 | 2844 | // The getDeviceInfo() function will not work when a stream is open | ||
3668 | 2845 | // because ASIO does not allow multiple devices to run at the same | ||
3669 | 2846 | // time. Thus, we'll probe the system before opening a stream and | ||
3670 | 2847 | // save the results for use by getDeviceInfo(). | ||
3671 | 2848 | this->saveDeviceInfo(); | ||
3672 | 2849 | |||
3673 | 2850 | if ( !drivers.loadDriver( driverName ) ) { | ||
3674 | 2851 | errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ")."; | ||
3675 | 2852 | errorText_ = errorStream_.str(); | ||
3676 | 2853 | return FAILURE; | ||
3677 | 2854 | } | ||
3678 | 2855 | |||
3679 | 2856 | result = ASIOInit( &driverInfo ); | ||
3680 | 2857 | if ( result != ASE_OK ) { | ||
3681 | 2858 | errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ")."; | ||
3682 | 2859 | errorText_ = errorStream_.str(); | ||
3683 | 2860 | return FAILURE; | ||
3684 | 2861 | } | ||
3685 | 2862 | } | ||
3686 | 2863 | |||
3687 | 2864 | // Check the device channel count. | ||
3688 | 2865 | long inputChannels, outputChannels; | ||
3689 | 2866 | result = ASIOGetChannels( &inputChannels, &outputChannels ); | ||
3690 | 2867 | if ( result != ASE_OK ) { | ||
3691 | 2868 | drivers.removeCurrentDriver(); | ||
3692 | 2869 | errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ")."; | ||
3693 | 2870 | errorText_ = errorStream_.str(); | ||
3694 | 2871 | return FAILURE; | ||
3695 | 2872 | } | ||
3696 | 2873 | |||
3697 | 2874 | if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) || | ||
3698 | 2875 | ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) { | ||
3699 | 2876 | drivers.removeCurrentDriver(); | ||
3700 | 2877 | errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ")."; | ||
3701 | 2878 | errorText_ = errorStream_.str(); | ||
3702 | 2879 | return FAILURE; | ||
3703 | 2880 | } | ||
3704 | 2881 | stream_.nDeviceChannels[mode] = channels; | ||
3705 | 2882 | stream_.nUserChannels[mode] = channels; | ||
3706 | 2883 | stream_.channelOffset[mode] = firstChannel; | ||
3707 | 2884 | |||
3708 | 2885 | // Verify the sample rate is supported. | ||
3709 | 2886 | result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate ); | ||
3710 | 2887 | if ( result != ASE_OK ) { | ||
3711 | 2888 | drivers.removeCurrentDriver(); | ||
3712 | 2889 | errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ")."; | ||
3713 | 2890 | errorText_ = errorStream_.str(); | ||
3714 | 2891 | return FAILURE; | ||
3715 | 2892 | } | ||
3716 | 2893 | |||
3717 | 2894 | // Get the current sample rate | ||
3718 | 2895 | ASIOSampleRate currentRate; | ||
3719 | 2896 | result = ASIOGetSampleRate( ¤tRate ); | ||
3720 | 2897 | if ( result != ASE_OK ) { | ||
3721 | 2898 | drivers.removeCurrentDriver(); | ||
3722 | 2899 | errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate."; | ||
3723 | 2900 | errorText_ = errorStream_.str(); | ||
3724 | 2901 | return FAILURE; | ||
3725 | 2902 | } | ||
3726 | 2903 | |||
3727 | 2904 | // Set the sample rate only if necessary | ||
3728 | 2905 | if ( currentRate != sampleRate ) { | ||
3729 | 2906 | result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate ); | ||
3730 | 2907 | if ( result != ASE_OK ) { | ||
3731 | 2908 | drivers.removeCurrentDriver(); | ||
3732 | 2909 | errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ")."; | ||
3733 | 2910 | errorText_ = errorStream_.str(); | ||
3734 | 2911 | return FAILURE; | ||
3735 | 2912 | } | ||
3736 | 2913 | } | ||
3737 | 2914 | |||
3738 | 2915 | // Determine the driver data type. | ||
3739 | 2916 | ASIOChannelInfo channelInfo; | ||
3740 | 2917 | channelInfo.channel = 0; | ||
3741 | 2918 | if ( mode == OUTPUT ) channelInfo.isInput = false; | ||
3742 | 2919 | else channelInfo.isInput = true; | ||
3743 | 2920 | result = ASIOGetChannelInfo( &channelInfo ); | ||
3744 | 2921 | if ( result != ASE_OK ) { | ||
3745 | 2922 | drivers.removeCurrentDriver(); | ||
3746 | 2923 | errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format."; | ||
3747 | 2924 | errorText_ = errorStream_.str(); | ||
3748 | 2925 | return FAILURE; | ||
3749 | 2926 | } | ||
3750 | 2927 | |||
3751 | 2928 | // Assuming WINDOWS host is always little-endian. | ||
3752 | 2929 | stream_.doByteSwap[mode] = false; | ||
3753 | 2930 | stream_.userFormat = format; | ||
3754 | 2931 | stream_.deviceFormat[mode] = 0; | ||
3755 | 2932 | if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) { | ||
3756 | 2933 | stream_.deviceFormat[mode] = RTAUDIO_SINT16; | ||
3757 | 2934 | if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true; | ||
3758 | 2935 | } | ||
3759 | 2936 | else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) { | ||
3760 | 2937 | stream_.deviceFormat[mode] = RTAUDIO_SINT32; | ||
3761 | 2938 | if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true; | ||
3762 | 2939 | } | ||
3763 | 2940 | else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) { | ||
3764 | 2941 | stream_.deviceFormat[mode] = RTAUDIO_FLOAT32; | ||
3765 | 2942 | if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true; | ||
3766 | 2943 | } | ||
3767 | 2944 | else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) { | ||
3768 | 2945 | stream_.deviceFormat[mode] = RTAUDIO_FLOAT64; | ||
3769 | 2946 | if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true; | ||
3770 | 2947 | } | ||
3771 | 2948 | else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) { | ||
3772 | 2949 | stream_.deviceFormat[mode] = RTAUDIO_SINT24; | ||
3773 | 2950 | if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true; | ||
3774 | 2951 | } | ||
3775 | 2952 | |||
3776 | 2953 | if ( stream_.deviceFormat[mode] == 0 ) { | ||
3777 | 2954 | drivers.removeCurrentDriver(); | ||
3778 | 2955 | errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio."; | ||
3779 | 2956 | errorText_ = errorStream_.str(); | ||
3780 | 2957 | return FAILURE; | ||
3781 | 2958 | } | ||
3782 | 2959 | |||
3783 | 2960 | // Set the buffer size. For a duplex stream, this will end up | ||
3784 | 2961 | // setting the buffer size based on the input constraints, which | ||
3785 | 2962 | // should be ok. | ||
3786 | 2963 | long minSize, maxSize, preferSize, granularity; | ||
3787 | 2964 | result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity ); | ||
3788 | 2965 | if ( result != ASE_OK ) { | ||
3789 | 2966 | drivers.removeCurrentDriver(); | ||
3790 | 2967 | errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size."; | ||
3791 | 2968 | errorText_ = errorStream_.str(); | ||
3792 | 2969 | return FAILURE; | ||
3793 | 2970 | } | ||
3794 | 2971 | |||
3795 | 2972 | if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize; | ||
3796 | 2973 | else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize; | ||
3797 | 2974 | else if ( granularity == -1 ) { | ||
3798 | 2975 | // Make sure bufferSize is a power of two. | ||
3799 | 2976 | int log2_of_min_size = 0; | ||
3800 | 2977 | int log2_of_max_size = 0; | ||
3801 | 2978 | |||
3802 | 2979 | for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) { | ||
3803 | 2980 | if ( minSize & ((long)1 << i) ) log2_of_min_size = i; | ||
3804 | 2981 | if ( maxSize & ((long)1 << i) ) log2_of_max_size = i; | ||
3805 | 2982 | } | ||
3806 | 2983 | |||
3807 | 2984 | long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) ); | ||
3808 | 2985 | int min_delta_num = log2_of_min_size; | ||
3809 | 2986 | |||
3810 | 2987 | for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) { | ||
3811 | 2988 | long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) ); | ||
3812 | 2989 | if (current_delta < min_delta) { | ||
3813 | 2990 | min_delta = current_delta; | ||
3814 | 2991 | min_delta_num = i; | ||
3815 | 2992 | } | ||
3816 | 2993 | } | ||
3817 | 2994 | |||
3818 | 2995 | *bufferSize = ( (unsigned int)1 << min_delta_num ); | ||
3819 | 2996 | if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize; | ||
3820 | 2997 | else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize; | ||
3821 | 2998 | } | ||
3822 | 2999 | else if ( granularity != 0 ) { | ||
3823 | 3000 | // Set to an even multiple of granularity, rounding up. | ||
3824 | 3001 | *bufferSize = (*bufferSize + granularity-1) / granularity * granularity; | ||
3825 | 3002 | } | ||
3826 | 3003 | |||
3827 | 3004 | if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) { | ||
3828 | 3005 | drivers.removeCurrentDriver(); | ||
3829 | 3006 | errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!"; | ||
3830 | 3007 | return FAILURE; | ||
3831 | 3008 | } | ||
3832 | 3009 | |||
3833 | 3010 | stream_.bufferSize = *bufferSize; | ||
3834 | 3011 | stream_.nBuffers = 2; | ||
3835 | 3012 | |||
3836 | 3013 | if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false; | ||
3837 | 3014 | else stream_.userInterleaved = true; | ||
3838 | 3015 | |||
3839 | 3016 | // ASIO always uses non-interleaved buffers. | ||
3840 | 3017 | stream_.deviceInterleaved[mode] = false; | ||
3841 | 3018 | |||
3842 | 3019 | // Allocate, if necessary, our AsioHandle structure for the stream. | ||
3843 | 3020 | AsioHandle *handle = (AsioHandle *) stream_.apiHandle; | ||
3844 | 3021 | if ( handle == 0 ) { | ||
3845 | 3022 | try { | ||
3846 | 3023 | handle = new AsioHandle; | ||
3847 | 3024 | } | ||
3848 | 3025 | catch ( std::bad_alloc& ) { | ||
3849 | 3026 | //if ( handle == NULL ) { | ||
3850 | 3027 | drivers.removeCurrentDriver(); | ||
3851 | 3028 | errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory."; | ||
3852 | 3029 | return FAILURE; | ||
3853 | 3030 | } | ||
3854 | 3031 | handle->bufferInfos = 0; | ||
3855 | 3032 | |||
3856 | 3033 | // Create a manual-reset event. | ||
3857 | 3034 | handle->condition = CreateEvent( NULL, // no security | ||
3858 | 3035 | TRUE, // manual-reset | ||
3859 | 3036 | FALSE, // non-signaled initially | ||
3860 | 3037 | NULL ); // unnamed | ||
3861 | 3038 | stream_.apiHandle = (void *) handle; | ||
3862 | 3039 | } | ||
3863 | 3040 | |||
3864 | 3041 | // Create the ASIO internal buffers. Since RtAudio sets up input | ||
3865 | 3042 | // and output separately, we'll have to dispose of previously | ||
3866 | 3043 | // created output buffers for a duplex stream. | ||
3867 | 3044 | long inputLatency, outputLatency; | ||
3868 | 3045 | if ( mode == INPUT && stream_.mode == OUTPUT ) { | ||
3869 | 3046 | ASIODisposeBuffers(); | ||
3870 | 3047 | if ( handle->bufferInfos ) free( handle->bufferInfos ); | ||
3871 | 3048 | } | ||
3872 | 3049 | |||
3873 | 3050 | // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure. | ||
3874 | 3051 | bool buffersAllocated = false; | ||
3875 | 3052 | unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1]; | ||
3876 | 3053 | handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) ); | ||
3877 | 3054 | if ( handle->bufferInfos == NULL ) { | ||
3878 | 3055 | errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ")."; | ||
3879 | 3056 | errorText_ = errorStream_.str(); | ||
3880 | 3057 | goto error; | ||
3881 | 3058 | } | ||
3882 | 3059 | |||
3883 | 3060 | ASIOBufferInfo *infos; | ||
3884 | 3061 | infos = handle->bufferInfos; | ||
3885 | 3062 | for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) { | ||
3886 | 3063 | infos->isInput = ASIOFalse; | ||
3887 | 3064 | infos->channelNum = i + stream_.channelOffset[0]; | ||
3888 | 3065 | infos->buffers[0] = infos->buffers[1] = 0; | ||
3889 | 3066 | } | ||
3890 | 3067 | for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) { | ||
3891 | 3068 | infos->isInput = ASIOTrue; | ||
3892 | 3069 | infos->channelNum = i + stream_.channelOffset[1]; | ||
3893 | 3070 | infos->buffers[0] = infos->buffers[1] = 0; | ||
3894 | 3071 | } | ||
3895 | 3072 | |||
3896 | 3073 | // Set up the ASIO callback structure and create the ASIO data buffers. | ||
3897 | 3074 | asioCallbacks.bufferSwitch = &bufferSwitch; | ||
3898 | 3075 | asioCallbacks.sampleRateDidChange = &sampleRateChanged; | ||
3899 | 3076 | asioCallbacks.asioMessage = &asioMessages; | ||
3900 | 3077 | asioCallbacks.bufferSwitchTimeInfo = NULL; | ||
3901 | 3078 | result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks ); | ||
3902 | 3079 | if ( result != ASE_OK ) { | ||
3903 | 3080 | errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers."; | ||
3904 | 3081 | errorText_ = errorStream_.str(); | ||
3905 | 3082 | goto error; | ||
3906 | 3083 | } | ||
3907 | 3084 | buffersAllocated = true; | ||
3908 | 3085 | |||
3909 | 3086 | // Set flags for buffer conversion. | ||
3910 | 3087 | stream_.doConvertBuffer[mode] = false; | ||
3911 | 3088 | if ( stream_.userFormat != stream_.deviceFormat[mode] ) | ||
3912 | 3089 | stream_.doConvertBuffer[mode] = true; | ||
3913 | 3090 | if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && | ||
3914 | 3091 | stream_.nUserChannels[mode] > 1 ) | ||
3915 | 3092 | stream_.doConvertBuffer[mode] = true; | ||
3916 | 3093 | |||
3917 | 3094 | // Allocate necessary internal buffers | ||
3918 | 3095 | unsigned long bufferBytes; | ||
3919 | 3096 | bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat ); | ||
3920 | 3097 | stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); | ||
3921 | 3098 | if ( stream_.userBuffer[mode] == NULL ) { | ||
3922 | 3099 | errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory."; | ||
3923 | 3100 | goto error; | ||
3924 | 3101 | } | ||
3925 | 3102 | |||
3926 | 3103 | if ( stream_.doConvertBuffer[mode] ) { | ||
3927 | 3104 | |||
3928 | 3105 | bool makeBuffer = true; | ||
3929 | 3106 | bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] ); | ||
3930 | 3107 | if ( mode == INPUT ) { | ||
3931 | 3108 | if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { | ||
3932 | 3109 | unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); | ||
3933 | 3110 | if ( bufferBytes <= bytesOut ) makeBuffer = false; | ||
3934 | 3111 | } | ||
3935 | 3112 | } | ||
3936 | 3113 | |||
3937 | 3114 | if ( makeBuffer ) { | ||
3938 | 3115 | bufferBytes *= *bufferSize; | ||
3939 | 3116 | if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); | ||
3940 | 3117 | stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); | ||
3941 | 3118 | if ( stream_.deviceBuffer == NULL ) { | ||
3942 | 3119 | errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory."; | ||
3943 | 3120 | goto error; | ||
3944 | 3121 | } | ||
3945 | 3122 | } | ||
3946 | 3123 | } | ||
3947 | 3124 | |||
3948 | 3125 | stream_.sampleRate = sampleRate; | ||
3949 | 3126 | stream_.device[mode] = device; | ||
3950 | 3127 | stream_.state = STREAM_STOPPED; | ||
3951 | 3128 | asioCallbackInfo = &stream_.callbackInfo; | ||
3952 | 3129 | stream_.callbackInfo.object = (void *) this; | ||
3953 | 3130 | if ( stream_.mode == OUTPUT && mode == INPUT ) | ||
3954 | 3131 | // We had already set up an output stream. | ||
3955 | 3132 | stream_.mode = DUPLEX; | ||
3956 | 3133 | else | ||
3957 | 3134 | stream_.mode = mode; | ||
3958 | 3135 | |||
3959 | 3136 | // Determine device latencies | ||
3960 | 3137 | result = ASIOGetLatencies( &inputLatency, &outputLatency ); | ||
3961 | 3138 | if ( result != ASE_OK ) { | ||
3962 | 3139 | errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency."; | ||
3963 | 3140 | errorText_ = errorStream_.str(); | ||
3964 | 3141 | error( RtAudioError::WARNING); // warn but don't fail | ||
3965 | 3142 | } | ||
3966 | 3143 | else { | ||
3967 | 3144 | stream_.latency[0] = outputLatency; | ||
3968 | 3145 | stream_.latency[1] = inputLatency; | ||
3969 | 3146 | } | ||
3970 | 3147 | |||
3971 | 3148 | // Setup the buffer conversion information structure. We don't use | ||
3972 | 3149 | // buffers to do channel offsets, so we override that parameter | ||
3973 | 3150 | // here. | ||
3974 | 3151 | if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 ); | ||
3975 | 3152 | |||
3976 | 3153 | return SUCCESS; | ||
3977 | 3154 | |||
3978 | 3155 | error: | ||
3979 | 3156 | if ( buffersAllocated ) | ||
3980 | 3157 | ASIODisposeBuffers(); | ||
3981 | 3158 | drivers.removeCurrentDriver(); | ||
3982 | 3159 | |||
3983 | 3160 | if ( handle ) { | ||
3984 | 3161 | CloseHandle( handle->condition ); | ||
3985 | 3162 | if ( handle->bufferInfos ) | ||
3986 | 3163 | free( handle->bufferInfos ); | ||
3987 | 3164 | delete handle; | ||
3988 | 3165 | stream_.apiHandle = 0; | ||
3989 | 3166 | } | ||
3990 | 3167 | |||
3991 | 3168 | for ( int i=0; i<2; i++ ) { | ||
3992 | 3169 | if ( stream_.userBuffer[i] ) { | ||
3993 | 3170 | free( stream_.userBuffer[i] ); | ||
3994 | 3171 | stream_.userBuffer[i] = 0; | ||
3995 | 3172 | } | ||
3996 | 3173 | } | ||
3997 | 3174 | |||
3998 | 3175 | if ( stream_.deviceBuffer ) { | ||
3999 | 3176 | free( stream_.deviceBuffer ); | ||
4000 | 3177 | stream_.deviceBuffer = 0; | ||
4001 | 3178 | } | ||
4002 | 3179 | |||
4003 | 3180 | return FAILURE; | ||
4004 | 3181 | } | ||
4005 | 3182 | |||
4006 | 3183 | void RtApiAsio :: closeStream() | ||
4007 | 3184 | { | ||
4008 | 3185 | if ( stream_.state == STREAM_CLOSED ) { | ||
4009 | 3186 | errorText_ = "RtApiAsio::closeStream(): no open stream to close!"; | ||
4010 | 3187 | error( RtAudioError::WARNING ); | ||
4011 | 3188 | return; | ||
4012 | 3189 | } | ||
4013 | 3190 | |||
4014 | 3191 | if ( stream_.state == STREAM_RUNNING ) { | ||
4015 | 3192 | stream_.state = STREAM_STOPPED; | ||
4016 | 3193 | ASIOStop(); | ||
4017 | 3194 | } | ||
4018 | 3195 | ASIODisposeBuffers(); | ||
4019 | 3196 | drivers.removeCurrentDriver(); | ||
4020 | 3197 | |||
4021 | 3198 | AsioHandle *handle = (AsioHandle *) stream_.apiHandle; | ||
4022 | 3199 | if ( handle ) { | ||
4023 | 3200 | CloseHandle( handle->condition ); | ||
4024 | 3201 | if ( handle->bufferInfos ) | ||
4025 | 3202 | free( handle->bufferInfos ); | ||
4026 | 3203 | delete handle; | ||
4027 | 3204 | stream_.apiHandle = 0; | ||
4028 | 3205 | } | ||
4029 | 3206 | |||
4030 | 3207 | for ( int i=0; i<2; i++ ) { | ||
4031 | 3208 | if ( stream_.userBuffer[i] ) { | ||
4032 | 3209 | free( stream_.userBuffer[i] ); | ||
4033 | 3210 | stream_.userBuffer[i] = 0; | ||
4034 | 3211 | } | ||
4035 | 3212 | } | ||
4036 | 3213 | |||
4037 | 3214 | if ( stream_.deviceBuffer ) { | ||
4038 | 3215 | free( stream_.deviceBuffer ); | ||
4039 | 3216 | stream_.deviceBuffer = 0; | ||
4040 | 3217 | } | ||
4041 | 3218 | |||
4042 | 3219 | stream_.mode = UNINITIALIZED; | ||
4043 | 3220 | stream_.state = STREAM_CLOSED; | ||
4044 | 3221 | } | ||
4045 | 3222 | |||
4046 | 3223 | bool stopThreadCalled = false; | ||
4047 | 3224 | |||
4048 | 3225 | void RtApiAsio :: startStream() | ||
4049 | 3226 | { | ||
4050 | 3227 | verifyStream(); | ||
4051 | 3228 | if ( stream_.state == STREAM_RUNNING ) { | ||
4052 | 3229 | errorText_ = "RtApiAsio::startStream(): the stream is already running!"; | ||
4053 | 3230 | error( RtAudioError::WARNING ); | ||
4054 | 3231 | return; | ||
4055 | 3232 | } | ||
4056 | 3233 | |||
4057 | 3234 | AsioHandle *handle = (AsioHandle *) stream_.apiHandle; | ||
4058 | 3235 | ASIOError result = ASIOStart(); | ||
4059 | 3236 | if ( result != ASE_OK ) { | ||
4060 | 3237 | errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device."; | ||
4061 | 3238 | errorText_ = errorStream_.str(); | ||
4062 | 3239 | goto unlock; | ||
4063 | 3240 | } | ||
4064 | 3241 | |||
4065 | 3242 | handle->drainCounter = 0; | ||
4066 | 3243 | handle->internalDrain = false; | ||
4067 | 3244 | ResetEvent( handle->condition ); | ||
4068 | 3245 | stream_.state = STREAM_RUNNING; | ||
4069 | 3246 | asioXRun = false; | ||
4070 | 3247 | |||
4071 | 3248 | unlock: | ||
4072 | 3249 | stopThreadCalled = false; | ||
4073 | 3250 | |||
4074 | 3251 | if ( result == ASE_OK ) return; | ||
4075 | 3252 | error( RtAudioError::SYSTEM_ERROR ); | ||
4076 | 3253 | } | ||
4077 | 3254 | |||
4078 | 3255 | void RtApiAsio :: stopStream() | ||
4079 | 3256 | { | ||
4080 | 3257 | verifyStream(); | ||
4081 | 3258 | if ( stream_.state == STREAM_STOPPED ) { | ||
4082 | 3259 | errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!"; | ||
4083 | 3260 | error( RtAudioError::WARNING ); | ||
4084 | 3261 | return; | ||
4085 | 3262 | } | ||
4086 | 3263 | |||
4087 | 3264 | AsioHandle *handle = (AsioHandle *) stream_.apiHandle; | ||
4088 | 3265 | if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { | ||
4089 | 3266 | if ( handle->drainCounter == 0 ) { | ||
4090 | 3267 | handle->drainCounter = 2; | ||
4091 | 3268 | WaitForSingleObject( handle->condition, INFINITE ); // block until signaled | ||
4092 | 3269 | } | ||
4093 | 3270 | } | ||
4094 | 3271 | |||
4095 | 3272 | stream_.state = STREAM_STOPPED; | ||
4096 | 3273 | |||
4097 | 3274 | ASIOError result = ASIOStop(); | ||
4098 | 3275 | if ( result != ASE_OK ) { | ||
4099 | 3276 | errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device."; | ||
4100 | 3277 | errorText_ = errorStream_.str(); | ||
4101 | 3278 | } | ||
4102 | 3279 | |||
4103 | 3280 | if ( result == ASE_OK ) return; | ||
4104 | 3281 | error( RtAudioError::SYSTEM_ERROR ); | ||
4105 | 3282 | } | ||
4106 | 3283 | |||
4107 | 3284 | void RtApiAsio :: abortStream() | ||
4108 | 3285 | { | ||
4109 | 3286 | verifyStream(); | ||
4110 | 3287 | if ( stream_.state == STREAM_STOPPED ) { | ||
4111 | 3288 | errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!"; | ||
4112 | 3289 | error( RtAudioError::WARNING ); | ||
4113 | 3290 | return; | ||
4114 | 3291 | } | ||
4115 | 3292 | |||
4116 | 3293 | // The following lines were commented-out because some behavior was | ||
4117 | 3294 | // noted where the device buffers need to be zeroed to avoid | ||
4118 | 3295 | // continuing sound, even when the device buffers are completely | ||
4119 | 3296 | // disposed. So now, calling abort is the same as calling stop. | ||
4120 | 3297 | // AsioHandle *handle = (AsioHandle *) stream_.apiHandle; | ||
4121 | 3298 | // handle->drainCounter = 2; | ||
4122 | 3299 | stopStream(); | ||
4123 | 3300 | } | ||
4124 | 3301 | |||
4125 | 3302 | // This function will be called by a spawned thread when the user | ||
4126 | 3303 | // callback function signals that the stream should be stopped or | ||
4127 | 3304 | // aborted. It is necessary to handle it this way because the | ||
4128 | 3305 | // callbackEvent() function must return before the ASIOStop() | ||
4129 | 3306 | // function will return. | ||
4130 | 3307 | static unsigned __stdcall asioStopStream( void *ptr ) | ||
4131 | 3308 | { | ||
4132 | 3309 | CallbackInfo *info = (CallbackInfo *) ptr; | ||
4133 | 3310 | RtApiAsio *object = (RtApiAsio *) info->object; | ||
4134 | 3311 | |||
4135 | 3312 | object->stopStream(); | ||
4136 | 3313 | _endthreadex( 0 ); | ||
4137 | 3314 | return 0; | ||
4138 | 3315 | } | ||
4139 | 3316 | |||
// ASIO buffer-switch handler: exchanges one period of audio between the user
// callback and the driver's per-channel buffers. 'bufferIndex' selects which
// half of the driver's double buffer to read/write this period. Returns
// SUCCESS normally; FAILURE only if invoked on a closed stream.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  // drainCounter is incremented once per period below; after more than 3
  // periods the output is taken to be flushed.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // An external stopStream() is blocked on this event — wake it.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report any xrun flagged by the driver (see asioMessages), then clear it.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback asked for an immediate stop: mark stopping, zero-fill the
      // remaining output (drainCounter = 2 below), and stop from a thread.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback asked to drain: play out what it produced, then stop
      // internally (handled by the drainCounter > 3 branch above).
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // bufferBytes here is the size of ONE device output channel per period.
    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert format/interleaving into deviceBuffer, byte-swap in place if
      // needed, then scatter the planar channels into the driver buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No conversion required: the user data is already device-compatible;
      // byte-swap in place if needed and copy channels straight across.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // bufferBytes here is the size of ONE device input channel per period.
    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // No conversion: gather the driver's per-channel input buffers
      // directly into the user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen. While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
4293 | 3470 | |||
4294 | 3471 | static void sampleRateChanged( ASIOSampleRate sRate ) | ||
4295 | 3472 | { | ||
4296 | 3473 | // The ASIO documentation says that this usually only happens during | ||
4297 | 3474 | // external sync. Audio processing is not stopped by the driver, | ||
4298 | 3475 | // actual sample rate might not have even changed, maybe only the | ||
4299 | 3476 | // sample rate status of an AES/EBU or S/PDIF digital input at the | ||
4300 | 3477 | // audio device. | ||
4301 | 3478 | |||
4302 | 3479 | RtApi *object = (RtApi *) asioCallbackInfo->object; | ||
4303 | 3480 | try { | ||
4304 | 3481 | object->stopStream(); | ||
4305 | 3482 | } | ||
4306 | 3483 | catch ( RtAudioError &exception ) { | ||
4307 | 3484 | std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl; | ||
4308 | 3485 | return; | ||
4309 | 3486 | } | ||
4310 | 3487 | |||
4311 | 3488 | std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl; | ||
4312 | 3489 | } | ||
4313 | 3490 | |||
4314 | 3491 | static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ ) | ||
4315 | 3492 | { | ||
4316 | 3493 | long ret = 0; | ||
4317 | 3494 | |||
4318 | 3495 | switch( selector ) { | ||
4319 | 3496 | case kAsioSelectorSupported: | ||
4320 | 3497 | if ( value == kAsioResetRequest | ||
4321 | 3498 | || value == kAsioEngineVersion | ||
4322 | 3499 | || value == kAsioResyncRequest | ||
4323 | 3500 | || value == kAsioLatenciesChanged | ||
4324 | 3501 | // The following three were added for ASIO 2.0, you don't | ||
4325 | 3502 | // necessarily have to support them. | ||
4326 | 3503 | || value == kAsioSupportsTimeInfo | ||
4327 | 3504 | || value == kAsioSupportsTimeCode | ||
4328 | 3505 | || value == kAsioSupportsInputMonitor) | ||
4329 | 3506 | ret = 1L; | ||
4330 | 3507 | break; | ||
4331 | 3508 | case kAsioResetRequest: | ||
4332 | 3509 | // Defer the task and perform the reset of the driver during the | ||
4333 | 3510 | // next "safe" situation. You cannot reset the driver right now, | ||
4334 | 3511 | // as this code is called from the driver. Reset the driver is | ||
4335 | 3512 | // done by completely destruct is. I.e. ASIOStop(), | ||
4336 | 3513 | // ASIODisposeBuffers(), Destruction Afterwards you initialize the | ||
4337 | 3514 | // driver again. | ||
4338 | 3515 | std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl; | ||
4339 | 3516 | ret = 1L; | ||
4340 | 3517 | break; | ||
4341 | 3518 | case kAsioResyncRequest: | ||
4342 | 3519 | // This informs the application that the driver encountered some | ||
4343 | 3520 | // non-fatal data loss. It is used for synchronization purposes | ||
4344 | 3521 | // of different media. Added mainly to work around the Win16Mutex | ||
4345 | 3522 | // problems in Windows 95/98 with the Windows Multimedia system, | ||
4346 | 3523 | // which could lose data because the Mutex was held too long by | ||
4347 | 3524 | // another thread. However a driver can issue it in other | ||
4348 | 3525 | // situations, too. | ||
4349 | 3526 | // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl; | ||
4350 | 3527 | asioXRun = true; | ||
4351 | 3528 | ret = 1L; | ||
4352 | 3529 | break; | ||
4353 | 3530 | case kAsioLatenciesChanged: | ||
4354 | 3531 | // This will inform the host application that the drivers were | ||
4355 | 3532 | // latencies changed. Beware, it this does not mean that the | ||
4356 | 3533 | // buffer sizes have changed! You might need to update internal | ||
4357 | 3534 | // delay data. | ||
4358 | 3535 | std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl; | ||
4359 | 3536 | ret = 1L; | ||
4360 | 3537 | break; | ||
4361 | 3538 | case kAsioEngineVersion: | ||
4362 | 3539 | // Return the supported ASIO version of the host application. If | ||
4363 | 3540 | // a host application does not implement this selector, ASIO 1.0 | ||
4364 | 3541 | // is assumed by the driver. | ||
4365 | 3542 | ret = 2L; | ||
4366 | 3543 | break; | ||
4367 | 3544 | case kAsioSupportsTimeInfo: | ||
4368 | 3545 | // Informs the driver whether the | ||
4369 | 3546 | // asioCallbacks.bufferSwitchTimeInfo() callback is supported. | ||
4370 | 3547 | // For compatibility with ASIO 1.0 drivers the host application | ||
4371 | 3548 | // should always support the "old" bufferSwitch method, too. | ||
4372 | 3549 | ret = 0; | ||
4373 | 3550 | break; | ||
4374 | 3551 | case kAsioSupportsTimeCode: | ||
4375 | 3552 | // Informs the driver whether application is interested in time | ||
4376 | 3553 | // code info. If an application does not need to know about time | ||
4377 | 3554 | // code, the driver has less work to do. | ||
4378 | 3555 | ret = 0; | ||
4379 | 3556 | break; | ||
4380 | 3557 | } | ||
4381 | 3558 | return ret; | ||
4382 | 3559 | } | ||
4383 | 3560 | |||
4384 | 3561 | static const char* getAsioErrorString( ASIOError result ) | ||
4385 | 3562 | { | ||
4386 | 3563 | struct Messages | ||
4387 | 3564 | { | ||
4388 | 3565 | ASIOError value; | ||
4389 | 3566 | const char*message; | ||
4390 | 3567 | }; | ||
4391 | 3568 | |||
4392 | 3569 | static const Messages m[] = | ||
4393 | 3570 | { | ||
4394 | 3571 | { ASE_NotPresent, "Hardware input or output is not present or available." }, | ||
4395 | 3572 | { ASE_HWMalfunction, "Hardware is malfunctioning." }, | ||
4396 | 3573 | { ASE_InvalidParameter, "Invalid input parameter." }, | ||
4397 | 3574 | { ASE_InvalidMode, "Invalid mode." }, | ||
4398 | 3575 | { ASE_SPNotAdvancing, "Sample position not advancing." }, | ||
4399 | 3576 | { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." }, | ||
4400 | 3577 | { ASE_NoMemory, "Not enough memory to complete the request." } | ||
4401 | 3578 | }; | ||
4402 | 3579 | |||
4403 | 3580 | for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i ) | ||
4404 | 3581 | if ( m[i].value == result ) return m[i].message; | ||
4405 | 3582 | |||
4406 | 3583 | return "Unknown error."; | ||
4407 | 3584 | } | ||
4408 | 3585 | |||
4409 | 3586 | //******************** End of __WINDOWS_ASIO__ *********************// | ||
4410 | 3587 | #endif | ||
4411 | 3588 | |||
4412 | 3589 | |||
4413 | 3590 | #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API | ||
4414 | 3591 | |||
4415 | 3592 | // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014 | ||
4416 | 3593 | // - Introduces support for the Windows WASAPI API | ||
4417 | 3594 | // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required | ||
4418 | 3595 | // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface | ||
4419 | 3596 | // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user | ||
4420 | 3597 | |||
4421 | 3598 | #ifndef INITGUID | ||
4422 | 3599 | #define INITGUID | ||
4423 | 3600 | #endif | ||
4424 | 3601 | #include <audioclient.h> | ||
4425 | 3602 | #include <avrt.h> | ||
4426 | 3603 | #include <mmdeviceapi.h> | ||
4427 | 3604 | #include <functiondiscoverykeys_devpkey.h> | ||
4428 | 3605 | |||
4429 | 3606 | //============================================================================= | ||
4430 | 3607 | |||
// Releases a COM interface pointer if non-NULL and resets it to NULL.
// NOTE(review): the expansion is an unbraced `if` statement, so call sites
// like `if (cond) SAFE_RELEASE(p); else ...` are dangling-else hazards —
// confirm call sites, or consider the do { } while (0) idiom.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
objectPtr->Release();\
objectPtr = NULL;\
}

// Function-pointer type matching AvSetMmThreadCharacteristics (avrt.dll),
// used to register the audio thread with MMCSS — presumably resolved at
// runtime via LoadLibrary/GetProcAddress; the loading site is not in view.
typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
4439 | 3616 | |||
4440 | 3617 | //----------------------------------------------------------------------------- | ||
4441 | 3618 | |||
4442 | 3619 | // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size. | ||
4443 | 3620 | // Therefore we must perform all necessary conversions to user buffers in order to satisfy these | ||
4444 | 3621 | // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to | ||
4445 | 3622 | // provide intermediate storage for read / write synchronization. | ||
4446 | 3623 | class WasapiBuffer | ||
4447 | 3624 | { | ||
4448 | 3625 | public: | ||
4449 | 3626 | WasapiBuffer() | ||
4450 | 3627 | : buffer_( NULL ), | ||
4451 | 3628 | bufferSize_( 0 ), | ||
4452 | 3629 | inIndex_( 0 ), | ||
4453 | 3630 | outIndex_( 0 ) {} | ||
4454 | 3631 | |||
4455 | 3632 | ~WasapiBuffer() { | ||
4456 | 3633 | delete buffer_; | ||
4457 | 3634 | } | ||
4458 | 3635 | |||
4459 | 3636 | // sets the length of the internal ring buffer | ||
4460 | 3637 | void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) { | ||
4461 | 3638 | delete buffer_; | ||
4462 | 3639 | |||
4463 | 3640 | buffer_ = ( char* ) calloc( bufferSize, formatBytes ); | ||
4464 | 3641 | |||
4465 | 3642 | bufferSize_ = bufferSize; | ||
4466 | 3643 | inIndex_ = 0; | ||
4467 | 3644 | outIndex_ = 0; | ||
4468 | 3645 | } | ||
4469 | 3646 | |||
4470 | 3647 | // attempt to push a buffer into the ring buffer at the current "in" index | ||
4471 | 3648 | bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format ) | ||
4472 | 3649 | { | ||
4473 | 3650 | if ( !buffer || // incoming buffer is NULL | ||
4474 | 3651 | bufferSize == 0 || // incoming buffer has no data | ||
4475 | 3652 | bufferSize > bufferSize_ ) // incoming buffer too large | ||
4476 | 3653 | { | ||
4477 | 3654 | return false; | ||
4478 | 3655 | } | ||
4479 | 3656 | |||
4480 | 3657 | unsigned int relOutIndex = outIndex_; | ||
4481 | 3658 | unsigned int inIndexEnd = inIndex_ + bufferSize; | ||
4482 | 3659 | if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) { | ||
4483 | 3660 | relOutIndex += bufferSize_; | ||
4484 | 3661 | } | ||
4485 | 3662 | |||
4486 | 3663 | // "in" index can end on the "out" index but cannot begin at it | ||
4487 | 3664 | if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) { | ||
4488 | 3665 | return false; // not enough space between "in" index and "out" index | ||
4489 | 3666 | } | ||
4490 | 3667 | |||
4491 | 3668 | // copy buffer from external to internal | ||
4492 | 3669 | int fromZeroSize = inIndex_ + bufferSize - bufferSize_; | ||
4493 | 3670 | fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize; | ||
4494 | 3671 | int fromInSize = bufferSize - fromZeroSize; | ||
4495 | 3672 | |||
4496 | 3673 | switch( format ) | ||
4497 | 3674 | { | ||
4498 | 3675 | case RTAUDIO_SINT8: | ||
4499 | 3676 | memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) ); | ||
4500 | 3677 | memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) ); | ||
4501 | 3678 | break; | ||
4502 | 3679 | case RTAUDIO_SINT16: | ||
4503 | 3680 | memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) ); | ||
4504 | 3681 | memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) ); | ||
4505 | 3682 | break; | ||
4506 | 3683 | case RTAUDIO_SINT24: | ||
4507 | 3684 | memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) ); | ||
4508 | 3685 | memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) ); | ||
4509 | 3686 | break; | ||
4510 | 3687 | case RTAUDIO_SINT32: | ||
4511 | 3688 | memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) ); | ||
4512 | 3689 | memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) ); | ||
4513 | 3690 | break; | ||
4514 | 3691 | case RTAUDIO_FLOAT32: | ||
4515 | 3692 | memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) ); | ||
4516 | 3693 | memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) ); | ||
4517 | 3694 | break; | ||
4518 | 3695 | case RTAUDIO_FLOAT64: | ||
4519 | 3696 | memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) ); | ||
4520 | 3697 | memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) ); | ||
4521 | 3698 | break; | ||
4522 | 3699 | } | ||
4523 | 3700 | |||
4524 | 3701 | // update "in" index | ||
4525 | 3702 | inIndex_ += bufferSize; | ||
4526 | 3703 | inIndex_ %= bufferSize_; | ||
4527 | 3704 | |||
4528 | 3705 | return true; | ||
4529 | 3706 | } | ||
4530 | 3707 | |||
4531 | 3708 | // attempt to pull a buffer from the ring buffer from the current "out" index | ||
4532 | 3709 | bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format ) | ||
4533 | 3710 | { | ||
4534 | 3711 | if ( !buffer || // incoming buffer is NULL | ||
4535 | 3712 | bufferSize == 0 || // incoming buffer has no data | ||
4536 | 3713 | bufferSize > bufferSize_ ) // incoming buffer too large | ||
4537 | 3714 | { | ||
4538 | 3715 | return false; | ||
4539 | 3716 | } | ||
4540 | 3717 | |||
4541 | 3718 | unsigned int relInIndex = inIndex_; | ||
4542 | 3719 | unsigned int outIndexEnd = outIndex_ + bufferSize; | ||
4543 | 3720 | if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) { | ||
4544 | 3721 | relInIndex += bufferSize_; | ||
4545 | 3722 | } | ||
4546 | 3723 | |||
4547 | 3724 | // "out" index can begin at and end on the "in" index | ||
4548 | 3725 | if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) { | ||
4549 | 3726 | return false; // not enough space between "out" index and "in" index | ||
4550 | 3727 | } | ||
4551 | 3728 | |||
4552 | 3729 | // copy buffer from internal to external | ||
4553 | 3730 | int fromZeroSize = outIndex_ + bufferSize - bufferSize_; | ||
4554 | 3731 | fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize; | ||
4555 | 3732 | int fromOutSize = bufferSize - fromZeroSize; | ||
4556 | 3733 | |||
4557 | 3734 | switch( format ) | ||
4558 | 3735 | { | ||
4559 | 3736 | case RTAUDIO_SINT8: | ||
4560 | 3737 | memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) ); | ||
4561 | 3738 | memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) ); | ||
4562 | 3739 | break; | ||
4563 | 3740 | case RTAUDIO_SINT16: | ||
4564 | 3741 | memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) ); | ||
4565 | 3742 | memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) ); | ||
4566 | 3743 | break; | ||
4567 | 3744 | case RTAUDIO_SINT24: | ||
4568 | 3745 | memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) ); | ||
4569 | 3746 | memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) ); | ||
4570 | 3747 | break; | ||
4571 | 3748 | case RTAUDIO_SINT32: | ||
4572 | 3749 | memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) ); | ||
4573 | 3750 | memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) ); | ||
4574 | 3751 | break; | ||
4575 | 3752 | case RTAUDIO_FLOAT32: | ||
4576 | 3753 | memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) ); | ||
4577 | 3754 | memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) ); | ||
4578 | 3755 | break; | ||
4579 | 3756 | case RTAUDIO_FLOAT64: | ||
4580 | 3757 | memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) ); | ||
4581 | 3758 | memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) ); | ||
4582 | 3759 | break; | ||
4583 | 3760 | } | ||
4584 | 3761 | |||
4585 | 3762 | // update "out" index | ||
4586 | 3763 | outIndex_ += bufferSize; | ||
4587 | 3764 | outIndex_ %= bufferSize_; | ||
4588 | 3765 | |||
4589 | 3766 | return true; | ||
4590 | 3767 | } | ||
4591 | 3768 | |||
4592 | 3769 | private: | ||
4593 | 3770 | char* buffer_; | ||
4594 | 3771 | unsigned int bufferSize_; | ||
4595 | 3772 | unsigned int inIndex_; | ||
4596 | 3773 | unsigned int outIndex_; | ||
4597 | 3774 | }; | ||
4598 | 3775 | |||
4599 | 3776 | //----------------------------------------------------------------------------- | ||
4600 | 3777 | |||
4601 | 3778 | // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate | ||
4602 | 3779 | // between HW and the user. The convertBufferWasapi function is used to perform this conversion | ||
4603 | 3780 | // between HwIn->UserIn and UserOut->HwOut during the stream callback loop. | ||
4604 | 3781 | // This sample rate converter favors speed over quality, and works best with conversions between | ||
4605 | 3782 | // one rate and its multiple. | ||
4606 | 3783 | void convertBufferWasapi( char* outBuffer, | ||
4607 | 3784 | const char* inBuffer, | ||
4608 | 3785 | const unsigned int& channelCount, | ||
4609 | 3786 | const unsigned int& inSampleRate, | ||
4610 | 3787 | const unsigned int& outSampleRate, | ||
4611 | 3788 | const unsigned int& inSampleCount, | ||
4612 | 3789 | unsigned int& outSampleCount, | ||
4613 | 3790 | const RtAudioFormat& format ) | ||
4614 | 3791 | { | ||
4615 | 3792 | // calculate the new outSampleCount and relative sampleStep | ||
4616 | 3793 | float sampleRatio = ( float ) outSampleRate / inSampleRate; | ||
4617 | 3794 | float sampleStep = 1.0f / sampleRatio; | ||
4618 | 3795 | float inSampleFraction = 0.0f; | ||
4619 | 3796 | |||
4620 | 3797 | outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio ); | ||
4621 | 3798 | |||
4622 | 3799 | // frame-by-frame, copy each relative input sample into it's corresponding output sample | ||
4623 | 3800 | for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ ) | ||
4624 | 3801 | { | ||
4625 | 3802 | unsigned int inSample = ( unsigned int ) inSampleFraction; | ||
4626 | 3803 | |||
4627 | 3804 | switch ( format ) | ||
4628 | 3805 | { | ||
4629 | 3806 | case RTAUDIO_SINT8: | ||
4630 | 3807 | memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) ); | ||
4631 | 3808 | break; | ||
4632 | 3809 | case RTAUDIO_SINT16: | ||
4633 | 3810 | memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) ); | ||
4634 | 3811 | break; | ||
4635 | 3812 | case RTAUDIO_SINT24: | ||
4636 | 3813 | memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) ); | ||
4637 | 3814 | break; | ||
4638 | 3815 | case RTAUDIO_SINT32: | ||
4639 | 3816 | memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) ); | ||
4640 | 3817 | break; | ||
4641 | 3818 | case RTAUDIO_FLOAT32: | ||
4642 | 3819 | memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) ); | ||
4643 | 3820 | break; | ||
4644 | 3821 | case RTAUDIO_FLOAT64: | ||
4645 | 3822 | memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) ); | ||
4646 | 3823 | break; | ||
4647 | 3824 | } | ||
4648 | 3825 | |||
4649 | 3826 | // jump to next in sample | ||
4650 | 3827 | inSampleFraction += sampleStep; | ||
4651 | 3828 | } | ||
4652 | 3829 | } | ||
4653 | 3830 | |||
4654 | 3831 | //----------------------------------------------------------------------------- | ||
4655 | 3832 | |||
4656 | 3833 | // A structure to hold various information related to the WASAPI implementation. | ||
4657 | 3834 | struct WasapiHandle | ||
4658 | 3835 | { | ||
4659 | 3836 | IAudioClient* captureAudioClient; | ||
4660 | 3837 | IAudioClient* renderAudioClient; | ||
4661 | 3838 | IAudioCaptureClient* captureClient; | ||
4662 | 3839 | IAudioRenderClient* renderClient; | ||
4663 | 3840 | HANDLE captureEvent; | ||
4664 | 3841 | HANDLE renderEvent; | ||
4665 | 3842 | |||
4666 | 3843 | WasapiHandle() | ||
4667 | 3844 | : captureAudioClient( NULL ), | ||
4668 | 3845 | renderAudioClient( NULL ), | ||
4669 | 3846 | captureClient( NULL ), | ||
4670 | 3847 | renderClient( NULL ), | ||
4671 | 3848 | captureEvent( NULL ), | ||
4672 | 3849 | renderEvent( NULL ) {} | ||
4673 | 3850 | }; | ||
4674 | 3851 | |||
4675 | 3852 | //============================================================================= | ||
4676 | 3853 | |||
4677 | 3854 | RtApiWasapi::RtApiWasapi() | ||
4678 | 3855 | : coInitialized_( false ), deviceEnumerator_( NULL ) | ||
4679 | 3856 | { | ||
4680 | 3857 | // WASAPI can run either apartment or multi-threaded | ||
4681 | 3858 | HRESULT hr = CoInitialize( NULL ); | ||
4682 | 3859 | if ( !FAILED( hr ) ) | ||
4683 | 3860 | coInitialized_ = true; | ||
4684 | 3861 | |||
4685 | 3862 | // Instantiate device enumerator | ||
4686 | 3863 | hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL, | ||
4687 | 3864 | CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ), | ||
4688 | 3865 | ( void** ) &deviceEnumerator_ ); | ||
4689 | 3866 | |||
4690 | 3867 | if ( FAILED( hr ) ) { | ||
4691 | 3868 | errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator"; | ||
4692 | 3869 | error( RtAudioError::DRIVER_ERROR ); | ||
4693 | 3870 | } | ||
4694 | 3871 | } | ||
4695 | 3872 | |||
4696 | 3873 | //----------------------------------------------------------------------------- | ||
4697 | 3874 | |||
4698 | 3875 | RtApiWasapi::~RtApiWasapi() | ||
4699 | 3876 | { | ||
4700 | 3877 | if ( stream_.state != STREAM_CLOSED ) | ||
4701 | 3878 | closeStream(); | ||
4702 | 3879 | |||
4703 | 3880 | SAFE_RELEASE( deviceEnumerator_ ); | ||
4704 | 3881 | |||
4705 | 3882 | // If this object previously called CoInitialize() | ||
4706 | 3883 | if ( coInitialized_ ) | ||
4707 | 3884 | CoUninitialize(); | ||
4708 | 3885 | } | ||
4709 | 3886 | |||
4710 | 3887 | //============================================================================= | ||
4711 | 3888 | |||
// Returns the total number of active WASAPI endpoints: all render devices
// plus all capture devices. (getDeviceInfo() maps indices below the render
// count to render devices and the remainder to capture devices.)
// Returns 0 and reports DRIVER_ERROR if any enumeration step fails.
unsigned int RtApiWasapi::getDeviceCount( void )
{
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;

  // Count capture devices
  // An empty errorText_ doubles as the "no error yet" flag tested after Exit.
  errorText_.clear();
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
    goto Exit;
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
    goto Exit;
  }

Exit:
  // release all references
  // SAFE_RELEASE is NULL-safe, so collections never obtained are skipped.
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );

  if ( errorText_.empty() )
    return captureDeviceCount + renderDeviceCount;

  error( RtAudioError::DRIVER_ERROR );
  return 0;
}
4758 | 3935 | |||
4759 | 3936 | //----------------------------------------------------------------------------- | ||
4760 | 3937 | |||
4761 | 3938 | RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device ) | ||
4762 | 3939 | { | ||
4763 | 3940 | RtAudio::DeviceInfo info; | ||
4764 | 3941 | unsigned int captureDeviceCount = 0; | ||
4765 | 3942 | unsigned int renderDeviceCount = 0; | ||
4766 | 3943 | std::wstring deviceName; | ||
4767 | 3944 | std::string defaultDeviceName; | ||
4768 | 3945 | bool isCaptureDevice = false; | ||
4769 | 3946 | |||
4770 | 3947 | PROPVARIANT deviceNameProp; | ||
4771 | 3948 | PROPVARIANT defaultDeviceNameProp; | ||
4772 | 3949 | |||
4773 | 3950 | IMMDeviceCollection* captureDevices = NULL; | ||
4774 | 3951 | IMMDeviceCollection* renderDevices = NULL; | ||
4775 | 3952 | IMMDevice* devicePtr = NULL; | ||
4776 | 3953 | IMMDevice* defaultDevicePtr = NULL; | ||
4777 | 3954 | IAudioClient* audioClient = NULL; | ||
4778 | 3955 | IPropertyStore* devicePropStore = NULL; | ||
4779 | 3956 | IPropertyStore* defaultDevicePropStore = NULL; | ||
4780 | 3957 | |||
4781 | 3958 | WAVEFORMATEX* deviceFormat = NULL; | ||
4782 | 3959 | WAVEFORMATEX* closestMatchFormat = NULL; | ||
4783 | 3960 | |||
4784 | 3961 | // probed | ||
4785 | 3962 | info.probed = false; | ||
4786 | 3963 | |||
4787 | 3964 | // Count capture devices | ||
4788 | 3965 | errorText_.clear(); | ||
4789 | 3966 | RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR; | ||
4790 | 3967 | HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices ); | ||
4791 | 3968 | if ( FAILED( hr ) ) { | ||
4792 | 3969 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection."; | ||
4793 | 3970 | goto Exit; | ||
4794 | 3971 | } | ||
4795 | 3972 | |||
4796 | 3973 | hr = captureDevices->GetCount( &captureDeviceCount ); | ||
4797 | 3974 | if ( FAILED( hr ) ) { | ||
4798 | 3975 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count."; | ||
4799 | 3976 | goto Exit; | ||
4800 | 3977 | } | ||
4801 | 3978 | |||
4802 | 3979 | // Count render devices | ||
4803 | 3980 | hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices ); | ||
4804 | 3981 | if ( FAILED( hr ) ) { | ||
4805 | 3982 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection."; | ||
4806 | 3983 | goto Exit; | ||
4807 | 3984 | } | ||
4808 | 3985 | |||
4809 | 3986 | hr = renderDevices->GetCount( &renderDeviceCount ); | ||
4810 | 3987 | if ( FAILED( hr ) ) { | ||
4811 | 3988 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count."; | ||
4812 | 3989 | goto Exit; | ||
4813 | 3990 | } | ||
4814 | 3991 | |||
4815 | 3992 | // validate device index | ||
4816 | 3993 | if ( device >= captureDeviceCount + renderDeviceCount ) { | ||
4817 | 3994 | errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index."; | ||
4818 | 3995 | errorType = RtAudioError::INVALID_USE; | ||
4819 | 3996 | goto Exit; | ||
4820 | 3997 | } | ||
4821 | 3998 | |||
4822 | 3999 | // determine whether index falls within capture or render devices | ||
4823 | 4000 | if ( device >= renderDeviceCount ) { | ||
4824 | 4001 | hr = captureDevices->Item( device - renderDeviceCount, &devicePtr ); | ||
4825 | 4002 | if ( FAILED( hr ) ) { | ||
4826 | 4003 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle."; | ||
4827 | 4004 | goto Exit; | ||
4828 | 4005 | } | ||
4829 | 4006 | isCaptureDevice = true; | ||
4830 | 4007 | } | ||
4831 | 4008 | else { | ||
4832 | 4009 | hr = renderDevices->Item( device, &devicePtr ); | ||
4833 | 4010 | if ( FAILED( hr ) ) { | ||
4834 | 4011 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle."; | ||
4835 | 4012 | goto Exit; | ||
4836 | 4013 | } | ||
4837 | 4014 | isCaptureDevice = false; | ||
4838 | 4015 | } | ||
4839 | 4016 | |||
4840 | 4017 | // get default device name | ||
4841 | 4018 | if ( isCaptureDevice ) { | ||
4842 | 4019 | hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr ); | ||
4843 | 4020 | if ( FAILED( hr ) ) { | ||
4844 | 4021 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle."; | ||
4845 | 4022 | goto Exit; | ||
4846 | 4023 | } | ||
4847 | 4024 | } | ||
4848 | 4025 | else { | ||
4849 | 4026 | hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr ); | ||
4850 | 4027 | if ( FAILED( hr ) ) { | ||
4851 | 4028 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle."; | ||
4852 | 4029 | goto Exit; | ||
4853 | 4030 | } | ||
4854 | 4031 | } | ||
4855 | 4032 | |||
4856 | 4033 | hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore ); | ||
4857 | 4034 | if ( FAILED( hr ) ) { | ||
4858 | 4035 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store."; | ||
4859 | 4036 | goto Exit; | ||
4860 | 4037 | } | ||
4861 | 4038 | PropVariantInit( &defaultDeviceNameProp ); | ||
4862 | 4039 | |||
4863 | 4040 | hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp ); | ||
4864 | 4041 | if ( FAILED( hr ) ) { | ||
4865 | 4042 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName."; | ||
4866 | 4043 | goto Exit; | ||
4867 | 4044 | } | ||
4868 | 4045 | |||
4869 | 4046 | deviceName = defaultDeviceNameProp.pwszVal; | ||
4870 | 4047 | defaultDeviceName = std::string( deviceName.begin(), deviceName.end() ); | ||
4871 | 4048 | |||
4872 | 4049 | // name | ||
4873 | 4050 | hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore ); | ||
4874 | 4051 | if ( FAILED( hr ) ) { | ||
4875 | 4052 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store."; | ||
4876 | 4053 | goto Exit; | ||
4877 | 4054 | } | ||
4878 | 4055 | |||
4879 | 4056 | PropVariantInit( &deviceNameProp ); | ||
4880 | 4057 | |||
4881 | 4058 | hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp ); | ||
4882 | 4059 | if ( FAILED( hr ) ) { | ||
4883 | 4060 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName."; | ||
4884 | 4061 | goto Exit; | ||
4885 | 4062 | } | ||
4886 | 4063 | |||
4887 | 4064 | deviceName = deviceNameProp.pwszVal; | ||
4888 | 4065 | info.name = std::string( deviceName.begin(), deviceName.end() ); | ||
4889 | 4066 | |||
4890 | 4067 | // is default | ||
4891 | 4068 | if ( isCaptureDevice ) { | ||
4892 | 4069 | info.isDefaultInput = info.name == defaultDeviceName; | ||
4893 | 4070 | info.isDefaultOutput = false; | ||
4894 | 4071 | } | ||
4895 | 4072 | else { | ||
4896 | 4073 | info.isDefaultInput = false; | ||
4897 | 4074 | info.isDefaultOutput = info.name == defaultDeviceName; | ||
4898 | 4075 | } | ||
4899 | 4076 | |||
4900 | 4077 | // channel count | ||
4901 | 4078 | hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient ); | ||
4902 | 4079 | if ( FAILED( hr ) ) { | ||
4903 | 4080 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client."; | ||
4904 | 4081 | goto Exit; | ||
4905 | 4082 | } | ||
4906 | 4083 | |||
4907 | 4084 | hr = audioClient->GetMixFormat( &deviceFormat ); | ||
4908 | 4085 | if ( FAILED( hr ) ) { | ||
4909 | 4086 | errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format."; | ||
4910 | 4087 | goto Exit; | ||
4911 | 4088 | } | ||
4912 | 4089 | |||
4913 | 4090 | if ( isCaptureDevice ) { | ||
4914 | 4091 | info.inputChannels = deviceFormat->nChannels; | ||
4915 | 4092 | info.outputChannels = 0; | ||
4916 | 4093 | info.duplexChannels = 0; | ||
4917 | 4094 | } | ||
4918 | 4095 | else { | ||
4919 | 4096 | info.inputChannels = 0; | ||
4920 | 4097 | info.outputChannels = deviceFormat->nChannels; | ||
4921 | 4098 | info.duplexChannels = 0; | ||
4922 | 4099 | } | ||
4923 | 4100 | |||
4924 | 4101 | // sample rates | ||
4925 | 4102 | info.sampleRates.clear(); | ||
4926 | 4103 | |||
4927 | 4104 | // allow support for all sample rates as we have a built-in sample rate converter | ||
4928 | 4105 | for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) { | ||
4929 | 4106 | info.sampleRates.push_back( SAMPLE_RATES[i] ); | ||
4930 | 4107 | } | ||
4931 | 4108 | |||
4932 | 4109 | // native format | ||
4933 | 4110 | info.nativeFormats = 0; | ||
4934 | 4111 | |||
4935 | 4112 | if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT || | ||
4936 | 4113 | ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE && | ||
4937 | 4114 | ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) ) | ||
4938 | 4115 | { | ||
4939 | 4116 | if ( deviceFormat->wBitsPerSample == 32 ) { | ||
4940 | 4117 | info.nativeFormats |= RTAUDIO_FLOAT32; | ||
4941 | 4118 | } | ||
4942 | 4119 | else if ( deviceFormat->wBitsPerSample == 64 ) { | ||
4943 | 4120 | info.nativeFormats |= RTAUDIO_FLOAT64; | ||
4944 | 4121 | } | ||
4945 | 4122 | } | ||
4946 | 4123 | else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM || | ||
4947 | 4124 | ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE && | ||
4948 | 4125 | ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) ) | ||
4949 | 4126 | { | ||
4950 | 4127 | if ( deviceFormat->wBitsPerSample == 8 ) { | ||
4951 | 4128 | info.nativeFormats |= RTAUDIO_SINT8; | ||
4952 | 4129 | } | ||
4953 | 4130 | else if ( deviceFormat->wBitsPerSample == 16 ) { | ||
4954 | 4131 | info.nativeFormats |= RTAUDIO_SINT16; | ||
4955 | 4132 | } | ||
4956 | 4133 | else if ( deviceFormat->wBitsPerSample == 24 ) { | ||
4957 | 4134 | info.nativeFormats |= RTAUDIO_SINT24; | ||
4958 | 4135 | } | ||
4959 | 4136 | else if ( deviceFormat->wBitsPerSample == 32 ) { | ||
4960 | 4137 | info.nativeFormats |= RTAUDIO_SINT32; | ||
4961 | 4138 | } | ||
4962 | 4139 | } | ||
4963 | 4140 | |||
4964 | 4141 | // probed | ||
4965 | 4142 | info.probed = true; | ||
4966 | 4143 | |||
4967 | 4144 | Exit: | ||
4968 | 4145 | // release all references | ||
4969 | 4146 | PropVariantClear( &deviceNameProp ); | ||
4970 | 4147 | PropVariantClear( &defaultDeviceNameProp ); | ||
4971 | 4148 | |||
4972 | 4149 | SAFE_RELEASE( captureDevices ); | ||
4973 | 4150 | SAFE_RELEASE( renderDevices ); | ||
4974 | 4151 | SAFE_RELEASE( devicePtr ); | ||
4975 | 4152 | SAFE_RELEASE( defaultDevicePtr ); | ||
4976 | 4153 | SAFE_RELEASE( audioClient ); | ||
4977 | 4154 | SAFE_RELEASE( devicePropStore ); | ||
4978 | 4155 | SAFE_RELEASE( defaultDevicePropStore ); | ||
4979 | 4156 | |||
4980 | 4157 | CoTaskMemFree( deviceFormat ); | ||
4981 | 4158 | CoTaskMemFree( closestMatchFormat ); | ||
4982 | 4159 | |||
4983 | 4160 | if ( !errorText_.empty() ) | ||
4984 | 4161 | error( errorType ); | ||
4985 | 4162 | return info; | ||
4986 | 4163 | } | ||
4987 | 4164 | |||
4988 | 4165 | //----------------------------------------------------------------------------- | ||
4989 | 4166 | |||
4990 | 4167 | unsigned int RtApiWasapi::getDefaultOutputDevice( void ) | ||
4991 | 4168 | { | ||
4992 | 4169 | for ( unsigned int i = 0; i < getDeviceCount(); i++ ) { | ||
4993 | 4170 | if ( getDeviceInfo( i ).isDefaultOutput ) { | ||
4994 | 4171 | return i; | ||
4995 | 4172 | } | ||
4996 | 4173 | } | ||
4997 | 4174 | |||
4998 | 4175 | return 0; | ||
4999 | 4176 | } | ||
5000 | 4177 |
The diff has been truncated for viewing.