Merge lp:~marcustomlinson/dspatchables/add_basic_components into lp:dspatchables
- add_basic_components
- Merge into trunk
Proposed by
Marcus Tomlinson
Status: | Merged |
---|---|
Merged at revision: | 2 |
Proposed branch: | lp:~marcustomlinson/dspatchables/add_basic_components |
Merge into: | lp:dspatchables |
Diff against target: |
16367 lines (+15835/-331) 35 files modified
CMakeLists.txt (+7/-0) DspAdder/CMakeLists.txt (+17/-0) DspAdder/DspAdder.cpp (+76/-0) DspAdder/DspAdder.h (+80/-0) DspAudioDevice/CMakeLists.txt (+61/-0) DspAudioDevice/DspAudioDevice.cpp (+427/-0) DspAudioDevice/DspAudioDevice.h (+109/-0) DspAudioDevice/rtaudio/RtAudio.cpp (+10136/-0) DspAudioDevice/rtaudio/RtAudio.h (+1162/-0) DspGain/CMakeLists.txt (+17/-0) DspGain/DspGain.cpp (+86/-0) DspGain/DspGain.h (+65/-0) DspOscillator/CMakeLists.txt (+1/-3) DspOscillator/DspOscillator.cpp (+222/-223) DspOscillator/DspOscillator.h (+105/-105) DspWaveStreamer/CMakeLists.txt (+17/-0) DspWaveStreamer/DspWaveStreamer.cpp (+279/-0) DspWaveStreamer/DspWaveStreamer.h (+103/-0) include/DSPatch.h (+632/-0) include/dspatch/DspCircuit.h (+321/-0) include/dspatch/DspCircuitThread.h (+89/-0) include/dspatch/DspComponent.h (+282/-0) include/dspatch/DspComponentThread.h (+72/-0) include/dspatch/DspParameter.h (+111/-0) include/dspatch/DspPlugin.h (+95/-0) include/dspatch/DspPluginLoader.h (+75/-0) include/dspatch/DspRunType.h (+204/-0) include/dspatch/DspSignal.h (+130/-0) include/dspatch/DspSignalBus.h (+192/-0) include/dspatch/DspThread.h (+43/-0) include/dspatch/DspThreadNull.h (+135/-0) include/dspatch/DspThreadUnix.h (+177/-0) include/dspatch/DspThreadWin.h (+178/-0) include/dspatch/DspWire.h (+58/-0) include/dspatch/DspWireBus.h (+71/-0) |
To merge this branch: | bzr merge lp:~marcustomlinson/dspatchables/add_basic_components |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Marcus Tomlinson | Pending | ||
Review via email: mp+255246@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'CMakeLists.txt' |
2 | --- CMakeLists.txt 2014-12-23 19:18:01 +0000 |
3 | +++ CMakeLists.txt 2015-04-05 22:23:36 +0000 |
4 | @@ -4,4 +4,11 @@ |
5 | |
6 | project(DSPatchables) |
7 | |
8 | +include_directories(${CMAKE_SOURCE_DIR}/include) |
9 | +link_directories(${CMAKE_SOURCE_DIR}/link) |
10 | + |
11 | +add_subdirectory(DspAdder) |
12 | +add_subdirectory(DspAudioDevice) |
13 | +add_subdirectory(DspGain) |
14 | add_subdirectory(DspOscillator) |
15 | +add_subdirectory(DspWaveStreamer) |
16 | |
17 | === added directory 'DspAdder' |
18 | === added file 'DspAdder/CMakeLists.txt' |
19 | --- DspAdder/CMakeLists.txt 1970-01-01 00:00:00 +0000 |
20 | +++ DspAdder/CMakeLists.txt 2015-04-05 22:23:36 +0000 |
21 | @@ -0,0 +1,17 @@ |
22 | +project(DspAdder) |
23 | + |
24 | +file(GLOB srcs *.cpp) |
25 | +file(GLOB hdrs *.h) |
26 | + |
27 | +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) |
28 | + |
29 | +add_library( |
30 | + ${PROJECT_NAME} SHARED |
31 | + ${srcs} |
32 | + ${hdrs} |
33 | +) |
34 | + |
35 | +target_link_libraries( |
36 | + ${PROJECT_NAME} |
37 | + DSPatch |
38 | +) |
39 | |
40 | === added file 'DspAdder/DspAdder.cpp' |
41 | --- DspAdder/DspAdder.cpp 1970-01-01 00:00:00 +0000 |
42 | +++ DspAdder/DspAdder.cpp 2015-04-05 22:23:36 +0000 |
43 | @@ -0,0 +1,76 @@ |
44 | +/************************************************************************ |
45 | +DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library |
46 | +Copyright (c) 2012-2015 Marcus Tomlinson |
47 | + |
48 | +This file is part of DSPatch. |
49 | + |
50 | +GNU Lesser General Public License Usage |
51 | +This file may be used under the terms of the GNU Lesser General Public |
52 | +License version 3.0 as published by the Free Software Foundation and |
53 | +appearing in the file LGPLv3.txt included in the packaging of this |
54 | +file. Please review the following information to ensure the GNU Lesser |
55 | +General Public License version 3.0 requirements will be met: |
56 | +http://www.gnu.org/copyleft/lgpl.html. |
57 | + |
58 | +Other Usage |
59 | +Alternatively, this file may be used in accordance with the terms and |
60 | +conditions contained in a signed written agreement between you and |
61 | +Marcus Tomlinson. |
62 | + |
63 | +DSPatch is distributed in the hope that it will be useful, |
64 | +but WITHOUT ANY WARRANTY; without even the implied warranty of |
65 | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
66 | +************************************************************************/ |
67 | + |
68 | +#include <DspAdder.h> |
69 | + |
70 | +//================================================================================================= |
71 | + |
72 | +DspAdder::DspAdder() |
73 | +{ |
74 | + // add 2 inputs |
75 | + AddInput_("Input1"); |
76 | + AddInput_("Input2"); |
77 | + |
78 | + // add 1 output |
79 | + AddOutput_("Output1"); |
80 | +} |
81 | + |
82 | +//------------------------------------------------------------------------------------------------- |
83 | + |
84 | +DspAdder::~DspAdder() |
85 | +{ |
86 | +} |
87 | + |
88 | +//================================================================================================= |
89 | + |
90 | +void DspAdder::Process_(DspSignalBus& inputs, DspSignalBus& outputs) |
91 | +{ |
92 | + // get input values from inputs bus (GetValue() returns true if successful) |
93 | + if (!inputs.GetValue(0, _stream1)) |
94 | + { |
95 | + _stream1.assign(_stream1.size(), 0); // clear buffer if no input received |
96 | + } |
97 | + // do the same to the 2nd input buffer |
98 | + if (!inputs.GetValue(1, _stream2)) |
99 | + { |
100 | + _stream2.assign(_stream2.size(), 0); |
101 | + } |
102 | + |
103 | + // ensure that the 2 input buffer sizes match |
104 | + if (_stream1.size() == _stream2.size()) |
105 | + { |
106 | + for (size_t i = 0; i < _stream1.size(); i++) |
107 | + { |
108 | + _stream1[i] += _stream2[i]; // perform addition element-by-element |
109 | + } |
110 | + outputs.SetValue(0, _stream1); // set output 1 |
111 | + } |
112 | + // if input sizes don't match |
113 | + else |
114 | + { |
115 | + outputs.ClearValue(0); // clear the output |
116 | + } |
117 | +} |
118 | + |
119 | +//================================================================================================= |
120 | |
121 | === added file 'DspAdder/DspAdder.h' |
122 | --- DspAdder/DspAdder.h 1970-01-01 00:00:00 +0000 |
123 | +++ DspAdder/DspAdder.h 2015-04-05 22:23:36 +0000 |
124 | @@ -0,0 +1,80 @@ |
125 | +/************************************************************************ |
126 | +DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library |
127 | +Copyright (c) 2012-2015 Marcus Tomlinson |
128 | + |
129 | +This file is part of DSPatch. |
130 | + |
131 | +GNU Lesser General Public License Usage |
132 | +This file may be used under the terms of the GNU Lesser General Public |
133 | +License version 3.0 as published by the Free Software Foundation and |
134 | +appearing in the file LGPLv3.txt included in the packaging of this |
135 | +file. Please review the following information to ensure the GNU Lesser |
136 | +General Public License version 3.0 requirements will be met: |
137 | +http://www.gnu.org/copyleft/lgpl.html. |
138 | + |
139 | +Other Usage |
140 | +Alternatively, this file may be used in accordance with the terms and |
141 | +conditions contained in a signed written agreement between you and |
142 | +Marcus Tomlinson. |
143 | + |
144 | +DSPatch is distributed in the hope that it will be useful, |
145 | +but WITHOUT ANY WARRANTY; without even the implied warranty of |
146 | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
147 | +************************************************************************/ |
148 | + |
149 | +#ifndef DSPADDER_H |
150 | +#define DSPADDER_H |
151 | + |
152 | +#include <DSPatch.h> |
153 | + |
154 | +//================================================================================================= |
155 | +/// Example DspComponent: Adder |
156 | + |
157 | +/** This component has 2 inputs and 1 output. The component receives 2 floating-point buffers into |
158 | +its 2 inputs, adds each buffer element of the 1st buffer to the corresponding element of the 2nd |
159 | +buffer, then stores the resultant buffer into a 3rd buffer. This resultant buffer is then passed to |
160 | +output 1 of the component output bus. */ |
161 | + |
162 | +class DspAdder : public DspComponent |
163 | +{ |
164 | +public: |
165 | + //! Component constructor |
166 | + /*! When a component is constructed, its input and output buses must be configured. This is |
167 | + achieved by making calls to the base class protected methods: "AddInput_()" and "AddOutput_()". |
168 | + These methods must be called once per input / output required. IO signal names are optional |
169 | + (Component IO can be referenced by either string ID or index) and can be assigned to each |
170 | + input / output by supplying the desired string ID as an argument to the respective AddInput_() |
171 | + / AddOutput_() method call.*/ |
172 | + |
173 | + DspAdder(); |
174 | + ~DspAdder(); |
175 | + |
176 | +protected: |
177 | + //! Virtual process method inherited from DspComponent |
178 | + /*! The Process_() method is called from the DSPatch engine when a new set of component input |
179 | + signals are ready for processing. The Process_() method has 2 arguments: the input bus and the |
180 | + output bus. This method's purpose is to pull its required inputs out of the input bus, process |
181 | + these inputs, and populate the output bus with the results (see DspSignalBus). */ |
182 | + |
183 | + virtual void Process_(DspSignalBus& inputs, DspSignalBus& outputs); |
184 | + |
185 | +private: |
186 | + std::vector<float> _stream1; |
187 | + std::vector<float> _stream2; |
188 | +}; |
189 | + |
190 | +//================================================================================================= |
191 | + |
192 | +class DspAdderPlugin : public DspPlugin |
193 | +{ |
194 | + DspComponent* Create(std::map<std::string, DspParameter>&) const |
195 | + { |
196 | + return new DspAdder(); |
197 | + } |
198 | +}; |
199 | + |
200 | +EXPORT_DSPPLUGIN(DspAdderPlugin) |
201 | + |
202 | +//================================================================================================= |
203 | + |
204 | +#endif // DSPADDER_H |
205 | |
206 | === added directory 'DspAudioDevice' |
207 | === added file 'DspAudioDevice/CMakeLists.txt' |
208 | --- DspAudioDevice/CMakeLists.txt 1970-01-01 00:00:00 +0000 |
209 | +++ DspAudioDevice/CMakeLists.txt 2015-04-05 22:23:36 +0000 |
210 | @@ -0,0 +1,61 @@ |
211 | +project(DspAudioDevice) |
212 | + |
213 | +file(GLOB srcs *.cpp rtaudio/*.cpp) |
214 | +file(GLOB hdrs *.h rtaudio/*.h) |
215 | + |
216 | +include_directories( |
217 | + ${CMAKE_CURRENT_SOURCE_DIR} |
218 | + ${CMAKE_CURRENT_SOURCE_DIR}/rtaudio |
219 | +) |
220 | + |
221 | +add_library( |
222 | + ${PROJECT_NAME} SHARED |
223 | + ${srcs} |
224 | + ${hdrs} |
225 | +) |
226 | + |
227 | +target_link_libraries( |
228 | + ${PROJECT_NAME} |
229 | + DSPatch |
230 | +) |
231 | + |
232 | +# Definition for RtAudio Windows, using direct sound |
233 | +if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") |
234 | + add_definitions(-DEXAMPLE_PLUGIN_FILE="${CMAKE_CURRENT_BINARY_DIR}/oscillator-plugin/$<CONFIGURATION>/DspOscillator.dll") |
235 | + add_definitions(-D__WINDOWS_WASAPI__) |
236 | + |
237 | + add_custom_command( |
238 | + TARGET ${PROJECT_NAME} POST_BUILD |
239 | + COMMAND ${CMAKE_COMMAND} -E copy_if_different |
240 | + ${CMAKE_BINARY_DIR}/$<CONFIGURATION>/DSPatch.dll |
241 | + ${CMAKE_CURRENT_BINARY_DIR}/$<CONFIGURATION> |
242 | + ) |
243 | +endif(${CMAKE_SYSTEM_NAME} MATCHES "Windows") |
244 | + |
245 | +# Definition for RtAudio Linux, using ALSA |
246 | +if(${CMAKE_SYSTEM_NAME} MATCHES "Linux") |
247 | + add_definitions(-DEXAMPLE_PLUGIN_FILE="${CMAKE_CURRENT_BINARY_DIR}/oscillator-plugin/libDspOscillator.so") |
248 | + add_definitions(-D__LINUX_ALSA__) |
249 | + |
250 | + find_library(ASOUND asound) |
251 | + if(NOT ASOUND) |
252 | + message(FATAL_ERROR "ALSA not found (Ensure that libasound2-dev is installed)") |
253 | + endif() |
254 | + |
255 | + target_link_libraries( |
256 | + ${PROJECT_NAME} |
257 | + ${ASOUND} |
258 | + ) |
259 | +endif(${CMAKE_SYSTEM_NAME} MATCHES "Linux") |
260 | + |
261 | +# Definition for RtAudio Mac OSX, using Core Audio |
262 | +if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") |
263 | + add_definitions(-DEXAMPLE_PLUGIN_FILE="${CMAKE_CURRENT_BINARY_DIR}/oscillator-plugin/$<CONFIGURATION>/libDspOscillator.dylib") |
264 | + add_definitions(-D__MACOSX_CORE__) |
265 | + |
266 | + target_link_libraries( |
267 | + ${PROJECT_NAME} |
268 | + "-framework CoreAudio" |
269 | + "-framework CoreFoundation" |
270 | + ) |
271 | +endif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") |
272 | |
273 | === added file 'DspAudioDevice/DspAudioDevice.cpp' |
274 | --- DspAudioDevice/DspAudioDevice.cpp 1970-01-01 00:00:00 +0000 |
275 | +++ DspAudioDevice/DspAudioDevice.cpp 2015-04-05 22:23:36 +0000 |
276 | @@ -0,0 +1,427 @@ |
277 | +/************************************************************************ |
278 | +DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library |
279 | +Copyright (c) 2012-2015 Marcus Tomlinson |
280 | + |
281 | +This file is part of DSPatch. |
282 | + |
283 | +GNU Lesser General Public License Usage |
284 | +This file may be used under the terms of the GNU Lesser General Public |
285 | +License version 3.0 as published by the Free Software Foundation and |
286 | +appearing in the file LGPLv3.txt included in the packaging of this |
287 | +file. Please review the following information to ensure the GNU Lesser |
288 | +General Public License version 3.0 requirements will be met: |
289 | +http://www.gnu.org/copyleft/lgpl.html. |
290 | + |
291 | +Other Usage |
292 | +Alternatively, this file may be used in accordance with the terms and |
293 | +conditions contained in a signed written agreement between you and |
294 | +Marcus Tomlinson. |
295 | + |
296 | +DSPatch is distributed in the hope that it will be useful, |
297 | +but WITHOUT ANY WARRANTY; without even the implied warranty of |
298 | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
299 | +************************************************************************/ |
300 | + |
301 | +#include <DspAudioDevice.h> |
302 | + |
303 | +#include <RtAudio.h> |
304 | + |
305 | +#include <iostream> |
306 | +#include <string.h> |
307 | +#include <cstdlib> |
308 | + |
309 | +//================================================================================================= |
310 | + |
311 | +struct RtAudioMembers |
312 | +{ |
313 | + std::vector<RtAudio::DeviceInfo> deviceList; |
314 | + |
315 | + RtAudio audioStream; |
316 | + RtAudio::StreamParameters outputParams; |
317 | + RtAudio::StreamParameters inputParams; |
318 | +}; |
319 | + |
320 | +//================================================================================================= |
321 | + |
322 | +DspAudioDevice::DspAudioDevice() |
323 | + : _rtAudio(new RtAudioMembers()) |
324 | + , _gotWaitReady(false) |
325 | + , _gotSyncReady(true) |
326 | +{ |
327 | + _outputChannels.resize(8); |
328 | + for (int i = 0; i < 8; i++) |
329 | + { |
330 | + AddInput_(); |
331 | + } |
332 | + |
333 | + AddInput_("Sample Rate"); |
334 | + |
335 | + _inputChannels.resize(8); |
336 | + for (int i = 0; i < 8; i++) |
337 | + { |
338 | + AddOutput_(); |
339 | + } |
340 | + |
341 | + std::vector<std::string> deviceNameList; |
342 | + |
343 | + for (unsigned int i = 0; i < _rtAudio->audioStream.getDeviceCount(); i++) |
344 | + { |
345 | + _rtAudio->deviceList.push_back(_rtAudio->audioStream.getDeviceInfo(i)); |
346 | + deviceNameList.push_back(_rtAudio->audioStream.getDeviceInfo(i).name); |
347 | + } |
348 | + |
349 | + pDeviceList = AddParameter_("deviceList", DspParameter(DspParameter::List, deviceNameList)); |
350 | + pIsStreaming = AddParameter_("isStreaming", DspParameter(DspParameter::Bool, false)); |
351 | + pBufferSize = AddParameter_("bufferSize", DspParameter(DspParameter::Int, 256)); |
352 | + pSampleRate = AddParameter_("sampleRate", DspParameter(DspParameter::Int, 44100)); |
353 | + |
354 | + SetDevice(_rtAudio->audioStream.getDefaultOutputDevice()); |
355 | + SetBufferSize(GetBufferSize()); |
356 | + SetSampleRate(GetSampleRate()); |
357 | +} |
358 | + |
359 | +//------------------------------------------------------------------------------------------------- |
360 | + |
361 | +DspAudioDevice::~DspAudioDevice() |
362 | +{ |
363 | + _StopStream(); |
364 | + |
365 | + delete _rtAudio; |
366 | +} |
367 | + |
368 | +//------------------------------------------------------------------------------------------------- |
369 | + |
370 | +bool DspAudioDevice::SetDevice(int deviceIndex) |
371 | +{ |
372 | + if (deviceIndex >= 0 && deviceIndex < GetDeviceCount()) |
373 | + { |
374 | + _StopStream(); |
375 | + |
376 | + SetParameter_(pDeviceList, DspParameter(DspParameter::Int, deviceIndex)); |
377 | + |
378 | + _rtAudio->inputParams.nChannels = _rtAudio->deviceList[deviceIndex].inputChannels; |
379 | + _rtAudio->inputParams.deviceId = deviceIndex; |
380 | + |
381 | + _rtAudio->outputParams.nChannels = _rtAudio->deviceList[deviceIndex].outputChannels; |
382 | + _rtAudio->outputParams.deviceId = deviceIndex; |
383 | + |
384 | + _StartStream(); |
385 | + |
386 | + return true; |
387 | + } |
388 | + |
389 | + return false; |
390 | +} |
391 | + |
392 | +//------------------------------------------------------------------------------------------------- |
393 | + |
394 | +std::string DspAudioDevice::GetDeviceName(int deviceIndex) const |
395 | +{ |
396 | + if (deviceIndex >= 0 && deviceIndex < GetDeviceCount()) |
397 | + { |
398 | + return _rtAudio->deviceList[deviceIndex].name; |
399 | + } |
400 | + |
401 | + return ""; |
402 | +} |
403 | + |
404 | +//------------------------------------------------------------------------------------------------- |
405 | + |
406 | +int DspAudioDevice::GetDeviceInputCount(int deviceIndex) const |
407 | +{ |
408 | + return _rtAudio->deviceList[deviceIndex].inputChannels; |
409 | +} |
410 | + |
411 | +//------------------------------------------------------------------------------------------------- |
412 | + |
413 | +int DspAudioDevice::GetDeviceOutputCount(int deviceIndex) const |
414 | +{ |
415 | + return _rtAudio->deviceList[deviceIndex].outputChannels; |
416 | +} |
417 | + |
418 | +//------------------------------------------------------------------------------------------------- |
419 | + |
420 | +int DspAudioDevice::GetCurrentDevice() const |
421 | +{ |
422 | + return *GetParameter_(pDeviceList)->GetInt(); |
423 | +} |
424 | + |
425 | +//------------------------------------------------------------------------------------------------- |
426 | + |
427 | +int DspAudioDevice::GetDeviceCount() const |
428 | +{ |
429 | + return GetParameter_(pDeviceList)->GetList()->size(); |
430 | +} |
431 | + |
432 | +//------------------------------------------------------------------------------------------------- |
433 | + |
434 | +void DspAudioDevice::SetBufferSize(int bufferSize) |
435 | +{ |
436 | + _StopStream(); |
437 | + |
438 | + SetParameter_(pBufferSize, DspParameter(DspParameter::Int, bufferSize)); |
439 | + for (size_t i = 0; i < _inputChannels.size(); i++) |
440 | + { |
441 | + _inputChannels[i].resize(bufferSize); |
442 | + } |
443 | + |
444 | + _StartStream(); |
445 | +} |
446 | + |
447 | +//------------------------------------------------------------------------------------------------- |
448 | + |
449 | +void DspAudioDevice::SetSampleRate(int sampleRate) |
450 | +{ |
451 | + _StopStream(); |
452 | + SetParameter_(pSampleRate, DspParameter(DspParameter::Int, sampleRate)); |
453 | + _StartStream(); |
454 | +} |
455 | + |
456 | +//------------------------------------------------------------------------------------------------- |
457 | + |
458 | +bool DspAudioDevice::IsStreaming() const |
459 | +{ |
460 | + return *GetParameter_(pIsStreaming)->GetBool(); |
461 | +} |
462 | + |
463 | +//------------------------------------------------------------------------------------------------- |
464 | + |
465 | +int DspAudioDevice::GetBufferSize() const |
466 | +{ |
467 | + return *GetParameter_(pBufferSize)->GetInt(); |
468 | +} |
469 | + |
470 | +//------------------------------------------------------------------------------------------------- |
471 | + |
472 | +int DspAudioDevice::GetSampleRate() const |
473 | +{ |
474 | + return *GetParameter_(pSampleRate)->GetInt(); |
475 | +} |
476 | + |
477 | +//================================================================================================= |
478 | + |
479 | +void DspAudioDevice::Process_(DspSignalBus& inputs, DspSignalBus& outputs) |
480 | +{ |
481 | + // Wait until the sound card is ready for the next set of buffers |
482 | + // ============================================================== |
483 | + _syncMutex.Lock(); |
484 | + if (!_gotSyncReady) // if haven't already got the release |
485 | + { |
486 | + _syncCondt.Wait(_syncMutex); // wait for sync |
487 | + } |
488 | + _gotSyncReady = false; // reset the release flag |
489 | + _syncMutex.Unlock(); |
490 | + |
491 | + // Synchronise sample rate with the "Sample Rate" input feed |
492 | + // ========================================================= |
493 | + int sampleRate; |
494 | + if (inputs.GetValue("Sample Rate", sampleRate)) |
495 | + { |
496 | + if (sampleRate != GetSampleRate()) |
497 | + { |
498 | + SetSampleRate(sampleRate); |
499 | + } |
500 | + } |
501 | + |
502 | + // Synchronise buffer size with the size of incoming buffers |
503 | + // ========================================================= |
504 | + if (inputs.GetValue(0, _outputChannels[0])) |
505 | + { |
506 | + if (GetBufferSize() != (int)_outputChannels[0].size() && _outputChannels[0].size() != 0) |
507 | + { |
508 | + SetBufferSize(_outputChannels[0].size()); |
509 | + } |
510 | + } |
511 | + |
512 | + // Retrieve incoming component buffers for the sound card to output |
513 | + // ================================================================ |
514 | + for (size_t i = 0; i < _outputChannels.size(); i++) |
515 | + { |
516 | + if (!inputs.GetValue(i, _outputChannels[i])) |
517 | + { |
518 | + _outputChannels[i].assign(_outputChannels[i].size(), 0); |
519 | + } |
520 | + } |
521 | + |
522 | + // Retrieve incoming sound card buffers for the component to output |
523 | + // ================================================================ |
524 | + for (size_t i = 0; i < _inputChannels.size(); i++) |
525 | + { |
526 | + outputs.SetValue(i, _inputChannels[i]); |
527 | + } |
528 | + |
529 | + // Inform the sound card that buffers are now ready |
530 | + // ================================================ |
531 | + _buffersMutex.Lock(); |
532 | + _gotWaitReady = true; // set release flag |
533 | + _waitCondt.WakeAll(); // release sync |
534 | + _buffersMutex.Unlock(); |
535 | +} |
536 | + |
537 | +//------------------------------------------------------------------------------------------------- |
538 | + |
539 | +bool DspAudioDevice::ParameterUpdating_(int index, DspParameter const& param) |
540 | +{ |
541 | + if (index == pDeviceList) |
542 | + { |
543 | + return SetDevice(*param.GetInt()); |
544 | + } |
545 | + else if (index == pBufferSize) |
546 | + { |
547 | + SetBufferSize(*param.GetInt()); |
548 | + return true; |
549 | + } |
550 | + else if (index == pSampleRate) |
551 | + { |
552 | + SetSampleRate(*param.GetInt()); |
553 | + return true; |
554 | + } |
555 | + |
556 | + return false; |
557 | +} |
558 | + |
559 | +//================================================================================================= |
560 | + |
561 | +void DspAudioDevice::_SetIsStreaming(bool isStreaming) |
562 | +{ |
563 | + SetParameter_(pIsStreaming, DspParameter(DspParameter::Bool, isStreaming)); |
564 | +} |
565 | + |
566 | +//------------------------------------------------------------------------------------------------- |
567 | + |
568 | +void DspAudioDevice::_WaitForBuffer() |
569 | +{ |
570 | + _buffersMutex.Lock(); |
571 | + if (!_gotWaitReady) // if haven't already got the release |
572 | + { |
573 | + _waitCondt.Wait(_buffersMutex); // wait for sync |
574 | + } |
575 | + _gotWaitReady = false; // reset the release flag |
576 | + _buffersMutex.Unlock(); |
577 | +} |
578 | + |
579 | +//------------------------------------------------------------------------------------------------- |
580 | + |
581 | +void DspAudioDevice::_SyncBuffer() |
582 | +{ |
583 | + _syncMutex.Lock(); |
584 | + _gotSyncReady = true; // set release flag |
585 | + _syncCondt.WakeAll(); // release sync |
586 | + _syncMutex.Unlock(); |
587 | +} |
588 | + |
589 | +//------------------------------------------------------------------------------------------------- |
590 | + |
591 | +void DspAudioDevice::_StopStream() |
592 | +{ |
593 | + _SetIsStreaming(false); |
594 | + |
595 | + _buffersMutex.Lock(); |
596 | + _gotWaitReady = true; // set release flag |
597 | + _waitCondt.WakeAll(); // release sync |
598 | + _buffersMutex.Unlock(); |
599 | + |
600 | + if (_rtAudio->audioStream.isStreamOpen()) |
601 | + { |
602 | + _rtAudio->audioStream.closeStream(); |
603 | + } |
604 | +} |
605 | + |
606 | +//------------------------------------------------------------------------------------------------- |
607 | + |
608 | +void DspAudioDevice::_StartStream() |
609 | +{ |
610 | + RtAudio::StreamParameters* inputParams = NULL; |
611 | + RtAudio::StreamParameters* outputParams = NULL; |
612 | + |
613 | + if (_rtAudio->inputParams.nChannels != 0) |
614 | + { |
615 | + inputParams = &_rtAudio->inputParams; |
616 | + } |
617 | + |
618 | + if (_rtAudio->outputParams.nChannels != 0) |
619 | + { |
620 | + outputParams = &_rtAudio->outputParams; |
621 | + } |
622 | + |
623 | + RtAudio::StreamOptions options; |
624 | + options.flags |= RTAUDIO_SCHEDULE_REALTIME; |
625 | + options.flags |= RTAUDIO_NONINTERLEAVED; |
626 | + |
627 | + _rtAudio->audioStream.openStream(outputParams, |
628 | + inputParams, |
629 | + RTAUDIO_FLOAT32, |
630 | + GetSampleRate(), |
631 | + (unsigned int*)const_cast<int*>(GetParameter_(pBufferSize)->GetInt()), |
632 | + &_StaticCallback, |
633 | + this, |
634 | + &options); |
635 | + |
636 | + _rtAudio->audioStream.startStream(); |
637 | + |
638 | + while (!_rtAudio->audioStream.isStreamOpen()) |
639 | + { |
640 | + DspThread::MsSleep(10); |
641 | + } |
642 | + |
643 | + _SetIsStreaming(true); |
644 | +} |
645 | + |
646 | +//------------------------------------------------------------------------------------------------- |
647 | + |
648 | +int DspAudioDevice::_StaticCallback( |
649 | + void* outputBuffer, void* inputBuffer, unsigned int, double, unsigned int, void* userData) |
650 | +{ |
651 | + return (reinterpret_cast<DspAudioDevice*>(userData))->_DynamicCallback(inputBuffer, outputBuffer); |
652 | +} |
653 | + |
654 | +//------------------------------------------------------------------------------------------------- |
655 | + |
656 | +int DspAudioDevice::_DynamicCallback(void* inputBuffer, void* outputBuffer) |
657 | +{ |
658 | + _WaitForBuffer(); |
659 | + |
660 | + if (IsStreaming()) |
661 | + { |
662 | + float* floatOutput = (float*)outputBuffer; |
663 | + float* floatInput = (float*)inputBuffer; |
664 | + |
665 | + if (outputBuffer != NULL) |
666 | + { |
667 | + for (size_t i = 0; i < _outputChannels.size(); i++) |
668 | + { |
669 | + if (_rtAudio->deviceList[GetCurrentDevice()].outputChannels >= (i + 1)) |
670 | + { |
671 | + for (size_t j = 0; j < _outputChannels[i].size(); j++) |
672 | + { |
673 | + *floatOutput++ = _outputChannels[i][j]; |
674 | + } |
675 | + } |
676 | + } |
677 | + } |
678 | + |
679 | + if (inputBuffer != NULL) |
680 | + { |
681 | + for (size_t i = 0; i < _inputChannels.size(); i++) |
682 | + { |
683 | + if (_rtAudio->deviceList[GetCurrentDevice()].inputChannels >= (i + 1)) |
684 | + { |
685 | + for (size_t j = 0; j < _inputChannels[i].size(); j++) |
686 | + { |
687 | + _inputChannels[i][j] = *floatInput++; |
688 | + } |
689 | + } |
690 | + } |
691 | + } |
692 | + } |
693 | + else |
694 | + { |
695 | + _SyncBuffer(); |
696 | + return 1; |
697 | + } |
698 | + |
699 | + _SyncBuffer(); |
700 | + return 0; |
701 | +} |
702 | + |
703 | +//================================================================================================= |
704 | |
705 | === added file 'DspAudioDevice/DspAudioDevice.h' |
706 | --- DspAudioDevice/DspAudioDevice.h 1970-01-01 00:00:00 +0000 |
707 | +++ DspAudioDevice/DspAudioDevice.h 2015-04-05 22:23:36 +0000 |
708 | @@ -0,0 +1,109 @@ |
709 | +/************************************************************************ |
710 | +DSPatch - Cross-Platform, Object-Oriented, Flow-Based Programming Library |
711 | +Copyright (c) 2012-2015 Marcus Tomlinson |
712 | + |
713 | +This file is part of DSPatch. |
714 | + |
715 | +GNU Lesser General Public License Usage |
716 | +This file may be used under the terms of the GNU Lesser General Public |
717 | +License version 3.0 as published by the Free Software Foundation and |
718 | +appearing in the file LGPLv3.txt included in the packaging of this |
719 | +file. Please review the following information to ensure the GNU Lesser |
720 | +General Public License version 3.0 requirements will be met: |
721 | +http://www.gnu.org/copyleft/lgpl.html. |
722 | + |
723 | +Other Usage |
724 | +Alternatively, this file may be used in accordance with the terms and |
725 | +conditions contained in a signed written agreement between you and |
726 | +Marcus Tomlinson. |
727 | + |
728 | +DSPatch is distributed in the hope that it will be useful, |
729 | +but WITHOUT ANY WARRANTY; without even the implied warranty of |
730 | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
731 | +************************************************************************/ |
732 | + |
733 | +#ifndef DSPAUDIODEVICE_H |
734 | +#define DSPAUDIODEVICE_H |
735 | + |
736 | +#include <DSPatch.h> |
737 | + |
738 | +struct RtAudioMembers; |
739 | + |
740 | +//================================================================================================= |
741 | + |
742 | +class DspAudioDevice : public DspComponent |
743 | +{ |
744 | +public: |
745 | + int pDeviceList; // List |
746 | + int pIsStreaming; // Bool |
747 | + int pBufferSize; // Int |
748 | + int pSampleRate; // Int |
749 | + |
750 | + DspAudioDevice(); |
751 | + ~DspAudioDevice(); |
752 | + |
753 | + bool SetDevice(int deviceIndex); |
754 | + |
755 | + std::string GetDeviceName(int deviceIndex) const; |
756 | + int GetDeviceInputCount(int deviceIndex) const; |
757 | + int GetDeviceOutputCount(int deviceIndex) const; |
758 | + int GetCurrentDevice() const; |
759 | + int GetDeviceCount() const; |
760 | + |
761 | + void SetBufferSize(int bufferSize); |
762 | + void SetSampleRate(int sampleRate); |
763 | + |
764 | + bool IsStreaming() const; |
765 | + int GetBufferSize() const; |
766 | + int GetSampleRate() const; |
767 | + |
768 | +protected: |
769 | + virtual void Process_(DspSignalBus& inputs, DspSignalBus& outputs); |
770 | + virtual bool ParameterUpdating_(int index, DspParameter const& param); |
771 | + |
772 | +private: |
773 | + std::vector< std::vector<float> > _outputChannels; |
774 | + std::vector< std::vector<float> > _inputChannels; |
775 | + |
776 | + RtAudioMembers* _rtAudio; |
777 | + |
778 | + DspMutex _buffersMutex; |
779 | + DspMutex _syncMutex; |
780 | + DspWaitCondition _waitCondt; |
781 | + DspWaitCondition _syncCondt; |
782 | + bool _gotWaitReady; |
783 | + bool _gotSyncReady; |
784 | + |
785 | + void _SetIsStreaming(bool isStreaming); |
786 | + |
787 | + void _WaitForBuffer(); |
788 | + void _SyncBuffer(); |
789 | + |
790 | + void _StopStream(); |
791 | + void _StartStream(); |
792 | + |
793 | + static int _StaticCallback(void* outputBuffer, |
794 | + void* inputBuffer, |
795 | + unsigned int nBufferFrames, |
796 | + double streamTime, |
797 | + unsigned int status, |
798 | + void* userData); |
799 | + |
800 | + int _DynamicCallback(void* inputBuffer, void* outputBuffer); |
801 | +}; |
802 | + |
803 | +//================================================================================================= |
804 | + |
805 | +class DspAudioDevicePlugin : public DspPlugin |
806 | +{ |
807 | + DspComponent* Create(std::map<std::string, DspParameter>&) const |
808 | + { |
809 | + return new DspAudioDevice(); |
810 | + } |
811 | +}; |
812 | + |
813 | +EXPORT_DSPPLUGIN(DspAudioDevicePlugin) |
814 | + |
815 | +//================================================================================================= |
816 | + |
817 | +#endif // DSPAUDIODEVICE_H |
818 | |
819 | === added directory 'DspAudioDevice/rtaudio' |
820 | === added file 'DspAudioDevice/rtaudio/RtAudio.cpp' |
821 | --- DspAudioDevice/rtaudio/RtAudio.cpp 1970-01-01 00:00:00 +0000 |
822 | +++ DspAudioDevice/rtaudio/RtAudio.cpp 2015-04-05 22:23:36 +0000 |
823 | @@ -0,0 +1,10136 @@ |
824 | +/************************************************************************/ |
825 | +/*! \class RtAudio |
826 | + \brief Realtime audio i/o C++ classes. |
827 | + |
828 | + RtAudio provides a common API (Application Programming Interface) |
829 | + for realtime audio input/output across Linux (native ALSA, Jack, |
830 | + and OSS), Macintosh OS X (CoreAudio and Jack), and Windows |
831 | + (DirectSound, ASIO and WASAPI) operating systems. |
832 | + |
833 | + RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/ |
834 | + |
835 | + RtAudio: realtime audio i/o C++ classes |
836 | + Copyright (c) 2001-2014 Gary P. Scavone |
837 | + |
838 | + Permission is hereby granted, free of charge, to any person |
839 | + obtaining a copy of this software and associated documentation files |
840 | + (the "Software"), to deal in the Software without restriction, |
841 | + including without limitation the rights to use, copy, modify, merge, |
842 | + publish, distribute, sublicense, and/or sell copies of the Software, |
843 | + and to permit persons to whom the Software is furnished to do so, |
844 | + subject to the following conditions: |
845 | + |
846 | + The above copyright notice and this permission notice shall be |
847 | + included in all copies or substantial portions of the Software. |
848 | + |
849 | + Any person wishing to distribute modifications to the Software is |
850 | + asked to send the modifications to the original developer so that |
851 | + they can be incorporated into the canonical version. This is, |
852 | + however, not a binding provision of this license. |
853 | + |
854 | + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
855 | + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
856 | + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. |
857 | + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR |
858 | + ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF |
859 | + CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION |
860 | + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
861 | +*/ |
862 | +/************************************************************************/ |
863 | + |
864 | +// RtAudio: Version 4.1.1 |
865 | + |
866 | +#include "RtAudio.h" |
867 | +#include <iostream> |
868 | +#include <cstdlib> |
869 | +#include <cstring> |
870 | +#include <climits> |
871 | + |
872 | +// Static variable definitions. |
873 | +const unsigned int RtApi::MAX_SAMPLE_RATES = 14; |
874 | +const unsigned int RtApi::SAMPLE_RATES[] = { |
875 | + 4000, 5512, 8000, 9600, 11025, 16000, 22050, |
876 | + 32000, 44100, 48000, 88200, 96000, 176400, 192000 |
877 | +}; |
878 | + |
879 | +#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__) |
880 | + #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A) |
881 | + #define MUTEX_DESTROY(A) DeleteCriticalSection(A) |
882 | + #define MUTEX_LOCK(A) EnterCriticalSection(A) |
883 | + #define MUTEX_UNLOCK(A) LeaveCriticalSection(A) |
884 | +#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__) |
885 | + // pthread API |
886 | + #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL) |
887 | + #define MUTEX_DESTROY(A) pthread_mutex_destroy(A) |
888 | + #define MUTEX_LOCK(A) pthread_mutex_lock(A) |
889 | + #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A) |
890 | +#else |
891 | + #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions |
892 | + #define MUTEX_DESTROY(A) abs(*A) // dummy definitions |
893 | +#endif |
894 | + |
895 | +// *************************************************** // |
896 | +// |
897 | +// RtAudio definitions. |
898 | +// |
899 | +// *************************************************** // |
900 | + |
901 | +std::string RtAudio :: getVersion( void ) throw() |
902 | +{ |
903 | + return RTAUDIO_VERSION; |
904 | +} |
905 | + |
906 | +void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw() |
907 | +{ |
908 | + apis.clear(); |
909 | + |
910 | + // The order here will control the order of RtAudio's API search in |
911 | + // the constructor. |
912 | +#if defined(__UNIX_JACK__) |
913 | + apis.push_back( UNIX_JACK ); |
914 | +#endif |
915 | +#if defined(__LINUX_ALSA__) |
916 | + apis.push_back( LINUX_ALSA ); |
917 | +#endif |
918 | +#if defined(__LINUX_PULSE__) |
919 | + apis.push_back( LINUX_PULSE ); |
920 | +#endif |
921 | +#if defined(__LINUX_OSS__) |
922 | + apis.push_back( LINUX_OSS ); |
923 | +#endif |
924 | +#if defined(__WINDOWS_ASIO__) |
925 | + apis.push_back( WINDOWS_ASIO ); |
926 | +#endif |
927 | +#if defined(__WINDOWS_WASAPI__) |
928 | + apis.push_back( WINDOWS_WASAPI ); |
929 | +#endif |
930 | +#if defined(__WINDOWS_DS__) |
931 | + apis.push_back( WINDOWS_DS ); |
932 | +#endif |
933 | +#if defined(__MACOSX_CORE__) |
934 | + apis.push_back( MACOSX_CORE ); |
935 | +#endif |
936 | +#if defined(__RTAUDIO_DUMMY__) |
937 | + apis.push_back( RTAUDIO_DUMMY ); |
938 | +#endif |
939 | +} |
940 | + |
941 | +void RtAudio :: openRtApi( RtAudio::Api api ) |
942 | +{ |
943 | + if ( rtapi_ ) |
944 | + delete rtapi_; |
945 | + rtapi_ = 0; |
946 | + |
947 | +#if defined(__UNIX_JACK__) |
948 | + if ( api == UNIX_JACK ) |
949 | + rtapi_ = new RtApiJack(); |
950 | +#endif |
951 | +#if defined(__LINUX_ALSA__) |
952 | + if ( api == LINUX_ALSA ) |
953 | + rtapi_ = new RtApiAlsa(); |
954 | +#endif |
955 | +#if defined(__LINUX_PULSE__) |
956 | + if ( api == LINUX_PULSE ) |
957 | + rtapi_ = new RtApiPulse(); |
958 | +#endif |
959 | +#if defined(__LINUX_OSS__) |
960 | + if ( api == LINUX_OSS ) |
961 | + rtapi_ = new RtApiOss(); |
962 | +#endif |
963 | +#if defined(__WINDOWS_ASIO__) |
964 | + if ( api == WINDOWS_ASIO ) |
965 | + rtapi_ = new RtApiAsio(); |
966 | +#endif |
967 | +#if defined(__WINDOWS_WASAPI__) |
968 | + if ( api == WINDOWS_WASAPI ) |
969 | + rtapi_ = new RtApiWasapi(); |
970 | +#endif |
971 | +#if defined(__WINDOWS_DS__) |
972 | + if ( api == WINDOWS_DS ) |
973 | + rtapi_ = new RtApiDs(); |
974 | +#endif |
975 | +#if defined(__MACOSX_CORE__) |
976 | + if ( api == MACOSX_CORE ) |
977 | + rtapi_ = new RtApiCore(); |
978 | +#endif |
979 | +#if defined(__RTAUDIO_DUMMY__) |
980 | + if ( api == RTAUDIO_DUMMY ) |
981 | + rtapi_ = new RtApiDummy(); |
982 | +#endif |
983 | +} |
984 | + |
985 | +RtAudio :: RtAudio( RtAudio::Api api ) |
986 | +{ |
987 | + rtapi_ = 0; |
988 | + |
989 | + if ( api != UNSPECIFIED ) { |
990 | + // Attempt to open the specified API. |
991 | + openRtApi( api ); |
992 | + if ( rtapi_ ) return; |
993 | + |
994 | + // No compiled support for specified API value. Issue a debug |
995 | + // warning and continue as if no API was specified. |
996 | + std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl; |
997 | + } |
998 | + |
999 | + // Iterate through the compiled APIs and return as soon as we find |
1000 | + // one with at least one device or we reach the end of the list. |
1001 | + std::vector< RtAudio::Api > apis; |
1002 | + getCompiledApi( apis ); |
1003 | + for ( unsigned int i=0; i<apis.size(); i++ ) { |
1004 | + openRtApi( apis[i] ); |
1005 | + if ( rtapi_->getDeviceCount() ) break; |
1006 | + } |
1007 | + |
1008 | + if ( rtapi_ ) return; |
1009 | + |
1010 | + // It should not be possible to get here because the preprocessor |
1011 | + // definition __RTAUDIO_DUMMY__ is automatically defined if no |
1012 | + // API-specific definitions are passed to the compiler. But just in |
1013 | + // case something weird happens, we'll throw an error. |
1014 | + std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n"; |
1015 | + throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) ); |
1016 | +} |
1017 | + |
1018 | +RtAudio :: ~RtAudio() throw() |
1019 | +{ |
1020 | + if ( rtapi_ ) |
1021 | + delete rtapi_; |
1022 | +} |
1023 | + |
1024 | +void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters, |
1025 | + RtAudio::StreamParameters *inputParameters, |
1026 | + RtAudioFormat format, unsigned int sampleRate, |
1027 | + unsigned int *bufferFrames, |
1028 | + RtAudioCallback callback, void *userData, |
1029 | + RtAudio::StreamOptions *options, |
1030 | + RtAudioErrorCallback errorCallback ) |
1031 | +{ |
1032 | + return rtapi_->openStream( outputParameters, inputParameters, format, |
1033 | + sampleRate, bufferFrames, callback, |
1034 | + userData, options, errorCallback ); |
1035 | +} |
1036 | + |
1037 | +// *************************************************** // |
1038 | +// |
1039 | +// Public RtApi definitions (see end of file for |
1040 | +// private or protected utility functions). |
1041 | +// |
1042 | +// *************************************************** // |
1043 | + |
1044 | +RtApi :: RtApi() |
1045 | +{ |
1046 | + stream_.state = STREAM_CLOSED; |
1047 | + stream_.mode = UNINITIALIZED; |
1048 | + stream_.apiHandle = 0; |
1049 | + stream_.userBuffer[0] = 0; |
1050 | + stream_.userBuffer[1] = 0; |
1051 | + MUTEX_INITIALIZE( &stream_.mutex ); |
1052 | + showWarnings_ = true; |
1053 | + firstErrorOccurred_ = false; |
1054 | +} |
1055 | + |
1056 | +RtApi :: ~RtApi() |
1057 | +{ |
1058 | + MUTEX_DESTROY( &stream_.mutex ); |
1059 | +} |
1060 | + |
1061 | +void RtApi :: openStream( RtAudio::StreamParameters *oParams, |
1062 | + RtAudio::StreamParameters *iParams, |
1063 | + RtAudioFormat format, unsigned int sampleRate, |
1064 | + unsigned int *bufferFrames, |
1065 | + RtAudioCallback callback, void *userData, |
1066 | + RtAudio::StreamOptions *options, |
1067 | + RtAudioErrorCallback errorCallback ) |
1068 | +{ |
1069 | + if ( stream_.state != STREAM_CLOSED ) { |
1070 | + errorText_ = "RtApi::openStream: a stream is already open!"; |
1071 | + error( RtAudioError::INVALID_USE ); |
1072 | + return; |
1073 | + } |
1074 | + |
1075 | + // Clear stream information potentially left from a previously open stream. |
1076 | + clearStreamInfo(); |
1077 | + |
1078 | + if ( oParams && oParams->nChannels < 1 ) { |
1079 | + errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one."; |
1080 | + error( RtAudioError::INVALID_USE ); |
1081 | + return; |
1082 | + } |
1083 | + |
1084 | + if ( iParams && iParams->nChannels < 1 ) { |
1085 | + errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one."; |
1086 | + error( RtAudioError::INVALID_USE ); |
1087 | + return; |
1088 | + } |
1089 | + |
1090 | + if ( oParams == NULL && iParams == NULL ) { |
1091 | + errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!"; |
1092 | + error( RtAudioError::INVALID_USE ); |
1093 | + return; |
1094 | + } |
1095 | + |
1096 | + if ( formatBytes(format) == 0 ) { |
1097 | + errorText_ = "RtApi::openStream: 'format' parameter value is undefined."; |
1098 | + error( RtAudioError::INVALID_USE ); |
1099 | + return; |
1100 | + } |
1101 | + |
1102 | + unsigned int nDevices = getDeviceCount(); |
1103 | + unsigned int oChannels = 0; |
1104 | + if ( oParams ) { |
1105 | + oChannels = oParams->nChannels; |
1106 | + if ( oParams->deviceId >= nDevices ) { |
1107 | + errorText_ = "RtApi::openStream: output device parameter value is invalid."; |
1108 | + error( RtAudioError::INVALID_USE ); |
1109 | + return; |
1110 | + } |
1111 | + } |
1112 | + |
1113 | + unsigned int iChannels = 0; |
1114 | + if ( iParams ) { |
1115 | + iChannels = iParams->nChannels; |
1116 | + if ( iParams->deviceId >= nDevices ) { |
1117 | + errorText_ = "RtApi::openStream: input device parameter value is invalid."; |
1118 | + error( RtAudioError::INVALID_USE ); |
1119 | + return; |
1120 | + } |
1121 | + } |
1122 | + |
1123 | + bool result; |
1124 | + |
1125 | + if ( oChannels > 0 ) { |
1126 | + |
1127 | + result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel, |
1128 | + sampleRate, format, bufferFrames, options ); |
1129 | + if ( result == false ) { |
1130 | + error( RtAudioError::SYSTEM_ERROR ); |
1131 | + return; |
1132 | + } |
1133 | + } |
1134 | + |
1135 | + if ( iChannels > 0 ) { |
1136 | + |
1137 | + result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel, |
1138 | + sampleRate, format, bufferFrames, options ); |
1139 | + if ( result == false ) { |
1140 | + if ( oChannels > 0 ) closeStream(); |
1141 | + error( RtAudioError::SYSTEM_ERROR ); |
1142 | + return; |
1143 | + } |
1144 | + } |
1145 | + |
1146 | + stream_.callbackInfo.callback = (void *) callback; |
1147 | + stream_.callbackInfo.userData = userData; |
1148 | + stream_.callbackInfo.errorCallback = (void *) errorCallback; |
1149 | + |
1150 | + if ( options ) options->numberOfBuffers = stream_.nBuffers; |
1151 | + stream_.state = STREAM_STOPPED; |
1152 | +} |
1153 | + |
1154 | +unsigned int RtApi :: getDefaultInputDevice( void ) |
1155 | +{ |
1156 | + // Should be implemented in subclasses if possible. |
1157 | + return 0; |
1158 | +} |
1159 | + |
1160 | +unsigned int RtApi :: getDefaultOutputDevice( void ) |
1161 | +{ |
1162 | + // Should be implemented in subclasses if possible. |
1163 | + return 0; |
1164 | +} |
1165 | + |
1166 | +void RtApi :: closeStream( void ) |
1167 | +{ |
1168 | + // MUST be implemented in subclasses! |
1169 | + return; |
1170 | +} |
1171 | + |
1172 | +bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/, |
1173 | + unsigned int /*firstChannel*/, unsigned int /*sampleRate*/, |
1174 | + RtAudioFormat /*format*/, unsigned int * /*bufferSize*/, |
1175 | + RtAudio::StreamOptions * /*options*/ ) |
1176 | +{ |
1177 | + // MUST be implemented in subclasses! |
1178 | + return FAILURE; |
1179 | +} |
1180 | + |
1181 | +void RtApi :: tickStreamTime( void ) |
1182 | +{ |
1183 | + // Subclasses that do not provide their own implementation of |
1184 | + // getStreamTime should call this function once per buffer I/O to |
1185 | + // provide basic stream time support. |
1186 | + |
1187 | + stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate ); |
1188 | + |
1189 | +#if defined( HAVE_GETTIMEOFDAY ) |
1190 | + gettimeofday( &stream_.lastTickTimestamp, NULL ); |
1191 | +#endif |
1192 | +} |
1193 | + |
1194 | +long RtApi :: getStreamLatency( void ) |
1195 | +{ |
1196 | + verifyStream(); |
1197 | + |
1198 | + long totalLatency = 0; |
1199 | + if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) |
1200 | + totalLatency = stream_.latency[0]; |
1201 | + if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) |
1202 | + totalLatency += stream_.latency[1]; |
1203 | + |
1204 | + return totalLatency; |
1205 | +} |
1206 | + |
1207 | +double RtApi :: getStreamTime( void ) |
1208 | +{ |
1209 | + verifyStream(); |
1210 | + |
1211 | +#if defined( HAVE_GETTIMEOFDAY ) |
1212 | + // Return a very accurate estimate of the stream time by |
1213 | + // adding in the elapsed time since the last tick. |
1214 | + struct timeval then; |
1215 | + struct timeval now; |
1216 | + |
1217 | + if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 ) |
1218 | + return stream_.streamTime; |
1219 | + |
1220 | + gettimeofday( &now, NULL ); |
1221 | + then = stream_.lastTickTimestamp; |
1222 | + return stream_.streamTime + |
1223 | + ((now.tv_sec + 0.000001 * now.tv_usec) - |
1224 | + (then.tv_sec + 0.000001 * then.tv_usec)); |
1225 | +#else |
1226 | + return stream_.streamTime; |
1227 | +#endif |
1228 | +} |
1229 | + |
1230 | +void RtApi :: setStreamTime( double time ) |
1231 | +{ |
1232 | + verifyStream(); |
1233 | + |
1234 | + if ( time >= 0.0 ) |
1235 | + stream_.streamTime = time; |
1236 | +} |
1237 | + |
1238 | +unsigned int RtApi :: getStreamSampleRate( void ) |
1239 | +{ |
1240 | + verifyStream(); |
1241 | + |
1242 | + return stream_.sampleRate; |
1243 | +} |
1244 | + |
1245 | + |
1246 | +// *************************************************** // |
1247 | +// |
1248 | +// OS/API-specific methods. |
1249 | +// |
1250 | +// *************************************************** // |
1251 | + |
1252 | +#if defined(__MACOSX_CORE__) |
1253 | + |
1254 | +// The OS X CoreAudio API is designed to use a separate callback |
1255 | +// procedure for each of its audio devices. A single RtAudio duplex |
1256 | +// stream using two different devices is supported here, though it |
1257 | +// cannot be guaranteed to always behave correctly because we cannot |
1258 | +// synchronize these two callbacks. |
1259 | +// |
1260 | +// A property listener is installed for over/underrun information. |
1261 | +// However, no functionality is currently provided to allow property |
1262 | +// listeners to trigger user handlers because it is unclear what could |
1263 | +// be done if a critical stream parameter (buffer size, sample rate, |
1264 | +// device disconnect) notification arrived. The listeners entail |
1265 | +// quite a bit of extra code and most likely, a user program wouldn't |
1266 | +// be prepared for the result anyway. However, we do provide a flag |
1267 | +// to the client callback function to inform of an over/underrun. |
1268 | + |
1269 | +// A structure to hold various information related to the CoreAudio API |
1270 | +// implementation. |
1271 | +struct CoreHandle { |
1272 | + AudioDeviceID id[2]; // device ids |
1273 | +#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 ) |
1274 | + AudioDeviceIOProcID procId[2]; |
1275 | +#endif |
1276 | + UInt32 iStream[2]; // device stream index (or first if using multiple) |
1277 | + UInt32 nStreams[2]; // number of streams to use |
1278 | + bool xrun[2]; |
1279 | + char *deviceBuffer; |
1280 | + pthread_cond_t condition; |
1281 | + int drainCounter; // Tracks callback counts when draining |
1282 | + bool internalDrain; // Indicates if stop is initiated from callback or not. |
1283 | + |
1284 | + CoreHandle() |
1285 | + :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; } |
1286 | +}; |
1287 | + |
1288 | +RtApiCore:: RtApiCore() |
1289 | +{ |
1290 | +#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER ) |
1291 | + // This is a largely undocumented but absolutely necessary |
1292 | + // requirement starting with OS-X 10.6. If not called, queries and |
1293 | + // updates to various audio device properties are not handled |
1294 | + // correctly. |
1295 | + CFRunLoopRef theRunLoop = NULL; |
1296 | + AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop, |
1297 | + kAudioObjectPropertyScopeGlobal, |
1298 | + kAudioObjectPropertyElementMaster }; |
1299 | + OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop); |
1300 | + if ( result != noErr ) { |
1301 | + errorText_ = "RtApiCore::RtApiCore: error setting run loop property!"; |
1302 | + error( RtAudioError::WARNING ); |
1303 | + } |
1304 | +#endif |
1305 | +} |
1306 | + |
1307 | +RtApiCore :: ~RtApiCore() |
1308 | +{ |
1309 | + // The subclass destructor gets called before the base class |
1310 | + // destructor, so close an existing stream before deallocating |
1311 | + // apiDeviceId memory. |
1312 | + if ( stream_.state != STREAM_CLOSED ) closeStream(); |
1313 | +} |
1314 | + |
1315 | +unsigned int RtApiCore :: getDeviceCount( void ) |
1316 | +{ |
1317 | + // Find out how many audio devices there are, if any. |
1318 | + UInt32 dataSize; |
1319 | + AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; |
1320 | + OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize ); |
1321 | + if ( result != noErr ) { |
1322 | + errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!"; |
1323 | + error( RtAudioError::WARNING ); |
1324 | + return 0; |
1325 | + } |
1326 | + |
1327 | + return dataSize / sizeof( AudioDeviceID ); |
1328 | +} |
1329 | + |
1330 | +unsigned int RtApiCore :: getDefaultInputDevice( void ) |
1331 | +{ |
1332 | + unsigned int nDevices = getDeviceCount(); |
1333 | + if ( nDevices <= 1 ) return 0; |
1334 | + |
1335 | + AudioDeviceID id; |
1336 | + UInt32 dataSize = sizeof( AudioDeviceID ); |
1337 | + AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; |
1338 | + OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id ); |
1339 | + if ( result != noErr ) { |
1340 | + errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device."; |
1341 | + error( RtAudioError::WARNING ); |
1342 | + return 0; |
1343 | + } |
1344 | + |
1345 | + dataSize *= nDevices; |
1346 | + AudioDeviceID deviceList[ nDevices ]; |
1347 | + property.mSelector = kAudioHardwarePropertyDevices; |
1348 | + result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList ); |
1349 | + if ( result != noErr ) { |
1350 | + errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs."; |
1351 | + error( RtAudioError::WARNING ); |
1352 | + return 0; |
1353 | + } |
1354 | + |
1355 | + for ( unsigned int i=0; i<nDevices; i++ ) |
1356 | + if ( id == deviceList[i] ) return i; |
1357 | + |
1358 | + errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!"; |
1359 | + error( RtAudioError::WARNING ); |
1360 | + return 0; |
1361 | +} |
1362 | + |
1363 | +unsigned int RtApiCore :: getDefaultOutputDevice( void ) |
1364 | +{ |
1365 | + unsigned int nDevices = getDeviceCount(); |
1366 | + if ( nDevices <= 1 ) return 0; |
1367 | + |
1368 | + AudioDeviceID id; |
1369 | + UInt32 dataSize = sizeof( AudioDeviceID ); |
1370 | + AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; |
1371 | + OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id ); |
1372 | + if ( result != noErr ) { |
1373 | + errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device."; |
1374 | + error( RtAudioError::WARNING ); |
1375 | + return 0; |
1376 | + } |
1377 | + |
1378 | + dataSize = sizeof( AudioDeviceID ) * nDevices; |
1379 | + AudioDeviceID deviceList[ nDevices ]; |
1380 | + property.mSelector = kAudioHardwarePropertyDevices; |
1381 | + result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList ); |
1382 | + if ( result != noErr ) { |
1383 | + errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs."; |
1384 | + error( RtAudioError::WARNING ); |
1385 | + return 0; |
1386 | + } |
1387 | + |
1388 | + for ( unsigned int i=0; i<nDevices; i++ ) |
1389 | + if ( id == deviceList[i] ) return i; |
1390 | + |
1391 | + errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!"; |
1392 | + error( RtAudioError::WARNING ); |
1393 | + return 0; |
1394 | +} |
1395 | + |
1396 | +RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device ) |
1397 | +{ |
1398 | + RtAudio::DeviceInfo info; |
1399 | + info.probed = false; |
1400 | + |
1401 | + // Get device ID |
1402 | + unsigned int nDevices = getDeviceCount(); |
1403 | + if ( nDevices == 0 ) { |
1404 | + errorText_ = "RtApiCore::getDeviceInfo: no devices found!"; |
1405 | + error( RtAudioError::INVALID_USE ); |
1406 | + return info; |
1407 | + } |
1408 | + |
1409 | + if ( device >= nDevices ) { |
1410 | + errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!"; |
1411 | + error( RtAudioError::INVALID_USE ); |
1412 | + return info; |
1413 | + } |
1414 | + |
1415 | + AudioDeviceID deviceList[ nDevices ]; |
1416 | + UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices; |
1417 | + AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices, |
1418 | + kAudioObjectPropertyScopeGlobal, |
1419 | + kAudioObjectPropertyElementMaster }; |
1420 | + OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, |
1421 | + 0, NULL, &dataSize, (void *) &deviceList ); |
1422 | + if ( result != noErr ) { |
1423 | + errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs."; |
1424 | + error( RtAudioError::WARNING ); |
1425 | + return info; |
1426 | + } |
1427 | + |
1428 | + AudioDeviceID id = deviceList[ device ]; |
1429 | + |
1430 | + // Get the device name. |
1431 | + info.name.erase(); |
1432 | + CFStringRef cfname; |
1433 | + dataSize = sizeof( CFStringRef ); |
1434 | + property.mSelector = kAudioObjectPropertyManufacturer; |
1435 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname ); |
1436 | + if ( result != noErr ) { |
1437 | + errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer."; |
1438 | + errorText_ = errorStream_.str(); |
1439 | + error( RtAudioError::WARNING ); |
1440 | + return info; |
1441 | + } |
1442 | + |
1443 | + //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() ); |
1444 | + int length = CFStringGetLength(cfname); |
1445 | + char *mname = (char *)malloc(length * 3 + 1); |
1446 | +#if defined( UNICODE ) || defined( _UNICODE ) |
1447 | + CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8); |
1448 | +#else |
1449 | + CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding()); |
1450 | +#endif |
1451 | + info.name.append( (const char *)mname, strlen(mname) ); |
1452 | + info.name.append( ": " ); |
1453 | + CFRelease( cfname ); |
1454 | + free(mname); |
1455 | + |
1456 | + property.mSelector = kAudioObjectPropertyName; |
1457 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname ); |
1458 | + if ( result != noErr ) { |
1459 | + errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name."; |
1460 | + errorText_ = errorStream_.str(); |
1461 | + error( RtAudioError::WARNING ); |
1462 | + return info; |
1463 | + } |
1464 | + |
1465 | + //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() ); |
1466 | + length = CFStringGetLength(cfname); |
1467 | + char *name = (char *)malloc(length * 3 + 1); |
1468 | +#if defined( UNICODE ) || defined( _UNICODE ) |
1469 | + CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8); |
1470 | +#else |
1471 | + CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding()); |
1472 | +#endif |
1473 | + info.name.append( (const char *)name, strlen(name) ); |
1474 | + CFRelease( cfname ); |
1475 | + free(name); |
1476 | + |
1477 | + // Get the output stream "configuration". |
1478 | + AudioBufferList *bufferList = nil; |
1479 | + property.mSelector = kAudioDevicePropertyStreamConfiguration; |
1480 | + property.mScope = kAudioDevicePropertyScopeOutput; |
1481 | + // property.mElement = kAudioObjectPropertyElementWildcard; |
1482 | + dataSize = 0; |
1483 | + result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize ); |
1484 | + if ( result != noErr || dataSize == 0 ) { |
1485 | + errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ")."; |
1486 | + errorText_ = errorStream_.str(); |
1487 | + error( RtAudioError::WARNING ); |
1488 | + return info; |
1489 | + } |
1490 | + |
1491 | + // Allocate the AudioBufferList. |
1492 | + bufferList = (AudioBufferList *) malloc( dataSize ); |
1493 | + if ( bufferList == NULL ) { |
1494 | + errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList."; |
1495 | + error( RtAudioError::WARNING ); |
1496 | + return info; |
1497 | + } |
1498 | + |
1499 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList ); |
1500 | + if ( result != noErr || dataSize == 0 ) { |
1501 | + free( bufferList ); |
1502 | + errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ")."; |
1503 | + errorText_ = errorStream_.str(); |
1504 | + error( RtAudioError::WARNING ); |
1505 | + return info; |
1506 | + } |
1507 | + |
1508 | + // Get output channel information. |
1509 | + unsigned int i, nStreams = bufferList->mNumberBuffers; |
1510 | + for ( i=0; i<nStreams; i++ ) |
1511 | + info.outputChannels += bufferList->mBuffers[i].mNumberChannels; |
1512 | + free( bufferList ); |
1513 | + |
1514 | + // Get the input stream "configuration". |
1515 | + property.mScope = kAudioDevicePropertyScopeInput; |
1516 | + result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize ); |
1517 | + if ( result != noErr || dataSize == 0 ) { |
1518 | + errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ")."; |
1519 | + errorText_ = errorStream_.str(); |
1520 | + error( RtAudioError::WARNING ); |
1521 | + return info; |
1522 | + } |
1523 | + |
1524 | + // Allocate the AudioBufferList. |
1525 | + bufferList = (AudioBufferList *) malloc( dataSize ); |
1526 | + if ( bufferList == NULL ) { |
1527 | + errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList."; |
1528 | + error( RtAudioError::WARNING ); |
1529 | + return info; |
1530 | + } |
1531 | + |
1532 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList ); |
1533 | + if (result != noErr || dataSize == 0) { |
1534 | + free( bufferList ); |
1535 | + errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ")."; |
1536 | + errorText_ = errorStream_.str(); |
1537 | + error( RtAudioError::WARNING ); |
1538 | + return info; |
1539 | + } |
1540 | + |
1541 | + // Get input channel information. |
1542 | + nStreams = bufferList->mNumberBuffers; |
1543 | + for ( i=0; i<nStreams; i++ ) |
1544 | + info.inputChannels += bufferList->mBuffers[i].mNumberChannels; |
1545 | + free( bufferList ); |
1546 | + |
1547 | + // If device opens for both playback and capture, we determine the channels. |
1548 | + if ( info.outputChannels > 0 && info.inputChannels > 0 ) |
1549 | + info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels; |
1550 | + |
1551 | + // Probe the device sample rates. |
1552 | + bool isInput = false; |
1553 | + if ( info.outputChannels == 0 ) isInput = true; |
1554 | + |
1555 | + // Determine the supported sample rates. |
1556 | + property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates; |
1557 | + if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput; |
1558 | + result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize ); |
1559 | + if ( result != kAudioHardwareNoError || dataSize == 0 ) { |
1560 | + errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info."; |
1561 | + errorText_ = errorStream_.str(); |
1562 | + error( RtAudioError::WARNING ); |
1563 | + return info; |
1564 | + } |
1565 | + |
1566 | + UInt32 nRanges = dataSize / sizeof( AudioValueRange ); |
1567 | + AudioValueRange rangeList[ nRanges ]; |
1568 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList ); |
1569 | + if ( result != kAudioHardwareNoError ) { |
1570 | + errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates."; |
1571 | + errorText_ = errorStream_.str(); |
1572 | + error( RtAudioError::WARNING ); |
1573 | + return info; |
1574 | + } |
1575 | + |
1576 | + // The sample rate reporting mechanism is a bit of a mystery. It |
1577 | + // seems that it can either return individual rates or a range of |
1578 | + // rates. I assume that if the min / max range values are the same, |
1579 | + // then that represents a single supported rate and if the min / max |
1580 | + // range values are different, the device supports an arbitrary |
1581 | + // range of values (though there might be multiple ranges, so we'll |
1582 | + // use the most conservative range). |
1583 | + Float64 minimumRate = 1.0, maximumRate = 10000000000.0; |
1584 | + bool haveValueRange = false; |
1585 | + info.sampleRates.clear(); |
1586 | + for ( UInt32 i=0; i<nRanges; i++ ) { |
1587 | + if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) |
1588 | + info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum ); |
1589 | + else { |
1590 | + haveValueRange = true; |
1591 | + if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum; |
1592 | + if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum; |
1593 | + } |
1594 | + } |
1595 | + |
1596 | + if ( haveValueRange ) { |
1597 | + for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) { |
1598 | + if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) |
1599 | + info.sampleRates.push_back( SAMPLE_RATES[k] ); |
1600 | + } |
1601 | + } |
1602 | + |
1603 | + // Sort and remove any redundant values |
1604 | + std::sort( info.sampleRates.begin(), info.sampleRates.end() ); |
1605 | + info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() ); |
1606 | + |
1607 | + if ( info.sampleRates.size() == 0 ) { |
1608 | + errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ")."; |
1609 | + errorText_ = errorStream_.str(); |
1610 | + error( RtAudioError::WARNING ); |
1611 | + return info; |
1612 | + } |
1613 | + |
1614 | + // CoreAudio always uses 32-bit floating point data for PCM streams. |
1615 | + // Thus, any other "physical" formats supported by the device are of |
1616 | + // no interest to the client. |
1617 | + info.nativeFormats = RTAUDIO_FLOAT32; |
1618 | + |
1619 | + if ( info.outputChannels > 0 ) |
1620 | + if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true; |
1621 | + if ( info.inputChannels > 0 ) |
1622 | + if ( getDefaultInputDevice() == device ) info.isDefaultInput = true; |
1623 | + |
1624 | + info.probed = true; |
1625 | + return info; |
1626 | +} |
1627 | + |
1628 | +static OSStatus callbackHandler( AudioDeviceID inDevice, |
1629 | + const AudioTimeStamp* /*inNow*/, |
1630 | + const AudioBufferList* inInputData, |
1631 | + const AudioTimeStamp* /*inInputTime*/, |
1632 | + AudioBufferList* outOutputData, |
1633 | + const AudioTimeStamp* /*inOutputTime*/, |
1634 | + void* infoPointer ) |
1635 | +{ |
1636 | + CallbackInfo *info = (CallbackInfo *) infoPointer; |
1637 | + |
1638 | + RtApiCore *object = (RtApiCore *) info->object; |
1639 | + if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false ) |
1640 | + return kAudioHardwareUnspecifiedError; |
1641 | + else |
1642 | + return kAudioHardwareNoError; |
1643 | +} |
1644 | + |
1645 | +static OSStatus xrunListener( AudioObjectID /*inDevice*/, |
1646 | + UInt32 nAddresses, |
1647 | + const AudioObjectPropertyAddress properties[], |
1648 | + void* handlePointer ) |
1649 | +{ |
1650 | + CoreHandle *handle = (CoreHandle *) handlePointer; |
1651 | + for ( UInt32 i=0; i<nAddresses; i++ ) { |
1652 | + if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) { |
1653 | + if ( properties[i].mScope == kAudioDevicePropertyScopeInput ) |
1654 | + handle->xrun[1] = true; |
1655 | + else |
1656 | + handle->xrun[0] = true; |
1657 | + } |
1658 | + } |
1659 | + |
1660 | + return kAudioHardwareNoError; |
1661 | +} |
1662 | + |
1663 | +static OSStatus rateListener( AudioObjectID inDevice, |
1664 | + UInt32 /*nAddresses*/, |
1665 | + const AudioObjectPropertyAddress /*properties*/[], |
1666 | + void* ratePointer ) |
1667 | +{ |
1668 | + Float64 *rate = (Float64 *) ratePointer; |
1669 | + UInt32 dataSize = sizeof( Float64 ); |
1670 | + AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate, |
1671 | + kAudioObjectPropertyScopeGlobal, |
1672 | + kAudioObjectPropertyElementMaster }; |
1673 | + AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate ); |
1674 | + return kAudioHardwareNoError; |
1675 | +} |
1676 | + |
1677 | +bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels, |
1678 | + unsigned int firstChannel, unsigned int sampleRate, |
1679 | + RtAudioFormat format, unsigned int *bufferSize, |
1680 | + RtAudio::StreamOptions *options ) |
1681 | +{ |
1682 | + // Get device ID |
1683 | + unsigned int nDevices = getDeviceCount(); |
1684 | + if ( nDevices == 0 ) { |
1685 | + // This should not happen because a check is made before this function is called. |
1686 | + errorText_ = "RtApiCore::probeDeviceOpen: no devices found!"; |
1687 | + return FAILURE; |
1688 | + } |
1689 | + |
1690 | + if ( device >= nDevices ) { |
1691 | + // This should not happen because a check is made before this function is called. |
1692 | + errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!"; |
1693 | + return FAILURE; |
1694 | + } |
1695 | + |
1696 | + AudioDeviceID deviceList[ nDevices ]; |
1697 | + UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices; |
1698 | + AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices, |
1699 | + kAudioObjectPropertyScopeGlobal, |
1700 | + kAudioObjectPropertyElementMaster }; |
1701 | + OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, |
1702 | + 0, NULL, &dataSize, (void *) &deviceList ); |
1703 | + if ( result != noErr ) { |
1704 | + errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs."; |
1705 | + return FAILURE; |
1706 | + } |
1707 | + |
1708 | + AudioDeviceID id = deviceList[ device ]; |
1709 | + |
1710 | + // Setup for stream mode. |
1711 | + bool isInput = false; |
1712 | + if ( mode == INPUT ) { |
1713 | + isInput = true; |
1714 | + property.mScope = kAudioDevicePropertyScopeInput; |
1715 | + } |
1716 | + else |
1717 | + property.mScope = kAudioDevicePropertyScopeOutput; |
1718 | + |
1719 | + // Get the stream "configuration". |
1720 | + AudioBufferList *bufferList = nil; |
1721 | + dataSize = 0; |
1722 | + property.mSelector = kAudioDevicePropertyStreamConfiguration; |
1723 | + result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize ); |
1724 | + if ( result != noErr || dataSize == 0 ) { |
1725 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ")."; |
1726 | + errorText_ = errorStream_.str(); |
1727 | + return FAILURE; |
1728 | + } |
1729 | + |
1730 | + // Allocate the AudioBufferList. |
1731 | + bufferList = (AudioBufferList *) malloc( dataSize ); |
1732 | + if ( bufferList == NULL ) { |
1733 | + errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList."; |
1734 | + return FAILURE; |
1735 | + } |
1736 | + |
1737 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList ); |
1738 | + if (result != noErr || dataSize == 0) { |
1739 | + free( bufferList ); |
1740 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ")."; |
1741 | + errorText_ = errorStream_.str(); |
1742 | + return FAILURE; |
1743 | + } |
1744 | + |
1745 | + // Search for one or more streams that contain the desired number of |
1746 | + // channels. CoreAudio devices can have an arbitrary number of |
1747 | + // streams and each stream can have an arbitrary number of channels. |
1748 | + // For each stream, a single buffer of interleaved samples is |
1749 | + // provided. RtAudio prefers the use of one stream of interleaved |
1750 | + // data or multiple consecutive single-channel streams. However, we |
1751 | + // now support multiple consecutive multi-channel streams of |
1752 | + // interleaved data as well. |
1753 | + UInt32 iStream, offsetCounter = firstChannel; |
1754 | + UInt32 nStreams = bufferList->mNumberBuffers; |
1755 | + bool monoMode = false; |
1756 | + bool foundStream = false; |
1757 | + |
1758 | + // First check that the device supports the requested number of |
1759 | + // channels. |
1760 | + UInt32 deviceChannels = 0; |
1761 | + for ( iStream=0; iStream<nStreams; iStream++ ) |
1762 | + deviceChannels += bufferList->mBuffers[iStream].mNumberChannels; |
1763 | + |
1764 | + if ( deviceChannels < ( channels + firstChannel ) ) { |
1765 | + free( bufferList ); |
1766 | + errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count."; |
1767 | + errorText_ = errorStream_.str(); |
1768 | + return FAILURE; |
1769 | + } |
1770 | + |
1771 | + // Look for a single stream meeting our needs. |
1772 | + UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0; |
1773 | + for ( iStream=0; iStream<nStreams; iStream++ ) { |
1774 | + streamChannels = bufferList->mBuffers[iStream].mNumberChannels; |
1775 | + if ( streamChannels >= channels + offsetCounter ) { |
1776 | + firstStream = iStream; |
1777 | + channelOffset = offsetCounter; |
1778 | + foundStream = true; |
1779 | + break; |
1780 | + } |
1781 | + if ( streamChannels > offsetCounter ) break; |
1782 | + offsetCounter -= streamChannels; |
1783 | + } |
1784 | + |
1785 | + // If we didn't find a single stream above, then we should be able |
1786 | + // to meet the channel specification with multiple streams. |
1787 | + if ( foundStream == false ) { |
1788 | + monoMode = true; |
1789 | + offsetCounter = firstChannel; |
1790 | + for ( iStream=0; iStream<nStreams; iStream++ ) { |
1791 | + streamChannels = bufferList->mBuffers[iStream].mNumberChannels; |
1792 | + if ( streamChannels > offsetCounter ) break; |
1793 | + offsetCounter -= streamChannels; |
1794 | + } |
1795 | + |
1796 | + firstStream = iStream; |
1797 | + channelOffset = offsetCounter; |
1798 | + Int32 channelCounter = channels + offsetCounter - streamChannels; |
1799 | + |
1800 | + if ( streamChannels > 1 ) monoMode = false; |
1801 | + while ( channelCounter > 0 ) { |
1802 | + streamChannels = bufferList->mBuffers[++iStream].mNumberChannels; |
1803 | + if ( streamChannels > 1 ) monoMode = false; |
1804 | + channelCounter -= streamChannels; |
1805 | + streamCount++; |
1806 | + } |
1807 | + } |
1808 | + |
1809 | + free( bufferList ); |
1810 | + |
1811 | + // Determine the buffer size. |
1812 | + AudioValueRange bufferRange; |
1813 | + dataSize = sizeof( AudioValueRange ); |
1814 | + property.mSelector = kAudioDevicePropertyBufferFrameSizeRange; |
1815 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange ); |
1816 | + |
1817 | + if ( result != noErr ) { |
1818 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ")."; |
1819 | + errorText_ = errorStream_.str(); |
1820 | + return FAILURE; |
1821 | + } |
1822 | + |
1823 | + if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum; |
1824 | + else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum; |
1825 | + if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum; |
1826 | + |
1827 | + // Set the buffer size. For multiple streams, I'm assuming we only |
1828 | + // need to make this setting for the master channel. |
1829 | + UInt32 theSize = (UInt32) *bufferSize; |
1830 | + dataSize = sizeof( UInt32 ); |
1831 | + property.mSelector = kAudioDevicePropertyBufferFrameSize; |
1832 | + result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize ); |
1833 | + |
1834 | + if ( result != noErr ) { |
1835 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ")."; |
1836 | + errorText_ = errorStream_.str(); |
1837 | + return FAILURE; |
1838 | + } |
1839 | + |
1840 | + // If attempting to setup a duplex stream, the bufferSize parameter |
1841 | + // MUST be the same in both directions! |
1842 | + *bufferSize = theSize; |
1843 | + if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) { |
1844 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ")."; |
1845 | + errorText_ = errorStream_.str(); |
1846 | + return FAILURE; |
1847 | + } |
1848 | + |
1849 | + stream_.bufferSize = *bufferSize; |
1850 | + stream_.nBuffers = 1; |
1851 | + |
1852 | + // Try to set "hog" mode ... it's not clear to me this is working. |
1853 | + if ( options && options->flags & RTAUDIO_HOG_DEVICE ) { |
1854 | + pid_t hog_pid; |
1855 | + dataSize = sizeof( hog_pid ); |
1856 | + property.mSelector = kAudioDevicePropertyHogMode; |
1857 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid ); |
1858 | + if ( result != noErr ) { |
1859 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!"; |
1860 | + errorText_ = errorStream_.str(); |
1861 | + return FAILURE; |
1862 | + } |
1863 | + |
1864 | + if ( hog_pid != getpid() ) { |
1865 | + hog_pid = getpid(); |
1866 | + result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid ); |
1867 | + if ( result != noErr ) { |
1868 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!"; |
1869 | + errorText_ = errorStream_.str(); |
1870 | + return FAILURE; |
1871 | + } |
1872 | + } |
1873 | + } |
1874 | + |
1875 | + // Check and if necessary, change the sample rate for the device. |
1876 | + Float64 nominalRate; |
1877 | + dataSize = sizeof( Float64 ); |
1878 | + property.mSelector = kAudioDevicePropertyNominalSampleRate; |
1879 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate ); |
1880 | + if ( result != noErr ) { |
1881 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate."; |
1882 | + errorText_ = errorStream_.str(); |
1883 | + return FAILURE; |
1884 | + } |
1885 | + |
1886 | + // Only change the sample rate if off by more than 1 Hz. |
1887 | + if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) { |
1888 | + |
1889 | + // Set a property listener for the sample rate change |
1890 | + Float64 reportedRate = 0.0; |
1891 | + AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; |
1892 | + result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate ); |
1893 | + if ( result != noErr ) { |
1894 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ")."; |
1895 | + errorText_ = errorStream_.str(); |
1896 | + return FAILURE; |
1897 | + } |
1898 | + |
1899 | + nominalRate = (Float64) sampleRate; |
1900 | + result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate ); |
1901 | + if ( result != noErr ) { |
1902 | + AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate ); |
1903 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ")."; |
1904 | + errorText_ = errorStream_.str(); |
1905 | + return FAILURE; |
1906 | + } |
1907 | + |
1908 | + // Now wait until the reported nominal rate is what we just set. |
1909 | + UInt32 microCounter = 0; |
1910 | + while ( reportedRate != nominalRate ) { |
1911 | + microCounter += 5000; |
1912 | + if ( microCounter > 5000000 ) break; |
1913 | + usleep( 5000 ); |
1914 | + } |
1915 | + |
1916 | + // Remove the property listener. |
1917 | + AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate ); |
1918 | + |
1919 | + if ( microCounter > 5000000 ) { |
1920 | + errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ")."; |
1921 | + errorText_ = errorStream_.str(); |
1922 | + return FAILURE; |
1923 | + } |
1924 | + } |
1925 | + |
1926 | + // Now set the stream format for all streams. Also, check the |
1927 | + // physical format of the device and change that if necessary. |
1928 | + AudioStreamBasicDescription description; |
1929 | + dataSize = sizeof( AudioStreamBasicDescription ); |
1930 | + property.mSelector = kAudioStreamPropertyVirtualFormat; |
1931 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description ); |
1932 | + if ( result != noErr ) { |
1933 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ")."; |
1934 | + errorText_ = errorStream_.str(); |
1935 | + return FAILURE; |
1936 | + } |
1937 | + |
1938 | + // Set the sample rate and data format id. However, only make the |
1939 | + // change if the sample rate is not within 1.0 of the desired |
1940 | + // rate and the format is not linear pcm. |
1941 | + bool updateFormat = false; |
1942 | + if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) { |
1943 | + description.mSampleRate = (Float64) sampleRate; |
1944 | + updateFormat = true; |
1945 | + } |
1946 | + |
1947 | + if ( description.mFormatID != kAudioFormatLinearPCM ) { |
1948 | + description.mFormatID = kAudioFormatLinearPCM; |
1949 | + updateFormat = true; |
1950 | + } |
1951 | + |
1952 | + if ( updateFormat ) { |
1953 | + result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description ); |
1954 | + if ( result != noErr ) { |
1955 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ")."; |
1956 | + errorText_ = errorStream_.str(); |
1957 | + return FAILURE; |
1958 | + } |
1959 | + } |
1960 | + |
1961 | + // Now check the physical format. |
1962 | + property.mSelector = kAudioStreamPropertyPhysicalFormat; |
1963 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description ); |
1964 | + if ( result != noErr ) { |
1965 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ")."; |
1966 | + errorText_ = errorStream_.str(); |
1967 | + return FAILURE; |
1968 | + } |
1969 | + |
1970 | + //std::cout << "Current physical stream format:" << std::endl; |
1971 | + //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl; |
1972 | + //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl; |
1973 | + //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl; |
1974 | + //std::cout << " sample rate = " << description.mSampleRate << std::endl; |
1975 | + |
1976 | + if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) { |
1977 | + description.mFormatID = kAudioFormatLinearPCM; |
1978 | + //description.mSampleRate = (Float64) sampleRate; |
1979 | + AudioStreamBasicDescription testDescription = description; |
1980 | + UInt32 formatFlags; |
1981 | + |
1982 | + // We'll try higher bit rates first and then work our way down. |
1983 | + std::vector< std::pair<UInt32, UInt32> > physicalFormats; |
1984 | + formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger; |
1985 | + physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) ); |
1986 | + formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat; |
1987 | + physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) ); |
1988 | + physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed |
1989 | + formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh ); |
1990 | + physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low |
1991 | + formatFlags |= kAudioFormatFlagIsAlignedHigh; |
1992 | + physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high |
1993 | + formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat; |
1994 | + physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) ); |
1995 | + physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) ); |
1996 | + |
1997 | + bool setPhysicalFormat = false; |
1998 | + for( unsigned int i=0; i<physicalFormats.size(); i++ ) { |
1999 | + testDescription = description; |
2000 | + testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first; |
2001 | + testDescription.mFormatFlags = physicalFormats[i].second; |
2002 | + if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) ) |
2003 | + testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame; |
2004 | + else |
2005 | + testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame; |
2006 | + testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket; |
2007 | + result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription ); |
2008 | + if ( result == noErr ) { |
2009 | + setPhysicalFormat = true; |
2010 | + //std::cout << "Updated physical stream format:" << std::endl; |
2011 | + //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl; |
2012 | + //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl; |
2013 | + //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl; |
2014 | + //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl; |
2015 | + break; |
2016 | + } |
2017 | + } |
2018 | + |
2019 | + if ( !setPhysicalFormat ) { |
2020 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ")."; |
2021 | + errorText_ = errorStream_.str(); |
2022 | + return FAILURE; |
2023 | + } |
2024 | + } // done setting virtual/physical formats. |
2025 | + |
2026 | + // Get the stream / device latency. |
2027 | + UInt32 latency; |
2028 | + dataSize = sizeof( UInt32 ); |
2029 | + property.mSelector = kAudioDevicePropertyLatency; |
2030 | + if ( AudioObjectHasProperty( id, &property ) == true ) { |
2031 | + result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency ); |
2032 | + if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency; |
2033 | + else { |
2034 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ")."; |
2035 | + errorText_ = errorStream_.str(); |
2036 | + error( RtAudioError::WARNING ); |
2037 | + } |
2038 | + } |
2039 | + |
2040 | + // Byte-swapping: According to AudioHardware.h, the stream data will |
2041 | + // always be presented in native-endian format, so we should never |
2042 | + // need to byte swap. |
2043 | + stream_.doByteSwap[mode] = false; |
2044 | + |
2045 | + // From the CoreAudio documentation, PCM data must be supplied as |
2046 | + // 32-bit floats. |
2047 | + stream_.userFormat = format; |
2048 | + stream_.deviceFormat[mode] = RTAUDIO_FLOAT32; |
2049 | + |
2050 | + if ( streamCount == 1 ) |
2051 | + stream_.nDeviceChannels[mode] = description.mChannelsPerFrame; |
2052 | + else // multiple streams |
2053 | + stream_.nDeviceChannels[mode] = channels; |
2054 | + stream_.nUserChannels[mode] = channels; |
2055 | + stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream |
2056 | + if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false; |
2057 | + else stream_.userInterleaved = true; |
2058 | + stream_.deviceInterleaved[mode] = true; |
2059 | + if ( monoMode == true ) stream_.deviceInterleaved[mode] = false; |
2060 | + |
2061 | + // Set flags for buffer conversion. |
2062 | + stream_.doConvertBuffer[mode] = false; |
2063 | + if ( stream_.userFormat != stream_.deviceFormat[mode] ) |
2064 | + stream_.doConvertBuffer[mode] = true; |
2065 | + if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] ) |
2066 | + stream_.doConvertBuffer[mode] = true; |
2067 | + if ( streamCount == 1 ) { |
2068 | + if ( stream_.nUserChannels[mode] > 1 && |
2069 | + stream_.userInterleaved != stream_.deviceInterleaved[mode] ) |
2070 | + stream_.doConvertBuffer[mode] = true; |
2071 | + } |
2072 | + else if ( monoMode && stream_.userInterleaved ) |
2073 | + stream_.doConvertBuffer[mode] = true; |
2074 | + |
2075 | + // Allocate our CoreHandle structure for the stream. |
2076 | + CoreHandle *handle = 0; |
2077 | + if ( stream_.apiHandle == 0 ) { |
2078 | + try { |
2079 | + handle = new CoreHandle; |
2080 | + } |
2081 | + catch ( std::bad_alloc& ) { |
2082 | + errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory."; |
2083 | + goto error; |
2084 | + } |
2085 | + |
2086 | + if ( pthread_cond_init( &handle->condition, NULL ) ) { |
2087 | + errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable."; |
2088 | + goto error; |
2089 | + } |
2090 | + stream_.apiHandle = (void *) handle; |
2091 | + } |
2092 | + else |
2093 | + handle = (CoreHandle *) stream_.apiHandle; |
2094 | + handle->iStream[mode] = firstStream; |
2095 | + handle->nStreams[mode] = streamCount; |
2096 | + handle->id[mode] = id; |
2097 | + |
2098 | + // Allocate necessary internal buffers. |
2099 | + unsigned long bufferBytes; |
2100 | + bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat ); |
2101 | + // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); |
2102 | + stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) ); |
2103 | + memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) ); |
2104 | + if ( stream_.userBuffer[mode] == NULL ) { |
2105 | + errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory."; |
2106 | + goto error; |
2107 | + } |
2108 | + |
2109 | + // If possible, we will make use of the CoreAudio stream buffers as |
2110 | + // "device buffers". However, we can't do this if using multiple |
2111 | + // streams. |
2112 | + if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) { |
2113 | + |
2114 | + bool makeBuffer = true; |
2115 | + bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] ); |
2116 | + if ( mode == INPUT ) { |
2117 | + if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { |
2118 | + unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); |
2119 | + if ( bufferBytes <= bytesOut ) makeBuffer = false; |
2120 | + } |
2121 | + } |
2122 | + |
2123 | + if ( makeBuffer ) { |
2124 | + bufferBytes *= *bufferSize; |
2125 | + if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); |
2126 | + stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); |
2127 | + if ( stream_.deviceBuffer == NULL ) { |
2128 | + errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory."; |
2129 | + goto error; |
2130 | + } |
2131 | + } |
2132 | + } |
2133 | + |
2134 | + stream_.sampleRate = sampleRate; |
2135 | + stream_.device[mode] = device; |
2136 | + stream_.state = STREAM_STOPPED; |
2137 | + stream_.callbackInfo.object = (void *) this; |
2138 | + |
2139 | + // Setup the buffer conversion information structure. |
2140 | + if ( stream_.doConvertBuffer[mode] ) { |
2141 | + if ( streamCount > 1 ) setConvertInfo( mode, 0 ); |
2142 | + else setConvertInfo( mode, channelOffset ); |
2143 | + } |
2144 | + |
2145 | + if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device ) |
2146 | + // Only one callback procedure per device. |
2147 | + stream_.mode = DUPLEX; |
2148 | + else { |
2149 | +#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 ) |
2150 | + result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] ); |
2151 | +#else |
2152 | + // deprecated in favor of AudioDeviceCreateIOProcID() |
2153 | + result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo ); |
2154 | +#endif |
2155 | + if ( result != noErr ) { |
2156 | + errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ")."; |
2157 | + errorText_ = errorStream_.str(); |
2158 | + goto error; |
2159 | + } |
2160 | + if ( stream_.mode == OUTPUT && mode == INPUT ) |
2161 | + stream_.mode = DUPLEX; |
2162 | + else |
2163 | + stream_.mode = mode; |
2164 | + } |
2165 | + |
2166 | + // Setup the device property listener for over/underload. |
2167 | + property.mSelector = kAudioDeviceProcessorOverload; |
2168 | + property.mScope = kAudioObjectPropertyScopeGlobal; |
2169 | + result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle ); |
2170 | + |
2171 | + return SUCCESS; |
2172 | + |
2173 | + error: |
2174 | + if ( handle ) { |
2175 | + pthread_cond_destroy( &handle->condition ); |
2176 | + delete handle; |
2177 | + stream_.apiHandle = 0; |
2178 | + } |
2179 | + |
2180 | + for ( int i=0; i<2; i++ ) { |
2181 | + if ( stream_.userBuffer[i] ) { |
2182 | + free( stream_.userBuffer[i] ); |
2183 | + stream_.userBuffer[i] = 0; |
2184 | + } |
2185 | + } |
2186 | + |
2187 | + if ( stream_.deviceBuffer ) { |
2188 | + free( stream_.deviceBuffer ); |
2189 | + stream_.deviceBuffer = 0; |
2190 | + } |
2191 | + |
2192 | + stream_.state = STREAM_CLOSED; |
2193 | + return FAILURE; |
2194 | +} |
2195 | + |
// Close the open stream: stop it if running, remove the IOProc
// callback(s) from the output (index 0) and input (index 1) devices,
// free user/device buffers, destroy the drain-signalling condition
// variable, and reset the stream state to STREAM_CLOSED.
void RtApiCore :: closeStream( void )
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  // NOTE(review): handle is dereferenced below without a null check;
  // presumably probeDeviceOpen() always allocated it before the stream
  // could leave the CLOSED state -- confirm.
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[0], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
#endif
  }

  // A duplex stream spanning two distinct devices registered a second
  // IOProc on the input device; it must be removed separately.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[1], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
#endif
  }

  // Release the interleaved user buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Release the internal conversion buffer, if one was allocated.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  // Destroy pthread condition variable.
  pthread_cond_destroy( &handle->condition );
  delete handle;
  stream_.apiHandle = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
2247 | + |
2248 | +void RtApiCore :: startStream( void ) |
2249 | +{ |
2250 | + verifyStream(); |
2251 | + if ( stream_.state == STREAM_RUNNING ) { |
2252 | + errorText_ = "RtApiCore::startStream(): the stream is already running!"; |
2253 | + error( RtAudioError::WARNING ); |
2254 | + return; |
2255 | + } |
2256 | + |
2257 | + OSStatus result = noErr; |
2258 | + CoreHandle *handle = (CoreHandle *) stream_.apiHandle; |
2259 | + if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { |
2260 | + |
2261 | + result = AudioDeviceStart( handle->id[0], callbackHandler ); |
2262 | + if ( result != noErr ) { |
2263 | + errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ")."; |
2264 | + errorText_ = errorStream_.str(); |
2265 | + goto unlock; |
2266 | + } |
2267 | + } |
2268 | + |
2269 | + if ( stream_.mode == INPUT || |
2270 | + ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) { |
2271 | + |
2272 | + result = AudioDeviceStart( handle->id[1], callbackHandler ); |
2273 | + if ( result != noErr ) { |
2274 | + errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ")."; |
2275 | + errorText_ = errorStream_.str(); |
2276 | + goto unlock; |
2277 | + } |
2278 | + } |
2279 | + |
2280 | + handle->drainCounter = 0; |
2281 | + handle->internalDrain = false; |
2282 | + stream_.state = STREAM_RUNNING; |
2283 | + |
2284 | + unlock: |
2285 | + if ( result == noErr ) return; |
2286 | + error( RtAudioError::SYSTEM_ERROR ); |
2287 | +} |
2288 | + |
// Stop the stream after letting queued output drain.
// If no drain was already requested, drainCounter is set so that
// callbackEvent() flushes remaining output, and this thread blocks on
// the handle's condition variable until the callback signals completion.
void RtApiCore :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): POSIX requires stream_.mutex to be locked by the
      // caller of pthread_cond_wait; no lock is visible in this
      // function -- confirm locking is handled elsewhere.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Stop the separate input device of a two-device duplex stream.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2331 | + |
2332 | +void RtApiCore :: abortStream( void ) |
2333 | +{ |
2334 | + verifyStream(); |
2335 | + if ( stream_.state == STREAM_STOPPED ) { |
2336 | + errorText_ = "RtApiCore::abortStream(): the stream is already stopped!"; |
2337 | + error( RtAudioError::WARNING ); |
2338 | + return; |
2339 | + } |
2340 | + |
2341 | + CoreHandle *handle = (CoreHandle *) stream_.apiHandle; |
2342 | + handle->drainCounter = 2; |
2343 | + |
2344 | + stopStream(); |
2345 | +} |
2346 | + |
2347 | +// This function will be called by a spawned thread when the user |
2348 | +// callback function signals that the stream should be stopped or |
2349 | +// aborted. It is better to handle it this way because the |
2350 | +// callbackEvent() function probably should return before the AudioDeviceStop() |
2351 | +// function is called. |
2352 | +static void *coreStopStream( void *ptr ) |
2353 | +{ |
2354 | + CallbackInfo *info = (CallbackInfo *) ptr; |
2355 | + RtApiCore *object = (RtApiCore *) info->object; |
2356 | + |
2357 | + object->stopStream(); |
2358 | + pthread_exit( NULL ); |
2359 | +} |
2360 | + |
// Per-device CoreAudio IOProc body: fills the device's output
// AudioBufferList from the user callback (with optional format
// conversion and stream de-interleaving) and/or reads the input
// AudioBufferList into the user buffer.  Called once per buffer cycle
// for each device of the stream.  Returns SUCCESS/FAILURE.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain was initiated from the callback's own return value:
      // stop from a helper thread so this IOProc can return first.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report (and clear) any under/overflow flags set by xrunListener.
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return value 2 => abort immediately; 1 => drain then stop.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        // One single-channel CoreAudio stream per user channel.
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        // Per-frame step within the source buffer: 1 for interleaved
        // data, bufferSize for planar (channel-major) data.
        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // NOTE(review): this inner loop variable shadows the outer
          // stream loop's `i`; harmless here since the outer `i` is
          // only used for loop control, but easy to misread.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        // One single-channel CoreAudio stream per user channel.
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        // Per-frame step within the destination buffer: 1 for
        // interleaved data, bufferSize for planar data.
        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // NOTE(review): inner loop `i` shadows the outer stream
          // loop's `i` (see matching output path above).
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream's running time by one buffer period.
  RtApi::tickStreamTime();
  return SUCCESS;
}
2620 | + |
2621 | +const char* RtApiCore :: getErrorCode( OSStatus code ) |
2622 | +{ |
2623 | + switch( code ) { |
2624 | + |
2625 | + case kAudioHardwareNotRunningError: |
2626 | + return "kAudioHardwareNotRunningError"; |
2627 | + |
2628 | + case kAudioHardwareUnspecifiedError: |
2629 | + return "kAudioHardwareUnspecifiedError"; |
2630 | + |
2631 | + case kAudioHardwareUnknownPropertyError: |
2632 | + return "kAudioHardwareUnknownPropertyError"; |
2633 | + |
2634 | + case kAudioHardwareBadPropertySizeError: |
2635 | + return "kAudioHardwareBadPropertySizeError"; |
2636 | + |
2637 | + case kAudioHardwareIllegalOperationError: |
2638 | + return "kAudioHardwareIllegalOperationError"; |
2639 | + |
2640 | + case kAudioHardwareBadObjectError: |
2641 | + return "kAudioHardwareBadObjectError"; |
2642 | + |
2643 | + case kAudioHardwareBadDeviceError: |
2644 | + return "kAudioHardwareBadDeviceError"; |
2645 | + |
2646 | + case kAudioHardwareBadStreamError: |
2647 | + return "kAudioHardwareBadStreamError"; |
2648 | + |
2649 | + case kAudioHardwareUnsupportedOperationError: |
2650 | + return "kAudioHardwareUnsupportedOperationError"; |
2651 | + |
2652 | + case kAudioDeviceUnsupportedFormatError: |
2653 | + return "kAudioDeviceUnsupportedFormatError"; |
2654 | + |
2655 | + case kAudioDevicePermissionsError: |
2656 | + return "kAudioDevicePermissionsError"; |
2657 | + |
2658 | + default: |
2659 | + return "CoreAudio unknown error"; |
2660 | + } |
2661 | +} |
2662 | + |
2663 | + //******************** End of __MACOSX_CORE__ *********************// |
2664 | +#endif |
2665 | + |
2666 | +#if defined(__UNIX_JACK__) |
2667 | + |
2668 | +// JACK is a low-latency audio server, originally written for the |
2669 | +// GNU/Linux operating system and now also ported to OS-X. It can |
2670 | +// connect a number of different applications to an audio device, as |
2671 | +// well as allowing them to share audio between themselves. |
2672 | +// |
2673 | +// When using JACK with RtAudio, "devices" refer to JACK clients that |
2674 | +// have ports connected to the server. The JACK server is typically |
2675 | +// started in a terminal as follows: |
2676 | +// |
// jackd -d alsa -d hw:0
2678 | +// |
2679 | +// or through an interface program such as qjackctl. Many of the |
2680 | +// parameters normally set for a stream are fixed by the JACK server |
2681 | +// and can be specified when the JACK server is started. In |
2682 | +// particular, |
2683 | +// |
// jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2685 | +// |
2686 | +// specifies a sample rate of 44100 Hz, a buffer size of 512 sample |
2687 | +// frames, and number of buffers = 4. Once the server is running, it |
2688 | +// is not possible to override these values. If the values are not |
2689 | +// specified in the command-line, the JACK server uses default values. |
2690 | +// |
2691 | +// The JACK server does not have to be running when an instance of |
2692 | +// RtApiJack is created, though the function getDeviceCount() will |
2693 | +// report 0 devices found until JACK has been started. When no |
2694 | +// devices are available (i.e., the JACK server is not running), a |
2695 | +// stream cannot be opened. |
2696 | + |
2697 | +#include <jack/jack.h> |
2698 | +#include <unistd.h> |
2699 | +#include <cstdio> |
2700 | + |
2701 | +// A structure to hold various information related to the Jack API |
2702 | +// implementation. |
2703 | +struct JackHandle { |
2704 | + jack_client_t *client; |
2705 | + jack_port_t **ports[2]; |
2706 | + std::string deviceName[2]; |
2707 | + bool xrun[2]; |
2708 | + pthread_cond_t condition; |
2709 | + int drainCounter; // Tracks callback counts when draining |
2710 | + bool internalDrain; // Indicates if stop is initiated from callback or not. |
2711 | + |
2712 | + JackHandle() |
2713 | + :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; } |
2714 | +}; |
2715 | + |
2716 | +static void jackSilentError( const char * ) {}; |
2717 | + |
// Construct the JACK API backend.  In release builds, JACK's internal
// error reporting is routed to a no-op handler so the library stays
// silent on the console.
RtApiJack :: RtApiJack()
{
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
2726 | + |
2727 | +RtApiJack :: ~RtApiJack() |
2728 | +{ |
2729 | + if ( stream_.state != STREAM_CLOSED ) closeStream(); |
2730 | +} |
2731 | + |
// Count JACK "devices" by connecting as a temporary client and grouping
// all server ports by their client-name prefix (the text before the
// first colon).  Returns 0 when the JACK server is not running.
unsigned int RtApiJack :: getDeviceCount( void )
{
  // See if we can become a jack client.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
  if ( client == 0 ) return 0;

  const char **ports;
  std::string port, previousPort;
  unsigned int nChannels = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    size_t iColon = 0;
    do {
      port = (char *) ports[ nChannels ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        // NOTE(review): the prefix here keeps the trailing colon
        // (iColon + 1) whereas getDeviceInfo() strips it; harmless for
        // counting since it's applied consistently within this loop,
        // but the two functions are inconsistent -- confirm intent.
        port = port.substr( 0, iColon + 1 );
        // A new prefix marks a new client ("device").  Ports of the
        // same client are contiguous, so a simple previous-vs-current
        // comparison suffices.
        if ( port != previousPort ) {
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nChannels] );
    free( ports );
  }

  jack_client_close( client );
  return nDevices;
}
2764 | + |
// Probe the JACK client at index `device` and fill in a DeviceInfo:
// name, channel counts (JACK input ports = RtAudio output channels and
// vice versa), the server's fixed sample rate, and the native format
// (always 32-bit float).  info.probed stays false on any failure.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  // Enumerate all ports and find the name of the device-th distinct
  // client-name prefix (the text before the first colon).
  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.
  // The server rate is fixed, so it is the only supported rate.
  info.sampleRates.clear();
  info.sampleRates.push_back( jack_get_sample_rate( client ) );

  // Count the available ports containing the client name as device
  // channels. Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2855 | + |
2856 | +static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer ) |
2857 | +{ |
2858 | + CallbackInfo *info = (CallbackInfo *) infoPointer; |
2859 | + |
2860 | + RtApiJack *object = (RtApiJack *) info->object; |
2861 | + if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1; |
2862 | + |
2863 | + return 0; |
2864 | +} |
2865 | + |
2866 | +// This function will be called by a spawned thread when the Jack |
2867 | +// server signals that it is shutting down. It is necessary to handle |
2868 | +// it this way because the jackShutdown() function must return before |
2869 | +// the jack_deactivate() function (in closeStream()) will return. |
2870 | +static void *jackCloseStream( void *ptr ) |
2871 | +{ |
2872 | + CallbackInfo *info = (CallbackInfo *) ptr; |
2873 | + RtApiJack *object = (RtApiJack *) info->object; |
2874 | + |
2875 | + object->closeStream(); |
2876 | + |
2877 | + pthread_exit( NULL ); |
2878 | +} |
// JACK shutdown callback: invoked when the server deactivates this
// client.  If the stream is still running, close it from a helper
// thread (this callback must return before closeStream() can complete).
static void jackShutdown( void *infoPointer )
{
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiJack *object = (RtApiJack *) info->object;

  // Check current stream state. If stopped, then we'll assume this
  // was called as a result of a call to RtApiJack::stopStream (the
  // deactivation of a client handle causes this function to be called).
  // If not, we'll assume the Jack server is shutting down or some
  // other problem occurred and we should close the stream.
  if ( object->isStreamRunning() == false ) return;

  ThreadHandle threadId;
  pthread_create( &threadId, NULL, jackCloseStream, info );
  std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
}
2895 | + |
2896 | +static int jackXrun( void *infoPointer ) |
2897 | +{ |
2898 | + JackHandle *handle = (JackHandle *) infoPointer; |
2899 | + |
2900 | + if ( handle->ports[0] ) handle->xrun[0] = true; |
2901 | + if ( handle->ports[1] ) handle->xrun[1] = true; |
2902 | + |
2903 | + return 0; |
2904 | +} |
2905 | + |
2906 | +bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels, |
2907 | + unsigned int firstChannel, unsigned int sampleRate, |
2908 | + RtAudioFormat format, unsigned int *bufferSize, |
2909 | + RtAudio::StreamOptions *options ) |
2910 | +{ |
2911 | + JackHandle *handle = (JackHandle *) stream_.apiHandle; |
2912 | + |
2913 | + // Look for jack server and try to become a client (only do once per stream). |
2914 | + jack_client_t *client = 0; |
2915 | + if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) { |
2916 | + jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption; |
2917 | + jack_status_t *status = NULL; |
2918 | + if ( options && !options->streamName.empty() ) |
2919 | + client = jack_client_open( options->streamName.c_str(), jackoptions, status ); |
2920 | + else |
2921 | + client = jack_client_open( "RtApiJack", jackoptions, status ); |
2922 | + if ( client == 0 ) { |
2923 | + errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!"; |
2924 | + error( RtAudioError::WARNING ); |
2925 | + return FAILURE; |
2926 | + } |
2927 | + } |
2928 | + else { |
2929 | + // The handle must have been created on an earlier pass. |
2930 | + client = handle->client; |
2931 | + } |
2932 | + |
2933 | + const char **ports; |
2934 | + std::string port, previousPort, deviceName; |
2935 | + unsigned int nPorts = 0, nDevices = 0; |
2936 | + ports = jack_get_ports( client, NULL, NULL, 0 ); |
2937 | + if ( ports ) { |
2938 | + // Parse the port names up to the first colon (:). |
2939 | + size_t iColon = 0; |
2940 | + do { |
2941 | + port = (char *) ports[ nPorts ]; |
2942 | + iColon = port.find(":"); |
2943 | + if ( iColon != std::string::npos ) { |
2944 | + port = port.substr( 0, iColon ); |
2945 | + if ( port != previousPort ) { |
2946 | + if ( nDevices == device ) deviceName = port; |
2947 | + nDevices++; |
2948 | + previousPort = port; |
2949 | + } |
2950 | + } |
2951 | + } while ( ports[++nPorts] ); |
2952 | + free( ports ); |
2953 | + } |
2954 | + |
2955 | + if ( device >= nDevices ) { |
2956 | + errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!"; |
2957 | + return FAILURE; |
2958 | + } |
2959 | + |
2960 | + // Count the available ports containing the client name as device |
2961 | + // channels. Jack "input ports" equal RtAudio output channels. |
2962 | + unsigned int nChannels = 0; |
2963 | + unsigned long flag = JackPortIsInput; |
2964 | + if ( mode == INPUT ) flag = JackPortIsOutput; |
2965 | + ports = jack_get_ports( client, deviceName.c_str(), NULL, flag ); |
2966 | + if ( ports ) { |
2967 | + while ( ports[ nChannels ] ) nChannels++; |
2968 | + free( ports ); |
2969 | + } |
2970 | + |
2971 | + // Compare the jack ports for specified client to the requested number of channels. |
2972 | + if ( nChannels < (channels + firstChannel) ) { |
2973 | + errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ")."; |
2974 | + errorText_ = errorStream_.str(); |
2975 | + return FAILURE; |
2976 | + } |
2977 | + |
2978 | + // Check the jack server sample rate. |
2979 | + unsigned int jackRate = jack_get_sample_rate( client ); |
2980 | + if ( sampleRate != jackRate ) { |
2981 | + jack_client_close( client ); |
2982 | + errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ")."; |
2983 | + errorText_ = errorStream_.str(); |
2984 | + return FAILURE; |
2985 | + } |
2986 | + stream_.sampleRate = jackRate; |
2987 | + |
2988 | + // Get the latency of the JACK port. |
2989 | + ports = jack_get_ports( client, deviceName.c_str(), NULL, flag ); |
2990 | + if ( ports[ firstChannel ] ) { |
2991 | + // Added by Ge Wang |
2992 | + jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency); |
2993 | + // the range (usually the min and max are equal) |
2994 | + jack_latency_range_t latrange; latrange.min = latrange.max = 0; |
2995 | + // get the latency range |
2996 | + jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange ); |
2997 | + // be optimistic, use the min! |
2998 | + stream_.latency[mode] = latrange.min; |
2999 | + //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) ); |
3000 | + } |
3001 | + free( ports ); |
3002 | + |
3003 | + // The jack server always uses 32-bit floating-point data. |
3004 | + stream_.deviceFormat[mode] = RTAUDIO_FLOAT32; |
3005 | + stream_.userFormat = format; |
3006 | + |
3007 | + if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false; |
3008 | + else stream_.userInterleaved = true; |
3009 | + |
3010 | + // Jack always uses non-interleaved buffers. |
3011 | + stream_.deviceInterleaved[mode] = false; |
3012 | + |
3013 | + // Jack always provides host byte-ordered data. |
3014 | + stream_.doByteSwap[mode] = false; |
3015 | + |
3016 | + // Get the buffer size. The buffer size and number of buffers |
3017 | + // (periods) is set when the jack server is started. |
3018 | + stream_.bufferSize = (int) jack_get_buffer_size( client ); |
3019 | + *bufferSize = stream_.bufferSize; |
3020 | + |
3021 | + stream_.nDeviceChannels[mode] = channels; |
3022 | + stream_.nUserChannels[mode] = channels; |
3023 | + |
3024 | + // Set flags for buffer conversion. |
3025 | + stream_.doConvertBuffer[mode] = false; |
3026 | + if ( stream_.userFormat != stream_.deviceFormat[mode] ) |
3027 | + stream_.doConvertBuffer[mode] = true; |
3028 | + if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && |
3029 | + stream_.nUserChannels[mode] > 1 ) |
3030 | + stream_.doConvertBuffer[mode] = true; |
3031 | + |
3032 | + // Allocate our JackHandle structure for the stream. |
3033 | + if ( handle == 0 ) { |
3034 | + try { |
3035 | + handle = new JackHandle; |
3036 | + } |
3037 | + catch ( std::bad_alloc& ) { |
3038 | + errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory."; |
3039 | + goto error; |
3040 | + } |
3041 | + |
3042 | + if ( pthread_cond_init(&handle->condition, NULL) ) { |
3043 | + errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable."; |
3044 | + goto error; |
3045 | + } |
3046 | + stream_.apiHandle = (void *) handle; |
3047 | + handle->client = client; |
3048 | + } |
3049 | + handle->deviceName[mode] = deviceName; |
3050 | + |
3051 | + // Allocate necessary internal buffers. |
3052 | + unsigned long bufferBytes; |
3053 | + bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat ); |
3054 | + stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); |
3055 | + if ( stream_.userBuffer[mode] == NULL ) { |
3056 | + errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory."; |
3057 | + goto error; |
3058 | + } |
3059 | + |
3060 | + if ( stream_.doConvertBuffer[mode] ) { |
3061 | + |
3062 | + bool makeBuffer = true; |
3063 | + if ( mode == OUTPUT ) |
3064 | + bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); |
3065 | + else { // mode == INPUT |
3066 | + bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] ); |
3067 | + if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { |
3068 | + unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]); |
3069 | + if ( bufferBytes < bytesOut ) makeBuffer = false; |
3070 | + } |
3071 | + } |
3072 | + |
3073 | + if ( makeBuffer ) { |
3074 | + bufferBytes *= *bufferSize; |
3075 | + if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); |
3076 | + stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); |
3077 | + if ( stream_.deviceBuffer == NULL ) { |
3078 | + errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory."; |
3079 | + goto error; |
3080 | + } |
3081 | + } |
3082 | + } |
3083 | + |
3084 | + // Allocate memory for the Jack ports (channels) identifiers. |
3085 | + handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels ); |
3086 | + if ( handle->ports[mode] == NULL ) { |
3087 | + errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory."; |
3088 | + goto error; |
3089 | + } |
3090 | + |
3091 | + stream_.device[mode] = device; |
3092 | + stream_.channelOffset[mode] = firstChannel; |
3093 | + stream_.state = STREAM_STOPPED; |
3094 | + stream_.callbackInfo.object = (void *) this; |
3095 | + |
3096 | + if ( stream_.mode == OUTPUT && mode == INPUT ) |
3097 | + // We had already set up the stream for output. |
3098 | + stream_.mode = DUPLEX; |
3099 | + else { |
3100 | + stream_.mode = mode; |
3101 | + jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo ); |
3102 | + jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle ); |
3103 | + jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo ); |
3104 | + } |
3105 | + |
3106 | + // Register our ports. |
3107 | + char label[64]; |
3108 | + if ( mode == OUTPUT ) { |
3109 | + for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) { |
3110 | + snprintf( label, 64, "outport %d", i ); |
3111 | + handle->ports[0][i] = jack_port_register( handle->client, (const char *)label, |
3112 | + JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 ); |
3113 | + } |
3114 | + } |
3115 | + else { |
3116 | + for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) { |
3117 | + snprintf( label, 64, "inport %d", i ); |
3118 | + handle->ports[1][i] = jack_port_register( handle->client, (const char *)label, |
3119 | + JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 ); |
3120 | + } |
3121 | + } |
3122 | + |
3123 | + // Setup the buffer conversion information structure. We don't use |
3124 | + // buffers to do channel offsets, so we override that parameter |
3125 | + // here. |
3126 | + if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 ); |
3127 | + |
3128 | + return SUCCESS; |
3129 | + |
3130 | + error: |
3131 | + if ( handle ) { |
3132 | + pthread_cond_destroy( &handle->condition ); |
3133 | + jack_client_close( handle->client ); |
3134 | + |
3135 | + if ( handle->ports[0] ) free( handle->ports[0] ); |
3136 | + if ( handle->ports[1] ) free( handle->ports[1] ); |
3137 | + |
3138 | + delete handle; |
3139 | + stream_.apiHandle = 0; |
3140 | + } |
3141 | + |
3142 | + for ( int i=0; i<2; i++ ) { |
3143 | + if ( stream_.userBuffer[i] ) { |
3144 | + free( stream_.userBuffer[i] ); |
3145 | + stream_.userBuffer[i] = 0; |
3146 | + } |
3147 | + } |
3148 | + |
3149 | + if ( stream_.deviceBuffer ) { |
3150 | + free( stream_.deviceBuffer ); |
3151 | + stream_.deviceBuffer = 0; |
3152 | + } |
3153 | + |
3154 | + return FAILURE; |
3155 | +} |
3156 | + |
3157 | +void RtApiJack :: closeStream( void ) |
3158 | +{ |
3159 | + if ( stream_.state == STREAM_CLOSED ) { |
3160 | + errorText_ = "RtApiJack::closeStream(): no open stream to close!"; |
3161 | + error( RtAudioError::WARNING ); |
3162 | + return; |
3163 | + } |
3164 | + |
3165 | + JackHandle *handle = (JackHandle *) stream_.apiHandle; |
3166 | + if ( handle ) { |
3167 | + |
3168 | + if ( stream_.state == STREAM_RUNNING ) |
3169 | + jack_deactivate( handle->client ); |
3170 | + |
3171 | + jack_client_close( handle->client ); |
3172 | + } |
3173 | + |
3174 | + if ( handle ) { |
3175 | + if ( handle->ports[0] ) free( handle->ports[0] ); |
3176 | + if ( handle->ports[1] ) free( handle->ports[1] ); |
3177 | + pthread_cond_destroy( &handle->condition ); |
3178 | + delete handle; |
3179 | + stream_.apiHandle = 0; |
3180 | + } |
3181 | + |
3182 | + for ( int i=0; i<2; i++ ) { |
3183 | + if ( stream_.userBuffer[i] ) { |
3184 | + free( stream_.userBuffer[i] ); |
3185 | + stream_.userBuffer[i] = 0; |
3186 | + } |
3187 | + } |
3188 | + |
3189 | + if ( stream_.deviceBuffer ) { |
3190 | + free( stream_.deviceBuffer ); |
3191 | + stream_.deviceBuffer = 0; |
3192 | + } |
3193 | + |
3194 | + stream_.mode = UNINITIALIZED; |
3195 | + stream_.state = STREAM_CLOSED; |
3196 | +} |
3197 | + |
// Activate the JACK client and wire our registered ports to the
// physical device ports, then mark the stream RUNNING.
//
// Error protocol: `result` doubles as an error flag.  It is set
// non-zero before each fallible step; any failure jumps to `unlock`,
// where a non-zero `result` raises a SYSTEM_ERROR with the errorText_
// set at the failure site.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // Start the JACK process callback for this client.
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.  Note the direction inversion:
  // our OUTPUT channels connect to the device's JACK *input* ports.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  // Our INPUT channels connect from the device's JACK *output* ports.
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  // Reset the drain state so a fresh stop/abort handshake can occur.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
3271 | + |
// Stop the stream, first letting any pending output drain.  Setting
// drainCounter to 2 tells callbackEvent() to write silence and then
// signal `handle->condition`, at which point we deactivate the client.
void RtApiJack :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means the callback has not begun draining yet
    // (i.e. this stop was not initiated from the callback itself).
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): stream_.mutex is not visibly locked here, but
      // POSIX requires pthread_cond_wait() to be called with the mutex
      // held by the calling thread — confirm the locking convention.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }
  }

  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
}
3293 | + |
3294 | +void RtApiJack :: abortStream( void ) |
3295 | +{ |
3296 | + verifyStream(); |
3297 | + if ( stream_.state == STREAM_STOPPED ) { |
3298 | + errorText_ = "RtApiJack::abortStream(): the stream is already stopped!"; |
3299 | + error( RtAudioError::WARNING ); |
3300 | + return; |
3301 | + } |
3302 | + |
3303 | + JackHandle *handle = (JackHandle *) stream_.apiHandle; |
3304 | + handle->drainCounter = 2; |
3305 | + |
3306 | + stopStream(); |
3307 | +} |
3308 | + |
3309 | +// This function will be called by a spawned thread when the user |
3310 | +// callback function signals that the stream should be stopped or |
3311 | +// aborted. It is necessary to handle it this way because the |
3312 | +// callbackEvent() function must return before the jack_deactivate() |
3313 | +// function will return. |
3314 | +static void *jackStopStream( void *ptr ) |
3315 | +{ |
3316 | + CallbackInfo *info = (CallbackInfo *) ptr; |
3317 | + RtApiJack *object = (RtApiJack *) info->object; |
3318 | + |
3319 | + object->stopStream(); |
3320 | + pthread_exit( NULL ); |
3321 | +} |
3322 | + |
3323 | +bool RtApiJack :: callbackEvent( unsigned long nframes ) |
3324 | +{ |
3325 | + if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS; |
3326 | + if ( stream_.state == STREAM_CLOSED ) { |
3327 | + errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!"; |
3328 | + error( RtAudioError::WARNING ); |
3329 | + return FAILURE; |
3330 | + } |
3331 | + if ( stream_.bufferSize != nframes ) { |
3332 | + errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!"; |
3333 | + error( RtAudioError::WARNING ); |
3334 | + return FAILURE; |
3335 | + } |
3336 | + |
3337 | + CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo; |
3338 | + JackHandle *handle = (JackHandle *) stream_.apiHandle; |
3339 | + |
3340 | + // Check if we were draining the stream and signal is finished. |
3341 | + if ( handle->drainCounter > 3 ) { |
3342 | + ThreadHandle threadId; |
3343 | + |
3344 | + stream_.state = STREAM_STOPPING; |
3345 | + if ( handle->internalDrain == true ) |
3346 | + pthread_create( &threadId, NULL, jackStopStream, info ); |
3347 | + else |
3348 | + pthread_cond_signal( &handle->condition ); |
3349 | + return SUCCESS; |
3350 | + } |
3351 | + |
3352 | + // Invoke user callback first, to get fresh output data. |
3353 | + if ( handle->drainCounter == 0 ) { |
3354 | + RtAudioCallback callback = (RtAudioCallback) info->callback; |
3355 | + double streamTime = getStreamTime(); |
3356 | + RtAudioStreamStatus status = 0; |
3357 | + if ( stream_.mode != INPUT && handle->xrun[0] == true ) { |
3358 | + status |= RTAUDIO_OUTPUT_UNDERFLOW; |
3359 | + handle->xrun[0] = false; |
3360 | + } |
3361 | + if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) { |
3362 | + status |= RTAUDIO_INPUT_OVERFLOW; |
3363 | + handle->xrun[1] = false; |
3364 | + } |
3365 | + int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1], |
3366 | + stream_.bufferSize, streamTime, status, info->userData ); |
3367 | + if ( cbReturnValue == 2 ) { |
3368 | + stream_.state = STREAM_STOPPING; |
3369 | + handle->drainCounter = 2; |
3370 | + ThreadHandle id; |
3371 | + pthread_create( &id, NULL, jackStopStream, info ); |
3372 | + return SUCCESS; |
3373 | + } |
3374 | + else if ( cbReturnValue == 1 ) { |
3375 | + handle->drainCounter = 1; |
3376 | + handle->internalDrain = true; |
3377 | + } |
3378 | + } |
3379 | + |
3380 | + jack_default_audio_sample_t *jackbuffer; |
3381 | + unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t ); |
3382 | + if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { |
3383 | + |
3384 | + if ( handle->drainCounter > 1 ) { // write zeros to the output stream |
3385 | + |
3386 | + for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) { |
3387 | + jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes ); |
3388 | + memset( jackbuffer, 0, bufferBytes ); |
3389 | + } |
3390 | + |
3391 | + } |
3392 | + else if ( stream_.doConvertBuffer[0] ) { |
3393 | + |
3394 | + convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] ); |
3395 | + |
3396 | + for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) { |
3397 | + jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes ); |
3398 | + memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes ); |
3399 | + } |
3400 | + } |
3401 | + else { // no buffer conversion |
3402 | + for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) { |
3403 | + jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes ); |
3404 | + memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes ); |
3405 | + } |
3406 | + } |
3407 | + } |
3408 | + |
3409 | + // Don't bother draining input |
3410 | + if ( handle->drainCounter ) { |
3411 | + handle->drainCounter++; |
3412 | + goto unlock; |
3413 | + } |
3414 | + |
3415 | + if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) { |
3416 | + |
3417 | + if ( stream_.doConvertBuffer[1] ) { |
3418 | + for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) { |
3419 | + jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes ); |
3420 | + memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes ); |
3421 | + } |
3422 | + convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] ); |
3423 | + } |
3424 | + else { // no buffer conversion |
3425 | + for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) { |
3426 | + jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes ); |
3427 | + memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes ); |
3428 | + } |
3429 | + } |
3430 | + } |
3431 | + |
3432 | + unlock: |
3433 | + RtApi::tickStreamTime(); |
3434 | + return SUCCESS; |
3435 | +} |
3436 | + //******************** End of __UNIX_JACK__ *********************// |
3437 | +#endif |
3438 | + |
3439 | +#if defined(__WINDOWS_ASIO__) // ASIO API on Windows |
3440 | + |
3441 | +// The ASIO API is designed around a callback scheme, so this |
3442 | +// implementation is similar to that used for OS-X CoreAudio and Linux |
3443 | +// Jack. The primary constraint with ASIO is that it only allows |
3444 | +// access to a single driver at a time. Thus, it is not possible to |
3445 | +// have more than one simultaneous RtAudio stream. |
3446 | +// |
3447 | +// This implementation also requires a number of external ASIO files |
3448 | +// and a few global variables. The ASIO callback scheme does not |
3449 | +// allow for the passing of user data, so we must create a global |
3450 | +// pointer to our callbackInfo structure. |
3451 | +// |
3452 | +// On unix systems, we make use of a pthread condition variable. |
3453 | +// Since there is no equivalent in Windows, I hacked something based |
3454 | +// on information found in |
3455 | +// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html. |
3456 | + |
3457 | +#include "asiosys.h" |
3458 | +#include "asio.h" |
3459 | +#include "iasiothiscallresolver.h" |
3460 | +#include "asiodrivers.h" |
3461 | +#include <cmath> |
3462 | + |
3463 | +static AsioDrivers drivers; |
3464 | +static ASIOCallbacks asioCallbacks; |
3465 | +static ASIODriverInfo driverInfo; |
3466 | +static CallbackInfo *asioCallbackInfo; |
3467 | +static bool asioXRun; |
3468 | + |
3469 | +struct AsioHandle { |
3470 | + int drainCounter; // Tracks callback counts when draining |
3471 | + bool internalDrain; // Indicates if stop is initiated from callback or not. |
3472 | + ASIOBufferInfo *bufferInfos; |
3473 | + HANDLE condition; |
3474 | + |
3475 | + AsioHandle() |
3476 | + :drainCounter(0), internalDrain(false), bufferInfos(0) {} |
3477 | +}; |
3478 | + |
3479 | +// Function declarations (definitions at end of section) |
3480 | +static const char* getAsioErrorString( ASIOError result ); |
3481 | +static void sampleRateChanged( ASIOSampleRate sRate ); |
3482 | +static long asioMessages( long selector, long value, void* message, double* opt ); |
3483 | + |
3484 | +RtApiAsio :: RtApiAsio() |
3485 | +{ |
3486 | + // ASIO cannot run on a multi-threaded appartment. You can call |
3487 | + // CoInitialize beforehand, but it must be for appartment threading |
3488 | + // (in which case, CoInitilialize will return S_FALSE here). |
3489 | + coInitialized_ = false; |
3490 | + HRESULT hr = CoInitialize( NULL ); |
3491 | + if ( FAILED(hr) ) { |
3492 | + errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)"; |
3493 | + error( RtAudioError::WARNING ); |
3494 | + } |
3495 | + coInitialized_ = true; |
3496 | + |
3497 | + drivers.removeCurrentDriver(); |
3498 | + driverInfo.asioVersion = 2; |
3499 | + |
3500 | + // See note in DirectSound implementation about GetDesktopWindow(). |
3501 | + driverInfo.sysRef = GetForegroundWindow(); |
3502 | +} |
3503 | + |
// Destructor: close any open stream, then balance the constructor's
// CoInitialize() call if it was recorded as successful.
RtApiAsio :: ~RtApiAsio()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
}
3509 | + |
3510 | +unsigned int RtApiAsio :: getDeviceCount( void ) |
3511 | +{ |
3512 | + return (unsigned int) drivers.asioGetNumDev(); |
3513 | +} |
3514 | + |
// Probe a single ASIO driver ("device") for its capabilities: channel
// counts, supported sample rates, native data format, and default
// input/output status.  On any failure the returned info has
// probed == false.  While a stream is open, answers come from the
// devices_ cache populated by saveDeviceInfo(), because ASIO allows
// only one loaded driver at a time.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
      return info;
    }
    return devices_[ device ];
  }

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.name = driverName;

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // NOTE(review): this failure path returns without calling
  // drivers.removeCurrentDriver(), unlike the later ones — confirm
  // whether the driver is intentionally left loaded here.
  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  // Duplex capability is the smaller of the two channel counts.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK )
      info.sampleRates.push_back( SAMPLE_RATES[i] );
  }

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Map the ASIO sample type to the corresponding RtAudio format flag
  // (both endiannesses map to the same logical format).
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  drivers.removeCurrentDriver();
  return info;
}
3629 | + |
3630 | +static void bufferSwitch( long index, ASIOBool /*processNow*/ ) |
3631 | +{ |
3632 | + RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object; |
3633 | + object->callbackEvent( index ); |
3634 | +} |
3635 | + |
3636 | +void RtApiAsio :: saveDeviceInfo( void ) |
3637 | +{ |
3638 | + devices_.clear(); |
3639 | + |
3640 | + unsigned int nDevices = getDeviceCount(); |
3641 | + devices_.resize( nDevices ); |
3642 | + for ( unsigned int i=0; i<nDevices; i++ ) |
3643 | + devices_[i] = getDeviceInfo( i ); |
3644 | +} |
3645 | + |
3646 | +bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels, |
3647 | + unsigned int firstChannel, unsigned int sampleRate, |
3648 | + RtAudioFormat format, unsigned int *bufferSize, |
3649 | + RtAudio::StreamOptions *options ) |
3650 | +{ |
3651 | + // For ASIO, a duplex stream MUST use the same driver. |
3652 | + if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) { |
3653 | + errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!"; |
3654 | + return FAILURE; |
3655 | + } |
3656 | + |
3657 | + char driverName[32]; |
3658 | + ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 ); |
3659 | + if ( result != ASE_OK ) { |
3660 | + errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ")."; |
3661 | + errorText_ = errorStream_.str(); |
3662 | + return FAILURE; |
3663 | + } |
3664 | + |
3665 | + // Only load the driver once for duplex stream. |
3666 | + if ( mode != INPUT || stream_.mode != OUTPUT ) { |
3667 | + // The getDeviceInfo() function will not work when a stream is open |
3668 | + // because ASIO does not allow multiple devices to run at the same |
3669 | + // time. Thus, we'll probe the system before opening a stream and |
3670 | + // save the results for use by getDeviceInfo(). |
3671 | + this->saveDeviceInfo(); |
3672 | + |
3673 | + if ( !drivers.loadDriver( driverName ) ) { |
3674 | + errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ")."; |
3675 | + errorText_ = errorStream_.str(); |
3676 | + return FAILURE; |
3677 | + } |
3678 | + |
3679 | + result = ASIOInit( &driverInfo ); |
3680 | + if ( result != ASE_OK ) { |
3681 | + errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ")."; |
3682 | + errorText_ = errorStream_.str(); |
3683 | + return FAILURE; |
3684 | + } |
3685 | + } |
3686 | + |
3687 | + // Check the device channel count. |
3688 | + long inputChannels, outputChannels; |
3689 | + result = ASIOGetChannels( &inputChannels, &outputChannels ); |
3690 | + if ( result != ASE_OK ) { |
3691 | + drivers.removeCurrentDriver(); |
3692 | + errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ")."; |
3693 | + errorText_ = errorStream_.str(); |
3694 | + return FAILURE; |
3695 | + } |
3696 | + |
3697 | + if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) || |
3698 | + ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) { |
3699 | + drivers.removeCurrentDriver(); |
3700 | + errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ")."; |
3701 | + errorText_ = errorStream_.str(); |
3702 | + return FAILURE; |
3703 | + } |
3704 | + stream_.nDeviceChannels[mode] = channels; |
3705 | + stream_.nUserChannels[mode] = channels; |
3706 | + stream_.channelOffset[mode] = firstChannel; |
3707 | + |
3708 | + // Verify the sample rate is supported. |
3709 | + result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate ); |
3710 | + if ( result != ASE_OK ) { |
3711 | + drivers.removeCurrentDriver(); |
3712 | + errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ")."; |
3713 | + errorText_ = errorStream_.str(); |
3714 | + return FAILURE; |
3715 | + } |
3716 | + |
3717 | + // Get the current sample rate |
3718 | + ASIOSampleRate currentRate; |
3719 | + result = ASIOGetSampleRate( ¤tRate ); |
3720 | + if ( result != ASE_OK ) { |
3721 | + drivers.removeCurrentDriver(); |
3722 | + errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate."; |
3723 | + errorText_ = errorStream_.str(); |
3724 | + return FAILURE; |
3725 | + } |
3726 | + |
3727 | + // Set the sample rate only if necessary |
3728 | + if ( currentRate != sampleRate ) { |
3729 | + result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate ); |
3730 | + if ( result != ASE_OK ) { |
3731 | + drivers.removeCurrentDriver(); |
3732 | + errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ")."; |
3733 | + errorText_ = errorStream_.str(); |
3734 | + return FAILURE; |
3735 | + } |
3736 | + } |
3737 | + |
3738 | + // Determine the driver data type. |
3739 | + ASIOChannelInfo channelInfo; |
3740 | + channelInfo.channel = 0; |
3741 | + if ( mode == OUTPUT ) channelInfo.isInput = false; |
3742 | + else channelInfo.isInput = true; |
3743 | + result = ASIOGetChannelInfo( &channelInfo ); |
3744 | + if ( result != ASE_OK ) { |
3745 | + drivers.removeCurrentDriver(); |
3746 | + errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format."; |
3747 | + errorText_ = errorStream_.str(); |
3748 | + return FAILURE; |
3749 | + } |
3750 | + |
3751 | + // Assuming WINDOWS host is always little-endian. |
3752 | + stream_.doByteSwap[mode] = false; |
3753 | + stream_.userFormat = format; |
3754 | + stream_.deviceFormat[mode] = 0; |
3755 | + if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) { |
3756 | + stream_.deviceFormat[mode] = RTAUDIO_SINT16; |
3757 | + if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true; |
3758 | + } |
3759 | + else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) { |
3760 | + stream_.deviceFormat[mode] = RTAUDIO_SINT32; |
3761 | + if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true; |
3762 | + } |
3763 | + else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) { |
3764 | + stream_.deviceFormat[mode] = RTAUDIO_FLOAT32; |
3765 | + if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true; |
3766 | + } |
3767 | + else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) { |
3768 | + stream_.deviceFormat[mode] = RTAUDIO_FLOAT64; |
3769 | + if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true; |
3770 | + } |
3771 | + else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) { |
3772 | + stream_.deviceFormat[mode] = RTAUDIO_SINT24; |
3773 | + if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true; |
3774 | + } |
3775 | + |
3776 | + if ( stream_.deviceFormat[mode] == 0 ) { |
3777 | + drivers.removeCurrentDriver(); |
3778 | + errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio."; |
3779 | + errorText_ = errorStream_.str(); |
3780 | + return FAILURE; |
3781 | + } |
3782 | + |
3783 | + // Set the buffer size. For a duplex stream, this will end up |
3784 | + // setting the buffer size based on the input constraints, which |
3785 | + // should be ok. |
3786 | + long minSize, maxSize, preferSize, granularity; |
3787 | + result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity ); |
3788 | + if ( result != ASE_OK ) { |
3789 | + drivers.removeCurrentDriver(); |
3790 | + errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size."; |
3791 | + errorText_ = errorStream_.str(); |
3792 | + return FAILURE; |
3793 | + } |
3794 | + |
3795 | + if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize; |
3796 | + else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize; |
3797 | + else if ( granularity == -1 ) { |
3798 | + // Make sure bufferSize is a power of two. |
3799 | + int log2_of_min_size = 0; |
3800 | + int log2_of_max_size = 0; |
3801 | + |
3802 | + for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) { |
3803 | + if ( minSize & ((long)1 << i) ) log2_of_min_size = i; |
3804 | + if ( maxSize & ((long)1 << i) ) log2_of_max_size = i; |
3805 | + } |
3806 | + |
3807 | + long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) ); |
3808 | + int min_delta_num = log2_of_min_size; |
3809 | + |
3810 | + for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) { |
3811 | + long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) ); |
3812 | + if (current_delta < min_delta) { |
3813 | + min_delta = current_delta; |
3814 | + min_delta_num = i; |
3815 | + } |
3816 | + } |
3817 | + |
3818 | + *bufferSize = ( (unsigned int)1 << min_delta_num ); |
3819 | + if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize; |
3820 | + else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize; |
3821 | + } |
3822 | + else if ( granularity != 0 ) { |
3823 | + // Set to an even multiple of granularity, rounding up. |
3824 | + *bufferSize = (*bufferSize + granularity-1) / granularity * granularity; |
3825 | + } |
3826 | + |
3827 | + if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) { |
3828 | + drivers.removeCurrentDriver(); |
3829 | + errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!"; |
3830 | + return FAILURE; |
3831 | + } |
3832 | + |
3833 | + stream_.bufferSize = *bufferSize; |
3834 | + stream_.nBuffers = 2; |
3835 | + |
3836 | + if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false; |
3837 | + else stream_.userInterleaved = true; |
3838 | + |
3839 | + // ASIO always uses non-interleaved buffers. |
3840 | + stream_.deviceInterleaved[mode] = false; |
3841 | + |
3842 | + // Allocate, if necessary, our AsioHandle structure for the stream. |
3843 | + AsioHandle *handle = (AsioHandle *) stream_.apiHandle; |
3844 | + if ( handle == 0 ) { |
3845 | + try { |
3846 | + handle = new AsioHandle; |
3847 | + } |
3848 | + catch ( std::bad_alloc& ) { |
3849 | + //if ( handle == NULL ) { |
3850 | + drivers.removeCurrentDriver(); |
3851 | + errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory."; |
3852 | + return FAILURE; |
3853 | + } |
3854 | + handle->bufferInfos = 0; |
3855 | + |
3856 | + // Create a manual-reset event. |
3857 | + handle->condition = CreateEvent( NULL, // no security |
3858 | + TRUE, // manual-reset |
3859 | + FALSE, // non-signaled initially |
3860 | + NULL ); // unnamed |
3861 | + stream_.apiHandle = (void *) handle; |
3862 | + } |
3863 | + |
3864 | + // Create the ASIO internal buffers. Since RtAudio sets up input |
3865 | + // and output separately, we'll have to dispose of previously |
3866 | + // created output buffers for a duplex stream. |
3867 | + long inputLatency, outputLatency; |
3868 | + if ( mode == INPUT && stream_.mode == OUTPUT ) { |
3869 | + ASIODisposeBuffers(); |
3870 | + if ( handle->bufferInfos ) free( handle->bufferInfos ); |
3871 | + } |
3872 | + |
3873 | + // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure. |
3874 | + bool buffersAllocated = false; |
3875 | + unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1]; |
3876 | + handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) ); |
3877 | + if ( handle->bufferInfos == NULL ) { |
3878 | + errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ")."; |
3879 | + errorText_ = errorStream_.str(); |
3880 | + goto error; |
3881 | + } |
3882 | + |
3883 | + ASIOBufferInfo *infos; |
3884 | + infos = handle->bufferInfos; |
3885 | + for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) { |
3886 | + infos->isInput = ASIOFalse; |
3887 | + infos->channelNum = i + stream_.channelOffset[0]; |
3888 | + infos->buffers[0] = infos->buffers[1] = 0; |
3889 | + } |
3890 | + for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) { |
3891 | + infos->isInput = ASIOTrue; |
3892 | + infos->channelNum = i + stream_.channelOffset[1]; |
3893 | + infos->buffers[0] = infos->buffers[1] = 0; |
3894 | + } |
3895 | + |
3896 | + // Set up the ASIO callback structure and create the ASIO data buffers. |
3897 | + asioCallbacks.bufferSwitch = &bufferSwitch; |
3898 | + asioCallbacks.sampleRateDidChange = &sampleRateChanged; |
3899 | + asioCallbacks.asioMessage = &asioMessages; |
3900 | + asioCallbacks.bufferSwitchTimeInfo = NULL; |
3901 | + result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks ); |
3902 | + if ( result != ASE_OK ) { |
3903 | + errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers."; |
3904 | + errorText_ = errorStream_.str(); |
3905 | + goto error; |
3906 | + } |
3907 | + buffersAllocated = true; |
3908 | + |
3909 | + // Set flags for buffer conversion. |
3910 | + stream_.doConvertBuffer[mode] = false; |
3911 | + if ( stream_.userFormat != stream_.deviceFormat[mode] ) |
3912 | + stream_.doConvertBuffer[mode] = true; |
3913 | + if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && |
3914 | + stream_.nUserChannels[mode] > 1 ) |
3915 | + stream_.doConvertBuffer[mode] = true; |
3916 | + |
3917 | + // Allocate necessary internal buffers |
3918 | + unsigned long bufferBytes; |
3919 | + bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat ); |
3920 | + stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); |
3921 | + if ( stream_.userBuffer[mode] == NULL ) { |
3922 | + errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory."; |
3923 | + goto error; |
3924 | + } |
3925 | + |
3926 | + if ( stream_.doConvertBuffer[mode] ) { |
3927 | + |
3928 | + bool makeBuffer = true; |
3929 | + bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] ); |
3930 | + if ( mode == INPUT ) { |
3931 | + if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { |
3932 | + unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); |
3933 | + if ( bufferBytes <= bytesOut ) makeBuffer = false; |
3934 | + } |
3935 | + } |
3936 | + |
3937 | + if ( makeBuffer ) { |
3938 | + bufferBytes *= *bufferSize; |
3939 | + if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); |
3940 | + stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); |
3941 | + if ( stream_.deviceBuffer == NULL ) { |
3942 | + errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory."; |
3943 | + goto error; |
3944 | + } |
3945 | + } |
3946 | + } |
3947 | + |
3948 | + stream_.sampleRate = sampleRate; |
3949 | + stream_.device[mode] = device; |
3950 | + stream_.state = STREAM_STOPPED; |
3951 | + asioCallbackInfo = &stream_.callbackInfo; |
3952 | + stream_.callbackInfo.object = (void *) this; |
3953 | + if ( stream_.mode == OUTPUT && mode == INPUT ) |
3954 | + // We had already set up an output stream. |
3955 | + stream_.mode = DUPLEX; |
3956 | + else |
3957 | + stream_.mode = mode; |
3958 | + |
3959 | + // Determine device latencies |
3960 | + result = ASIOGetLatencies( &inputLatency, &outputLatency ); |
3961 | + if ( result != ASE_OK ) { |
3962 | + errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency."; |
3963 | + errorText_ = errorStream_.str(); |
3964 | + error( RtAudioError::WARNING); // warn but don't fail |
3965 | + } |
3966 | + else { |
3967 | + stream_.latency[0] = outputLatency; |
3968 | + stream_.latency[1] = inputLatency; |
3969 | + } |
3970 | + |
3971 | + // Setup the buffer conversion information structure. We don't use |
3972 | + // buffers to do channel offsets, so we override that parameter |
3973 | + // here. |
3974 | + if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 ); |
3975 | + |
3976 | + return SUCCESS; |
3977 | + |
3978 | + error: |
3979 | + if ( buffersAllocated ) |
3980 | + ASIODisposeBuffers(); |
3981 | + drivers.removeCurrentDriver(); |
3982 | + |
3983 | + if ( handle ) { |
3984 | + CloseHandle( handle->condition ); |
3985 | + if ( handle->bufferInfos ) |
3986 | + free( handle->bufferInfos ); |
3987 | + delete handle; |
3988 | + stream_.apiHandle = 0; |
3989 | + } |
3990 | + |
3991 | + for ( int i=0; i<2; i++ ) { |
3992 | + if ( stream_.userBuffer[i] ) { |
3993 | + free( stream_.userBuffer[i] ); |
3994 | + stream_.userBuffer[i] = 0; |
3995 | + } |
3996 | + } |
3997 | + |
3998 | + if ( stream_.deviceBuffer ) { |
3999 | + free( stream_.deviceBuffer ); |
4000 | + stream_.deviceBuffer = 0; |
4001 | + } |
4002 | + |
4003 | + return FAILURE; |
4004 | +} |
4005 | + |
// Close the current ASIO stream: stop it if running, release the driver's
// buffers and the driver itself, then free all per-stream resources.
// Mirrors the cleanup performed on the error path of probeDeviceOpen().
void RtApiAsio :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flip the state to STOPPED before calling ASIOStop() so the buffer-switch
  // callback (which checks stream_.state first) becomes a no-op during
  // shutdown; buffers must be disposed before the driver is removed.
  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;
    ASIOStop();
  }
  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  // Free the AsioHandle (Win32 event + ASIOBufferInfo array) allocated in
  // probeDeviceOpen().
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( handle ) {
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );
    delete handle;
    stream_.apiHandle = 0;
  }

  // Release user-side buffers (index 0 = output, 1 = input).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Release the shared device-side conversion buffer, if any.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
4045 | + |
// File-scope flag cleared by startStream(); NOTE(review): not read anywhere
// in this visible region and not atomic — presumably consulted by the stop
// path elsewhere in the file; confirm before relying on it across threads.
bool stopThreadCalled = false;
4047 | + |
// Start the open ASIO stream. Resets the drain bookkeeping and the
// manual-reset condition event before marking the stream RUNNING.
// Raises SYSTEM_ERROR if ASIOStart() fails.
void RtApiAsio :: startStream()
{
  verifyStream(); // throws if no stream is open
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  ASIOError result = ASIOStart();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
    errorText_ = errorStream_.str();
    goto unlock;
  }

  handle->drainCounter = 0;        // no output drain in progress
  handle->internalDrain = false;
  ResetEvent( handle->condition ); // non-signaled until a drain completes
  stream_.state = STREAM_RUNNING;
  asioXRun = false;                // clear any stale xrun indication

  unlock:
  stopThreadCalled = false;

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
4077 | + |
// Stop the open ASIO stream. For output/duplex streams the output is first
// drained: drainCounter is set to 2 and callbackEvent() signals the
// condition event once it has finished writing silence, at which point we
// proceed to ASIOStop().
void RtApiAsio :: stopStream()
{
  verifyStream(); // throws if no stream is open
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) { // only if a drain isn't already underway
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }
  }

  stream_.state = STREAM_STOPPED;

  ASIOError result = ASIOStop();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
    errorText_ = errorStream_.str();
  }

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
4106 | + |
4107 | +void RtApiAsio :: abortStream() |
4108 | +{ |
4109 | + verifyStream(); |
4110 | + if ( stream_.state == STREAM_STOPPED ) { |
4111 | + errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!"; |
4112 | + error( RtAudioError::WARNING ); |
4113 | + return; |
4114 | + } |
4115 | + |
4116 | + // The following lines were commented-out because some behavior was |
4117 | + // noted where the device buffers need to be zeroed to avoid |
4118 | + // continuing sound, even when the device buffers are completely |
4119 | + // disposed. So now, calling abort is the same as calling stop. |
4120 | + // AsioHandle *handle = (AsioHandle *) stream_.apiHandle; |
4121 | + // handle->drainCounter = 2; |
4122 | + stopStream(); |
4123 | +} |
4124 | + |
4125 | +// This function will be called by a spawned thread when the user |
4126 | +// callback function signals that the stream should be stopped or |
4127 | +// aborted. It is necessary to handle it this way because the |
4128 | +// callbackEvent() function must return before the ASIOStop() |
4129 | +// function will return. |
4130 | +static unsigned __stdcall asioStopStream( void *ptr ) |
4131 | +{ |
4132 | + CallbackInfo *info = (CallbackInfo *) ptr; |
4133 | + RtApiAsio *object = (RtApiAsio *) info->object; |
4134 | + |
4135 | + object->stopStream(); |
4136 | + _endthreadex( 0 ); |
4137 | + return 0; |
4138 | +} |
4139 | + |
// ASIO buffer-switch handler: services one half of the double buffer
// (bufferIndex). Invokes the user callback for fresh output, performs any
// format/interleaving conversion and byte swapping between user and device
// buffers, and manages the output-drain handshake with stopStream().
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      SetEvent( handle->condition ); // wake the thread blocked in stopStream()
    else { // spawn a thread to stop the stream
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report (and clear) any xrun flagged by the kAsioResyncRequest message.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) { // user requested abort: stop via helper thread
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) { // user requested stop: drain output first
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {
      // Convert format/interleaving into the device buffer, then scatter it
      // into ASIO's per-channel (non-interleaved) output buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {
      // No conversion needed: copy straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // Direct per-channel copy into the (non-interleaved) user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

  unlock:
  // The following call was suggested by Malte Clasen. While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
4293 | + |
4294 | +static void sampleRateChanged( ASIOSampleRate sRate ) |
4295 | +{ |
4296 | + // The ASIO documentation says that this usually only happens during |
4297 | + // external sync. Audio processing is not stopped by the driver, |
4298 | + // actual sample rate might not have even changed, maybe only the |
4299 | + // sample rate status of an AES/EBU or S/PDIF digital input at the |
4300 | + // audio device. |
4301 | + |
4302 | + RtApi *object = (RtApi *) asioCallbackInfo->object; |
4303 | + try { |
4304 | + object->stopStream(); |
4305 | + } |
4306 | + catch ( RtAudioError &exception ) { |
4307 | + std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl; |
4308 | + return; |
4309 | + } |
4310 | + |
4311 | + std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl; |
4312 | +} |
4313 | + |
4314 | +static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ ) |
4315 | +{ |
4316 | + long ret = 0; |
4317 | + |
4318 | + switch( selector ) { |
4319 | + case kAsioSelectorSupported: |
4320 | + if ( value == kAsioResetRequest |
4321 | + || value == kAsioEngineVersion |
4322 | + || value == kAsioResyncRequest |
4323 | + || value == kAsioLatenciesChanged |
4324 | + // The following three were added for ASIO 2.0, you don't |
4325 | + // necessarily have to support them. |
4326 | + || value == kAsioSupportsTimeInfo |
4327 | + || value == kAsioSupportsTimeCode |
4328 | + || value == kAsioSupportsInputMonitor) |
4329 | + ret = 1L; |
4330 | + break; |
4331 | + case kAsioResetRequest: |
4332 | + // Defer the task and perform the reset of the driver during the |
4333 | + // next "safe" situation. You cannot reset the driver right now, |
4334 | + // as this code is called from the driver. Reset the driver is |
4335 | + // done by completely destruct is. I.e. ASIOStop(), |
4336 | + // ASIODisposeBuffers(), Destruction Afterwards you initialize the |
4337 | + // driver again. |
4338 | + std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl; |
4339 | + ret = 1L; |
4340 | + break; |
4341 | + case kAsioResyncRequest: |
4342 | + // This informs the application that the driver encountered some |
4343 | + // non-fatal data loss. It is used for synchronization purposes |
4344 | + // of different media. Added mainly to work around the Win16Mutex |
4345 | + // problems in Windows 95/98 with the Windows Multimedia system, |
4346 | + // which could lose data because the Mutex was held too long by |
4347 | + // another thread. However a driver can issue it in other |
4348 | + // situations, too. |
4349 | + // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl; |
4350 | + asioXRun = true; |
4351 | + ret = 1L; |
4352 | + break; |
4353 | + case kAsioLatenciesChanged: |
4354 | + // This will inform the host application that the drivers were |
4355 | + // latencies changed. Beware, it this does not mean that the |
4356 | + // buffer sizes have changed! You might need to update internal |
4357 | + // delay data. |
4358 | + std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl; |
4359 | + ret = 1L; |
4360 | + break; |
4361 | + case kAsioEngineVersion: |
4362 | + // Return the supported ASIO version of the host application. If |
4363 | + // a host application does not implement this selector, ASIO 1.0 |
4364 | + // is assumed by the driver. |
4365 | + ret = 2L; |
4366 | + break; |
4367 | + case kAsioSupportsTimeInfo: |
4368 | + // Informs the driver whether the |
4369 | + // asioCallbacks.bufferSwitchTimeInfo() callback is supported. |
4370 | + // For compatibility with ASIO 1.0 drivers the host application |
4371 | + // should always support the "old" bufferSwitch method, too. |
4372 | + ret = 0; |
4373 | + break; |
4374 | + case kAsioSupportsTimeCode: |
4375 | + // Informs the driver whether application is interested in time |
4376 | + // code info. If an application does not need to know about time |
4377 | + // code, the driver has less work to do. |
4378 | + ret = 0; |
4379 | + break; |
4380 | + } |
4381 | + return ret; |
4382 | +} |
4383 | + |
4384 | +static const char* getAsioErrorString( ASIOError result ) |
4385 | +{ |
4386 | + struct Messages |
4387 | + { |
4388 | + ASIOError value; |
4389 | + const char*message; |
4390 | + }; |
4391 | + |
4392 | + static const Messages m[] = |
4393 | + { |
4394 | + { ASE_NotPresent, "Hardware input or output is not present or available." }, |
4395 | + { ASE_HWMalfunction, "Hardware is malfunctioning." }, |
4396 | + { ASE_InvalidParameter, "Invalid input parameter." }, |
4397 | + { ASE_InvalidMode, "Invalid mode." }, |
4398 | + { ASE_SPNotAdvancing, "Sample position not advancing." }, |
4399 | + { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." }, |
4400 | + { ASE_NoMemory, "Not enough memory to complete the request." } |
4401 | + }; |
4402 | + |
4403 | + for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i ) |
4404 | + if ( m[i].value == result ) return m[i].message; |
4405 | + |
4406 | + return "Unknown error."; |
4407 | +} |
4408 | + |
4409 | +//******************** End of __WINDOWS_ASIO__ *********************// |
4410 | +#endif |
4411 | + |
4412 | + |
4413 | +#if defined(__WINDOWS_WASAPI__) // Windows WASAPI API |
4414 | + |
4415 | +// Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014 |
4416 | +// - Introduces support for the Windows WASAPI API |
4417 | +// - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required |
4418 | +// - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface |
4419 | +// - Includes automatic internal conversion of sample rate and buffer size between hardware and the user |
4420 | + |
4421 | +#ifndef INITGUID |
4422 | + #define INITGUID |
4423 | +#endif |
4424 | +#include <audioclient.h> |
4425 | +#include <avrt.h> |
4426 | +#include <mmdeviceapi.h> |
4427 | +#include <functiondiscoverykeys_devpkey.h> |
4428 | + |
4429 | +//============================================================================= |
4430 | + |
// Release a COM interface pointer and null it out; no-op when the pointer is
// already NULL. NOTE(review): expands to a bare `if` statement (no
// do/while(0) guard), so avoid using it as the body of an un-braced if/else.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
4437 | + |
4438 | +typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex ); |
4439 | + |
4440 | +//----------------------------------------------------------------------------- |
4441 | + |
4442 | +// WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size. |
4443 | +// Therefore we must perform all necessary conversions to user buffers in order to satisfy these |
4444 | +// requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to |
4445 | +// provide intermediate storage for read / write synchronization. |
4446 | +class WasapiBuffer |
4447 | +{ |
4448 | +public: |
  // Construct an empty ring buffer; storage is allocated by setBufferSize().
  WasapiBuffer()
    : buffer_( NULL ),
      bufferSize_( 0 ),
      inIndex_( 0 ),
      outIndex_( 0 ) {}
4454 | + |
4455 | + ~WasapiBuffer() { |
4456 | + delete buffer_; |
4457 | + } |
4458 | + |
4459 | + // sets the length of the internal ring buffer |
4460 | + void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) { |
4461 | + delete buffer_; |
4462 | + |
4463 | + buffer_ = ( char* ) calloc( bufferSize, formatBytes ); |
4464 | + |
4465 | + bufferSize_ = bufferSize; |
4466 | + inIndex_ = 0; |
4467 | + outIndex_ = 0; |
4468 | + } |
4469 | + |
4470 | + // attempt to push a buffer into the ring buffer at the current "in" index |
4471 | + bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format ) |
4472 | + { |
4473 | + if ( !buffer || // incoming buffer is NULL |
4474 | + bufferSize == 0 || // incoming buffer has no data |
4475 | + bufferSize > bufferSize_ ) // incoming buffer too large |
4476 | + { |
4477 | + return false; |
4478 | + } |
4479 | + |
4480 | + unsigned int relOutIndex = outIndex_; |
4481 | + unsigned int inIndexEnd = inIndex_ + bufferSize; |
4482 | + if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) { |
4483 | + relOutIndex += bufferSize_; |
4484 | + } |
4485 | + |
4486 | + // "in" index can end on the "out" index but cannot begin at it |
4487 | + if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) { |
4488 | + return false; // not enough space between "in" index and "out" index |
4489 | + } |
4490 | + |
4491 | + // copy buffer from external to internal |
4492 | + int fromZeroSize = inIndex_ + bufferSize - bufferSize_; |
4493 | + fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize; |
4494 | + int fromInSize = bufferSize - fromZeroSize; |
4495 | + |
4496 | + switch( format ) |
4497 | + { |
4498 | + case RTAUDIO_SINT8: |
4499 | + memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) ); |
4500 | + memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) ); |
4501 | + break; |
4502 | + case RTAUDIO_SINT16: |
4503 | + memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) ); |
4504 | + memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) ); |
4505 | + break; |
4506 | + case RTAUDIO_SINT24: |
4507 | + memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) ); |
4508 | + memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) ); |
4509 | + break; |
4510 | + case RTAUDIO_SINT32: |
4511 | + memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) ); |
4512 | + memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) ); |
4513 | + break; |
4514 | + case RTAUDIO_FLOAT32: |
4515 | + memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) ); |
4516 | + memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) ); |
4517 | + break; |
4518 | + case RTAUDIO_FLOAT64: |
4519 | + memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) ); |
4520 | + memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) ); |
4521 | + break; |
4522 | + } |
4523 | + |
4524 | + // update "in" index |
4525 | + inIndex_ += bufferSize; |
4526 | + inIndex_ %= bufferSize_; |
4527 | + |
4528 | + return true; |
4529 | + } |
4530 | + |
4531 | + // attempt to pull a buffer from the ring buffer from the current "out" index |
4532 | + bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format ) |
4533 | + { |
4534 | + if ( !buffer || // incoming buffer is NULL |
4535 | + bufferSize == 0 || // incoming buffer has no data |
4536 | + bufferSize > bufferSize_ ) // incoming buffer too large |
4537 | + { |
4538 | + return false; |
4539 | + } |
4540 | + |
4541 | + unsigned int relInIndex = inIndex_; |
4542 | + unsigned int outIndexEnd = outIndex_ + bufferSize; |
4543 | + if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) { |
4544 | + relInIndex += bufferSize_; |
4545 | + } |
4546 | + |
4547 | + // "out" index can begin at and end on the "in" index |
4548 | + if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) { |
4549 | + return false; // not enough space between "out" index and "in" index |
4550 | + } |
4551 | + |
4552 | + // copy buffer from internal to external |
4553 | + int fromZeroSize = outIndex_ + bufferSize - bufferSize_; |
4554 | + fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize; |
4555 | + int fromOutSize = bufferSize - fromZeroSize; |
4556 | + |
4557 | + switch( format ) |
4558 | + { |
4559 | + case RTAUDIO_SINT8: |
4560 | + memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) ); |
4561 | + memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) ); |
4562 | + break; |
4563 | + case RTAUDIO_SINT16: |
4564 | + memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) ); |
4565 | + memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) ); |
4566 | + break; |
4567 | + case RTAUDIO_SINT24: |
4568 | + memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) ); |
4569 | + memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) ); |
4570 | + break; |
4571 | + case RTAUDIO_SINT32: |
4572 | + memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) ); |
4573 | + memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) ); |
4574 | + break; |
4575 | + case RTAUDIO_FLOAT32: |
4576 | + memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) ); |
4577 | + memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) ); |
4578 | + break; |
4579 | + case RTAUDIO_FLOAT64: |
4580 | + memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) ); |
4581 | + memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) ); |
4582 | + break; |
4583 | + } |
4584 | + |
4585 | + // update "out" index |
4586 | + outIndex_ += bufferSize; |
4587 | + outIndex_ %= bufferSize_; |
4588 | + |
4589 | + return true; |
4590 | + } |
4591 | + |
4592 | +private: |
4593 | + char* buffer_; |
4594 | + unsigned int bufferSize_; |
4595 | + unsigned int inIndex_; |
4596 | + unsigned int outIndex_; |
4597 | +}; |
4598 | + |
4599 | +//----------------------------------------------------------------------------- |
4600 | + |
4601 | +// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate |
4602 | +// between HW and the user. The convertBufferWasapi function is used to perform this conversion |
4603 | +// between HwIn->UserIn and UserOut->HwOut during the stream callback loop. |
4604 | +// This sample rate converter favors speed over quality, and works best with conversions between |
4605 | +// one rate and its multiple. |
4606 | +void convertBufferWasapi( char* outBuffer, |
4607 | + const char* inBuffer, |
4608 | + const unsigned int& channelCount, |
4609 | + const unsigned int& inSampleRate, |
4610 | + const unsigned int& outSampleRate, |
4611 | + const unsigned int& inSampleCount, |
4612 | + unsigned int& outSampleCount, |
4613 | + const RtAudioFormat& format ) |
4614 | +{ |
4615 | + // calculate the new outSampleCount and relative sampleStep |
4616 | + float sampleRatio = ( float ) outSampleRate / inSampleRate; |
4617 | + float sampleStep = 1.0f / sampleRatio; |
4618 | + float inSampleFraction = 0.0f; |
4619 | + |
4620 | + outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio ); |
4621 | + |
4622 | + // frame-by-frame, copy each relative input sample into it's corresponding output sample |
4623 | + for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ ) |
4624 | + { |
4625 | + unsigned int inSample = ( unsigned int ) inSampleFraction; |
4626 | + |
4627 | + switch ( format ) |
4628 | + { |
4629 | + case RTAUDIO_SINT8: |
4630 | + memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) ); |
4631 | + break; |
4632 | + case RTAUDIO_SINT16: |
4633 | + memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) ); |
4634 | + break; |
4635 | + case RTAUDIO_SINT24: |
4636 | + memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) ); |
4637 | + break; |
4638 | + case RTAUDIO_SINT32: |
4639 | + memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) ); |
4640 | + break; |
4641 | + case RTAUDIO_FLOAT32: |
4642 | + memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) ); |
4643 | + break; |
4644 | + case RTAUDIO_FLOAT64: |
4645 | + memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) ); |
4646 | + break; |
4647 | + } |
4648 | + |
4649 | + // jump to next in sample |
4650 | + inSampleFraction += sampleStep; |
4651 | + } |
4652 | +} |
4653 | + |
4654 | +//----------------------------------------------------------------------------- |
4655 | + |
// A structure to hold various information related to the WASAPI implementation.
// All members are owned COM interface pointers / event handles for one stream;
// every field starts out NULL and is presumably populated when the stream is
// opened and released on close (the open/close code is outside this chunk —
// confirm there).
struct WasapiHandle
{
  IAudioClient* captureAudioClient;   // client for the capture (input) endpoint
  IAudioClient* renderAudioClient;    // client for the render (output) endpoint
  IAudioCaptureClient* captureClient; // capture-side buffer access interface
  IAudioRenderClient* renderClient;   // render-side buffer access interface
  HANDLE captureEvent;                // event signaled when capture data is ready
  HANDLE renderEvent;                 // event signaled when render buffer is free

  WasapiHandle()
  : captureAudioClient( NULL ),
    renderAudioClient( NULL ),
    captureClient( NULL ),
    renderClient( NULL ),
    captureEvent( NULL ),
    renderEvent( NULL ) {}
};
4674 | + |
4675 | +//============================================================================= |
4676 | + |
4677 | +RtApiWasapi::RtApiWasapi() |
4678 | + : coInitialized_( false ), deviceEnumerator_( NULL ) |
4679 | +{ |
4680 | + // WASAPI can run either apartment or multi-threaded |
4681 | + HRESULT hr = CoInitialize( NULL ); |
4682 | + if ( !FAILED( hr ) ) |
4683 | + coInitialized_ = true; |
4684 | + |
4685 | + // Instantiate device enumerator |
4686 | + hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL, |
4687 | + CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ), |
4688 | + ( void** ) &deviceEnumerator_ ); |
4689 | + |
4690 | + if ( FAILED( hr ) ) { |
4691 | + errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator"; |
4692 | + error( RtAudioError::DRIVER_ERROR ); |
4693 | + } |
4694 | +} |
4695 | + |
4696 | +//----------------------------------------------------------------------------- |
4697 | + |
// Destructor: tear down in the reverse order of construction — close any open
// stream first (it uses the enumerator/COM), then release the enumerator, and
// only then uninitialize COM, and only if our constructor's CoInitialize
// actually succeeded.
RtApiWasapi::~RtApiWasapi()
{
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize()
  if ( coInitialized_ )
    CoUninitialize();
}
4709 | + |
4710 | +//============================================================================= |
4711 | + |
4712 | +unsigned int RtApiWasapi::getDeviceCount( void ) |
4713 | +{ |
4714 | + unsigned int captureDeviceCount = 0; |
4715 | + unsigned int renderDeviceCount = 0; |
4716 | + |
4717 | + IMMDeviceCollection* captureDevices = NULL; |
4718 | + IMMDeviceCollection* renderDevices = NULL; |
4719 | + |
4720 | + // Count capture devices |
4721 | + errorText_.clear(); |
4722 | + HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices ); |
4723 | + if ( FAILED( hr ) ) { |
4724 | + errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection."; |
4725 | + goto Exit; |
4726 | + } |
4727 | + |
4728 | + hr = captureDevices->GetCount( &captureDeviceCount ); |
4729 | + if ( FAILED( hr ) ) { |
4730 | + errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count."; |
4731 | + goto Exit; |
4732 | + } |
4733 | + |
4734 | + // Count render devices |
4735 | + hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices ); |
4736 | + if ( FAILED( hr ) ) { |
4737 | + errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection."; |
4738 | + goto Exit; |
4739 | + } |
4740 | + |
4741 | + hr = renderDevices->GetCount( &renderDeviceCount ); |
4742 | + if ( FAILED( hr ) ) { |
4743 | + errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count."; |
4744 | + goto Exit; |
4745 | + } |
4746 | + |
4747 | +Exit: |
4748 | + // release all references |
4749 | + SAFE_RELEASE( captureDevices ); |
4750 | + SAFE_RELEASE( renderDevices ); |
4751 | + |
4752 | + if ( errorText_.empty() ) |
4753 | + return captureDeviceCount + renderDeviceCount; |
4754 | + |
4755 | + error( RtAudioError::DRIVER_ERROR ); |
4756 | + return 0; |
4757 | +} |
4758 | + |
4759 | +//----------------------------------------------------------------------------- |
4760 | + |
4761 | +RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device ) |
4762 | +{ |
4763 | + RtAudio::DeviceInfo info; |
4764 | + unsigned int captureDeviceCount = 0; |
4765 | + unsigned int renderDeviceCount = 0; |
4766 | + std::wstring deviceName; |
4767 | + std::string defaultDeviceName; |
4768 | + bool isCaptureDevice = false; |
4769 | + |
4770 | + PROPVARIANT deviceNameProp; |
4771 | + PROPVARIANT defaultDeviceNameProp; |
4772 | + |
4773 | + IMMDeviceCollection* captureDevices = NULL; |
4774 | + IMMDeviceCollection* renderDevices = NULL; |
4775 | + IMMDevice* devicePtr = NULL; |
4776 | + IMMDevice* defaultDevicePtr = NULL; |
4777 | + IAudioClient* audioClient = NULL; |
4778 | + IPropertyStore* devicePropStore = NULL; |
4779 | + IPropertyStore* defaultDevicePropStore = NULL; |
4780 | + |
4781 | + WAVEFORMATEX* deviceFormat = NULL; |
4782 | + WAVEFORMATEX* closestMatchFormat = NULL; |
4783 | + |
4784 | + // probed |
4785 | + info.probed = false; |
4786 | + |
4787 | + // Count capture devices |
4788 | + errorText_.clear(); |
4789 | + RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR; |
4790 | + HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices ); |
4791 | + if ( FAILED( hr ) ) { |
4792 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection."; |
4793 | + goto Exit; |
4794 | + } |
4795 | + |
4796 | + hr = captureDevices->GetCount( &captureDeviceCount ); |
4797 | + if ( FAILED( hr ) ) { |
4798 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count."; |
4799 | + goto Exit; |
4800 | + } |
4801 | + |
4802 | + // Count render devices |
4803 | + hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices ); |
4804 | + if ( FAILED( hr ) ) { |
4805 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection."; |
4806 | + goto Exit; |
4807 | + } |
4808 | + |
4809 | + hr = renderDevices->GetCount( &renderDeviceCount ); |
4810 | + if ( FAILED( hr ) ) { |
4811 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count."; |
4812 | + goto Exit; |
4813 | + } |
4814 | + |
4815 | + // validate device index |
4816 | + if ( device >= captureDeviceCount + renderDeviceCount ) { |
4817 | + errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index."; |
4818 | + errorType = RtAudioError::INVALID_USE; |
4819 | + goto Exit; |
4820 | + } |
4821 | + |
4822 | + // determine whether index falls within capture or render devices |
4823 | + if ( device >= renderDeviceCount ) { |
4824 | + hr = captureDevices->Item( device - renderDeviceCount, &devicePtr ); |
4825 | + if ( FAILED( hr ) ) { |
4826 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle."; |
4827 | + goto Exit; |
4828 | + } |
4829 | + isCaptureDevice = true; |
4830 | + } |
4831 | + else { |
4832 | + hr = renderDevices->Item( device, &devicePtr ); |
4833 | + if ( FAILED( hr ) ) { |
4834 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle."; |
4835 | + goto Exit; |
4836 | + } |
4837 | + isCaptureDevice = false; |
4838 | + } |
4839 | + |
4840 | + // get default device name |
4841 | + if ( isCaptureDevice ) { |
4842 | + hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr ); |
4843 | + if ( FAILED( hr ) ) { |
4844 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle."; |
4845 | + goto Exit; |
4846 | + } |
4847 | + } |
4848 | + else { |
4849 | + hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr ); |
4850 | + if ( FAILED( hr ) ) { |
4851 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle."; |
4852 | + goto Exit; |
4853 | + } |
4854 | + } |
4855 | + |
4856 | + hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore ); |
4857 | + if ( FAILED( hr ) ) { |
4858 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store."; |
4859 | + goto Exit; |
4860 | + } |
4861 | + PropVariantInit( &defaultDeviceNameProp ); |
4862 | + |
4863 | + hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp ); |
4864 | + if ( FAILED( hr ) ) { |
4865 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName."; |
4866 | + goto Exit; |
4867 | + } |
4868 | + |
4869 | + deviceName = defaultDeviceNameProp.pwszVal; |
4870 | + defaultDeviceName = std::string( deviceName.begin(), deviceName.end() ); |
4871 | + |
4872 | + // name |
4873 | + hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore ); |
4874 | + if ( FAILED( hr ) ) { |
4875 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store."; |
4876 | + goto Exit; |
4877 | + } |
4878 | + |
4879 | + PropVariantInit( &deviceNameProp ); |
4880 | + |
4881 | + hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp ); |
4882 | + if ( FAILED( hr ) ) { |
4883 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName."; |
4884 | + goto Exit; |
4885 | + } |
4886 | + |
4887 | + deviceName = deviceNameProp.pwszVal; |
4888 | + info.name = std::string( deviceName.begin(), deviceName.end() ); |
4889 | + |
4890 | + // is default |
4891 | + if ( isCaptureDevice ) { |
4892 | + info.isDefaultInput = info.name == defaultDeviceName; |
4893 | + info.isDefaultOutput = false; |
4894 | + } |
4895 | + else { |
4896 | + info.isDefaultInput = false; |
4897 | + info.isDefaultOutput = info.name == defaultDeviceName; |
4898 | + } |
4899 | + |
4900 | + // channel count |
4901 | + hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient ); |
4902 | + if ( FAILED( hr ) ) { |
4903 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client."; |
4904 | + goto Exit; |
4905 | + } |
4906 | + |
4907 | + hr = audioClient->GetMixFormat( &deviceFormat ); |
4908 | + if ( FAILED( hr ) ) { |
4909 | + errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format."; |
4910 | + goto Exit; |
4911 | + } |
4912 | + |
4913 | + if ( isCaptureDevice ) { |
4914 | + info.inputChannels = deviceFormat->nChannels; |
4915 | + info.outputChannels = 0; |
4916 | + info.duplexChannels = 0; |
4917 | + } |
4918 | + else { |
4919 | + info.inputChannels = 0; |
4920 | + info.outputChannels = deviceFormat->nChannels; |
4921 | + info.duplexChannels = 0; |
4922 | + } |
4923 | + |
4924 | + // sample rates |
4925 | + info.sampleRates.clear(); |
4926 | + |
4927 | + // allow support for all sample rates as we have a built-in sample rate converter |
4928 | + for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) { |
4929 | + info.sampleRates.push_back( SAMPLE_RATES[i] ); |
4930 | + } |
4931 | + |
4932 | + // native format |
4933 | + info.nativeFormats = 0; |
4934 | + |
4935 | + if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT || |
4936 | + ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE && |
4937 | + ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) ) |
4938 | + { |
4939 | + if ( deviceFormat->wBitsPerSample == 32 ) { |
4940 | + info.nativeFormats |= RTAUDIO_FLOAT32; |
4941 | + } |
4942 | + else if ( deviceFormat->wBitsPerSample == 64 ) { |
4943 | + info.nativeFormats |= RTAUDIO_FLOAT64; |
4944 | + } |
4945 | + } |
4946 | + else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM || |
4947 | + ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE && |
4948 | + ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) ) |
4949 | + { |
4950 | + if ( deviceFormat->wBitsPerSample == 8 ) { |
4951 | + info.nativeFormats |= RTAUDIO_SINT8; |
4952 | + } |
4953 | + else if ( deviceFormat->wBitsPerSample == 16 ) { |
4954 | + info.nativeFormats |= RTAUDIO_SINT16; |
4955 | + } |
4956 | + else if ( deviceFormat->wBitsPerSample == 24 ) { |
4957 | + info.nativeFormats |= RTAUDIO_SINT24; |
4958 | + } |
4959 | + else if ( deviceFormat->wBitsPerSample == 32 ) { |
4960 | + info.nativeFormats |= RTAUDIO_SINT32; |
4961 | + } |
4962 | + } |
4963 | + |
4964 | + // probed |
4965 | + info.probed = true; |
4966 | + |
4967 | +Exit: |
4968 | + // release all references |
4969 | + PropVariantClear( &deviceNameProp ); |
4970 | + PropVariantClear( &defaultDeviceNameProp ); |
4971 | + |
4972 | + SAFE_RELEASE( captureDevices ); |
4973 | + SAFE_RELEASE( renderDevices ); |
4974 | + SAFE_RELEASE( devicePtr ); |
4975 | + SAFE_RELEASE( defaultDevicePtr ); |
4976 | + SAFE_RELEASE( audioClient ); |
4977 | + SAFE_RELEASE( devicePropStore ); |
4978 | + SAFE_RELEASE( defaultDevicePropStore ); |
4979 | + |
4980 | + CoTaskMemFree( deviceFormat ); |
4981 | + CoTaskMemFree( closestMatchFormat ); |
4982 | + |
4983 | + if ( !errorText_.empty() ) |
4984 | + error( errorType ); |
4985 | + return info; |
4986 | +} |
4987 | + |
4988 | +//----------------------------------------------------------------------------- |
4989 | + |
4990 | +unsigned int RtApiWasapi::getDefaultOutputDevice( void ) |
4991 | +{ |
4992 | + for ( unsigned int i = 0; i < getDeviceCount(); i++ ) { |
4993 | + if ( getDeviceInfo( i ).isDefaultOutput ) { |
4994 | + return i; |
4995 | + } |
4996 | + } |
4997 | + |
4998 | + return 0; |
4999 | +} |
5000 | + |
The diff has been truncated for viewing.